path: root/dom/media/platforms
author     Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
committer  Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
commit     5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree       10027f336435511475e392454359edea8e25895d /dom/media/platforms
parent     49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
Add m-esr52 at 52.6.0
Diffstat (limited to 'dom/media/platforms')
-rw-r--r--dom/media/platforms/MediaTelemetryConstants.h22
-rw-r--r--dom/media/platforms/PDMFactory.cpp482
-rw-r--r--dom/media/platforms/PDMFactory.h84
-rw-r--r--dom/media/platforms/PlatformDecoderModule.h309
-rw-r--r--dom/media/platforms/ReorderQueue.h29
-rw-r--r--dom/media/platforms/agnostic/AgnosticDecoderModule.cpp63
-rw-r--r--dom/media/platforms/agnostic/AgnosticDecoderModule.h34
-rw-r--r--dom/media/platforms/agnostic/BlankDecoderModule.cpp278
-rw-r--r--dom/media/platforms/agnostic/OpusDecoder.cpp356
-rw-r--r--dom/media/platforms/agnostic/OpusDecoder.h76
-rw-r--r--dom/media/platforms/agnostic/TheoraDecoder.cpp240
-rw-r--r--dom/media/platforms/agnostic/TheoraDecoder.h64
-rw-r--r--dom/media/platforms/agnostic/VPXDecoder.cpp253
-rw-r--r--dom/media/platforms/agnostic/VPXDecoder.h69
-rw-r--r--dom/media/platforms/agnostic/VorbisDecoder.cpp349
-rw-r--r--dom/media/platforms/agnostic/VorbisDecoder.h66
-rw-r--r--dom/media/platforms/agnostic/WAVDecoder.cpp158
-rw-r--r--dom/media/platforms/agnostic/WAVDecoder.h41
-rw-r--r--dom/media/platforms/agnostic/eme/EMEAudioDecoder.cpp44
-rw-r--r--dom/media/platforms/agnostic/eme/EMEAudioDecoder.h37
-rw-r--r--dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp307
-rw-r--r--dom/media/platforms/agnostic/eme/EMEDecoderModule.h52
-rw-r--r--dom/media/platforms/agnostic/eme/EMEVideoDecoder.cpp68
-rw-r--r--dom/media/platforms/agnostic/eme/EMEVideoDecoder.h45
-rw-r--r--dom/media/platforms/agnostic/eme/SamplesWaitingForKey.cpp86
-rw-r--r--dom/media/platforms/agnostic/eme/SamplesWaitingForKey.h58
-rw-r--r--dom/media/platforms/agnostic/eme/moz.build23
-rw-r--r--dom/media/platforms/agnostic/gmp/GMPAudioDecoder.cpp306
-rw-r--r--dom/media/platforms/agnostic/gmp/GMPAudioDecoder.h112
-rw-r--r--dom/media/platforms/agnostic/gmp/GMPDecoderModule.cpp172
-rw-r--r--dom/media/platforms/agnostic/gmp/GMPDecoderModule.h57
-rw-r--r--dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp387
-rw-r--r--dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h122
-rw-r--r--dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.cpp90
-rw-r--r--dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.h181
-rw-r--r--dom/media/platforms/agnostic/gmp/moz.build24
-rw-r--r--dom/media/platforms/android/AndroidDecoderModule.cpp227
-rw-r--r--dom/media/platforms/android/AndroidDecoderModule.h34
-rw-r--r--dom/media/platforms/android/MediaCodecDataDecoder.cpp698
-rw-r--r--dom/media/platforms/android/MediaCodecDataDecoder.h126
-rw-r--r--dom/media/platforms/android/RemoteDataDecoder.cpp489
-rw-r--r--dom/media/platforms/android/RemoteDataDecoder.h62
-rw-r--r--dom/media/platforms/apple/AppleATDecoder.cpp722
-rw-r--r--dom/media/platforms/apple/AppleATDecoder.h78
-rw-r--r--dom/media/platforms/apple/AppleCMFunctions.h12
-rw-r--r--dom/media/platforms/apple/AppleCMLinker.cpp104
-rw-r--r--dom/media/platforms/apple/AppleCMLinker.h46
-rw-r--r--dom/media/platforms/apple/AppleDecoderModule.cpp113
-rw-r--r--dom/media/platforms/apple/AppleDecoderModule.h48
-rw-r--r--dom/media/platforms/apple/AppleUtils.h98
-rw-r--r--dom/media/platforms/apple/AppleVTDecoder.cpp674
-rw-r--r--dom/media/platforms/apple/AppleVTDecoder.h126
-rw-r--r--dom/media/platforms/apple/AppleVTFunctions.h14
-rw-r--r--dom/media/platforms/apple/AppleVTLinker.cpp104
-rw-r--r--dom/media/platforms/apple/AppleVTLinker.h46
-rw-r--r--dom/media/platforms/apple/VideoToolbox/VideoToolbox.h94
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp233
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h44
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp204
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegDataDecoder.h72
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegDecoderModule.cpp13
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegDecoderModule.h86
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp178
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegLibWrapper.h94
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegLibs.h39
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegLog.h14
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp169
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.h46
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp393
-rw-r--r--dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h127
-rw-r--r--dom/media/platforms/ffmpeg/README_mozilla11
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/COPYING.LGPLv2.1504
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/avcodec.h5418
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/avfft.h118
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/vaapi.h189
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/vdpau.h253
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/version.h210
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/attributes.h168
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/avconfig.h7
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/avutil.h343
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/buffer.h274
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/channel_layout.h223
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/common.h519
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/cpu.h117
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/dict.h198
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/error.h126
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/frame.h713
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/intfloat.h77
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/log.h359
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/macros.h50
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/mathematics.h165
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/mem.h406
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/pixfmt.h469
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/rational.h173
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/samplefmt.h271
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/version.h129
-rw-r--r--dom/media/platforms/ffmpeg/ffmpeg57/moz.build25
-rw-r--r--dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.cpp113
-rw-r--r--dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.h31
-rw-r--r--dom/media/platforms/ffmpeg/ffvpx/moz.build45
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/COPYING.LGPLv2.1504
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavcodec/avcodec.h4761
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavcodec/avfft.h99
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavcodec/dxva2.h71
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavcodec/old_codec_ids.h398
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavcodec/opt.h34
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavcodec/vaapi.h167
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavcodec/vda.h144
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavcodec/vdpau.h88
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavcodec/version.h126
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavcodec/xvmc.h151
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/adler32.h43
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/aes.h57
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/attributes.h136
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/audio_fifo.h146
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/audioconvert.h130
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/avassert.h66
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/avconfig.h6
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/avstring.h175
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/avutil.h326
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/base64.h65
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/blowfish.h77
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/bprint.h169
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/bswap.h109
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/common.h398
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/cpu.h56
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/crc.h44
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/dict.h121
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/error.h81
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/eval.h113
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/fifo.h141
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/file.h52
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/imgutils.h138
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/intfloat.h73
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/intfloat_readwrite.h40
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/intreadwrite.h522
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/lfg.h62
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/log.h172
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/lzo.h77
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/mathematics.h122
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/md5.h46
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/mem.h136
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/old_pix_fmts.h171
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/opt.h591
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/parseutils.h124
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/pixdesc.h177
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/pixfmt.h198
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/random_seed.h44
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/rational.h144
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/samplefmt.h148
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/sha.h66
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/time.h41
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/timecode.h140
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/timestamp.h74
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/version.h132
-rw-r--r--dom/media/platforms/ffmpeg/libav53/include/libavutil/xtea.h62
-rw-r--r--dom/media/platforms/ffmpeg/libav53/moz.build23
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/COPYING.LGPLv2.1504
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavcodec/avcodec.h4658
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavcodec/avfft.h116
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavcodec/dxva2.h88
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavcodec/old_codec_ids.h366
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavcodec/vaapi.h173
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavcodec/vda.h217
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavcodec/vdpau.h94
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavcodec/version.h95
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavcodec/xvmc.h168
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/adler32.h43
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/aes.h67
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/attributes.h122
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/audio_fifo.h146
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/audioconvert.h6
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/avassert.h66
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/avconfig.h6
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/avstring.h191
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/avutil.h275
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/base64.h65
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/blowfish.h76
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/bswap.h109
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/channel_layout.h182
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/common.h406
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/cpu.h84
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/crc.h74
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/dict.h129
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/error.h83
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/eval.h113
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/fifo.h131
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/file.h54
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/imgutils.h138
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/intfloat.h77
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/intfloat_readwrite.h40
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/intreadwrite.h549
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/lfg.h62
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/log.h173
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/lzo.h66
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/mathematics.h111
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/md5.h51
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/mem.h183
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/old_pix_fmts.h128
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/opt.h516
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/parseutils.h124
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/pixdesc.h223
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/pixfmt.h268
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/random_seed.h44
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/rational.h155
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/samplefmt.h220
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/sha.h76
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/time.h39
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/version.h87
-rw-r--r--dom/media/platforms/ffmpeg/libav54/include/libavutil/xtea.h61
-rw-r--r--dom/media/platforms/ffmpeg/libav54/moz.build23
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/COPYING.LGPLv2.1504
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavcodec/avcodec.h4356
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavcodec/avfft.h118
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavcodec/dxva2.h88
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavcodec/vaapi.h173
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavcodec/vda.h142
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavcodec/vdpau.h189
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavcodec/version.h127
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavcodec/xvmc.h174
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/adler32.h43
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/aes.h67
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/attributes.h126
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/audio_fifo.h146
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/audioconvert.h6
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/avassert.h66
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/avconfig.h6
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/avstring.h226
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/avutil.h284
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/base64.h65
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/blowfish.h76
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/bswap.h111
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/buffer.h267
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/channel_layout.h186
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/common.h406
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/cpu.h87
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/crc.h74
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/dict.h146
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/downmix_info.h114
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/error.h82
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/eval.h113
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/fifo.h131
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/file.h54
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/frame.h552
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/hmac.h95
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/imgutils.h138
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/intfloat.h77
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/intreadwrite.h549
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/lfg.h62
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/log.h262
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/lzo.h66
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/macros.h48
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/mathematics.h111
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/md5.h51
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/mem.h265
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/old_pix_fmts.h134
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/opt.h516
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/parseutils.h124
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/pixdesc.h276
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/pixfmt.h283
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/random_seed.h44
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/rational.h155
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/samplefmt.h220
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/sha.h76
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/stereo3d.h147
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/time.h39
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/version.h116
-rw-r--r--dom/media/platforms/ffmpeg/libav55/include/libavutil/xtea.h61
-rw-r--r--dom/media/platforms/ffmpeg/libav55/moz.build25
-rw-r--r--dom/media/platforms/ffmpeg/moz.build22
-rw-r--r--dom/media/platforms/gonk/GonkAudioDecoderManager.cpp268
-rw-r--r--dom/media/platforms/gonk/GonkAudioDecoderManager.h59
-rw-r--r--dom/media/platforms/gonk/GonkDecoderModule.cpp63
-rw-r--r--dom/media/platforms/gonk/GonkDecoderModule.h37
-rw-r--r--dom/media/platforms/gonk/GonkMediaDataDecoder.cpp385
-rw-r--r--dom/media/platforms/gonk/GonkMediaDataDecoder.h214
-rw-r--r--dom/media/platforms/gonk/GonkVideoDecoderManager.cpp772
-rw-r--r--dom/media/platforms/gonk/GonkVideoDecoderManager.h149
-rw-r--r--dom/media/platforms/gonk/moz.build39
-rw-r--r--dom/media/platforms/moz.build92
-rw-r--r--dom/media/platforms/omx/GonkOmxPlatformLayer.cpp667
-rw-r--r--dom/media/platforms/omx/GonkOmxPlatformLayer.h205
-rw-r--r--dom/media/platforms/omx/OmxDataDecoder.cpp1038
-rw-r--r--dom/media/platforms/omx/OmxDataDecoder.h214
-rw-r--r--dom/media/platforms/omx/OmxDecoderModule.cpp45
-rw-r--r--dom/media/platforms/omx/OmxDecoderModule.h30
-rw-r--r--dom/media/platforms/omx/OmxPlatformLayer.cpp327
-rw-r--r--dom/media/platforms/omx/OmxPlatformLayer.h100
-rw-r--r--dom/media/platforms/omx/OmxPromiseLayer.cpp382
-rw-r--r--dom/media/platforms/omx/OmxPromiseLayer.h252
-rw-r--r--dom/media/platforms/omx/moz.build57
-rw-r--r--dom/media/platforms/wmf/DXVA2Manager.cpp955
-rw-r--r--dom/media/platforms/wmf/DXVA2Manager.h56
-rw-r--r--dom/media/platforms/wmf/MFTDecoder.cpp305
-rw-r--r--dom/media/platforms/wmf/MFTDecoder.h111
-rw-r--r--dom/media/platforms/wmf/WMF.h104
-rw-r--r--dom/media/platforms/wmf/WMFAudioMFTManager.cpp358
-rw-r--r--dom/media/platforms/wmf/WMFAudioMFTManager.h78
-rw-r--r--dom/media/platforms/wmf/WMFDecoderModule.cpp257
-rw-r--r--dom/media/platforms/wmf/WMFDecoderModule.h56
-rw-r--r--dom/media/platforms/wmf/WMFMediaDataDecoder.cpp227
-rw-r--r--dom/media/platforms/wmf/WMFMediaDataDecoder.h147
-rw-r--r--dom/media/platforms/wmf/WMFUtils.cpp311
-rw-r--r--dom/media/platforms/wmf/WMFUtils.h67
-rw-r--r--dom/media/platforms/wmf/WMFVideoMFTManager.cpp1016
-rw-r--r--dom/media/platforms/wmf/WMFVideoMFTManager.h125
-rw-r--r--dom/media/platforms/wmf/moz.build34
-rw-r--r--dom/media/platforms/wrappers/FuzzingWrapper.cpp328
-rw-r--r--dom/media/platforms/wrappers/FuzzingWrapper.h124
-rw-r--r--dom/media/platforms/wrappers/H264Converter.cpp311
-rw-r--r--dom/media/platforms/wrappers/H264Converter.h74
311 files changed, 71079 insertions, 0 deletions
diff --git a/dom/media/platforms/MediaTelemetryConstants.h b/dom/media/platforms/MediaTelemetryConstants.h
new file mode 100644
index 000000000..5024949a8
--- /dev/null
+++ b/dom/media/platforms/MediaTelemetryConstants.h
@@ -0,0 +1,22 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#ifndef dom_media_platforms_MediaTelemetryConstants_h___
+#define dom_media_platforms_MediaTelemetryConstants_h___
+
+namespace mozilla {
+namespace media {
+
+enum class MediaDecoderBackend : uint32_t
+{
+ WMFSoftware = 0,
+ WMFDXVA2D3D9 = 1,
+ WMFDXVA2D3D11 = 2
+};
+
+} // namespace media
+} // namespace mozilla
+
+#endif // dom_media_platforms_MediaTelemetryConstants_h___
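
For context on the enum above: a hedged sketch of how such a backend value is typically fed to telemetry via Telemetry::Accumulate(). The histogram ID is assumed from how this enum is consumed elsewhere in the tree and should be treated as illustrative; the real reporting call site is not part of this header.

// Sketch only: the histogram ID is assumed, not defined in this patch.
#include "mozilla/Telemetry.h"
#include "MediaTelemetryConstants.h"

static void
ReportDecoderBackend(mozilla::media::MediaDecoderBackend aBackend)
{
  // Histograms take a uint32_t sample, so the enum value is cast through.
  mozilla::Telemetry::Accumulate(
    mozilla::Telemetry::MEDIA_DECODER_BACKEND_USED, // assumed histogram ID
    uint32_t(aBackend));
}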
diff --git a/dom/media/platforms/PDMFactory.cpp b/dom/media/platforms/PDMFactory.cpp
new file mode 100644
index 000000000..a72d910f5
--- /dev/null
+++ b/dom/media/platforms/PDMFactory.cpp
@@ -0,0 +1,482 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "PDMFactory.h"
+
+#ifdef XP_WIN
+#include "WMFDecoderModule.h"
+
+#endif
+#ifdef MOZ_FFVPX
+#include "FFVPXRuntimeLinker.h"
+#endif
+#ifdef MOZ_FFMPEG
+#include "FFmpegRuntimeLinker.h"
+#endif
+#ifdef MOZ_APPLEMEDIA
+#include "AppleDecoderModule.h"
+#endif
+#ifdef MOZ_GONK_MEDIACODEC
+#include "GonkDecoderModule.h"
+#endif
+#ifdef MOZ_WIDGET_ANDROID
+#include "AndroidDecoderModule.h"
+#endif
+#include "GMPDecoderModule.h"
+
+#include "mozilla/CDMProxy.h"
+#include "mozilla/ClearOnShutdown.h"
+#include "mozilla/SharedThreadPool.h"
+#include "mozilla/StaticPtr.h"
+#include "mozilla/SyncRunnable.h"
+#include "mozilla/TaskQueue.h"
+
+#include "MediaInfo.h"
+#include "MediaPrefs.h"
+#include "FuzzingWrapper.h"
+#include "H264Converter.h"
+
+#include "AgnosticDecoderModule.h"
+#include "EMEDecoderModule.h"
+
+#include "DecoderDoctorDiagnostics.h"
+
+#include "MP4Decoder.h"
+#include "mozilla/dom/RemoteVideoDecoder.h"
+
+#ifdef XP_WIN
+#include "mozilla/WindowsVersion.h"
+#endif
+
+#include "mp4_demuxer/H264.h"
+
+namespace mozilla {
+
+extern already_AddRefed<PlatformDecoderModule> CreateAgnosticDecoderModule();
+extern already_AddRefed<PlatformDecoderModule> CreateBlankDecoderModule();
+
+class PDMFactoryImpl final {
+public:
+ PDMFactoryImpl()
+ {
+#ifdef XP_WIN
+ WMFDecoderModule::Init();
+#endif
+#ifdef MOZ_APPLEMEDIA
+ AppleDecoderModule::Init();
+#endif
+#ifdef MOZ_FFVPX
+ FFVPXRuntimeLinker::Init();
+#endif
+#ifdef MOZ_FFMPEG
+ FFmpegRuntimeLinker::Init();
+#endif
+ }
+};
+
+StaticAutoPtr<PDMFactoryImpl> PDMFactory::sInstance;
+StaticMutex PDMFactory::sMonitor;
+
+class SupportChecker
+{
+public:
+ enum class Reason : uint8_t
+ {
+ kSupported,
+ kVideoFormatNotSupported,
+ kAudioFormatNotSupported,
+ kUnknown,
+ };
+
+ struct CheckResult
+ {
+ explicit CheckResult(Reason aReason,
+ MediaResult aResult = MediaResult(NS_OK))
+ : mReason(aReason),
+ mMediaResult(mozilla::Move(aResult))
+ {}
+ CheckResult(const CheckResult& aOther) = default;
+ CheckResult(CheckResult&& aOther) = default;
+ CheckResult& operator=(const CheckResult& aOther) = default;
+ CheckResult& operator=(CheckResult&& aOther) = default;
+
+ Reason mReason;
+ MediaResult mMediaResult;
+ };
+
+ template<class Func>
+ void
+ AddToCheckList(Func&& aChecker)
+ {
+ mCheckerList.AppendElement(mozilla::Forward<Func>(aChecker));
+ }
+
+ void
+ AddMediaFormatChecker(const TrackInfo& aTrackConfig)
+ {
+ if (aTrackConfig.IsVideo()) {
+ auto mimeType = aTrackConfig.GetAsVideoInfo()->mMimeType;
+ RefPtr<MediaByteBuffer> extraData = aTrackConfig.GetAsVideoInfo()->mExtraData;
+ AddToCheckList(
+ [mimeType, extraData]() {
+ if (MP4Decoder::IsH264(mimeType)) {
+ mp4_demuxer::SPSData spsdata;
+ // WMF H.264 Video Decoder and Apple ATDecoder
+ // do not support YUV444 format.
+ // For consistency, all decoders should be checked.
+ if (mp4_demuxer::H264::DecodeSPSFromExtraData(extraData, spsdata) &&
+ (spsdata.profile_idc == 244 /* Hi444PP */ ||
+ spsdata.chroma_format_idc == PDMFactory::kYUV444)) {
+ return CheckResult(
+ SupportChecker::Reason::kVideoFormatNotSupported,
+ MediaResult(
+ NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Decoder may not have the capability to handle"
+ " the requested video format"
+ " with YUV444 chroma subsampling.")));
+ }
+ }
+ return CheckResult(SupportChecker::Reason::kSupported);
+ });
+ }
+ }
+
+ SupportChecker::CheckResult
+ Check()
+ {
+ for (auto& checker : mCheckerList) {
+ auto result = checker();
+ if (result.mReason != SupportChecker::Reason::kSupported) {
+ return result;
+ }
+ }
+ return CheckResult(SupportChecker::Reason::kSupported);
+ }
+
+ void Clear() { mCheckerList.Clear(); }
+
+private:
+ nsTArray<mozilla::function<CheckResult()>> mCheckerList;
+}; // SupportChecker
+
+PDMFactory::PDMFactory()
+{
+ EnsureInit();
+ CreatePDMs();
+ CreateBlankPDM();
+}
+
+PDMFactory::~PDMFactory()
+{
+}
+
+void
+PDMFactory::EnsureInit() const
+{
+ {
+ StaticMutexAutoLock mon(sMonitor);
+ if (sInstance) {
+ // Quick exit if we already have an instance.
+ return;
+ }
+ if (NS_IsMainThread()) {
+ // On the main thread and holding the lock -> Create instance.
+ sInstance = new PDMFactoryImpl();
+ ClearOnShutdown(&sInstance);
+ return;
+ }
+ }
+
+ // Not on the main thread -> Sync-dispatch creation to main thread.
+ nsCOMPtr<nsIThread> mainThread = do_GetMainThread();
+ nsCOMPtr<nsIRunnable> runnable =
+ NS_NewRunnableFunction([]() {
+ StaticMutexAutoLock mon(sMonitor);
+ if (!sInstance) {
+ sInstance = new PDMFactoryImpl();
+ ClearOnShutdown(&sInstance);
+ }
+ });
+ SyncRunnable::DispatchToThread(mainThread, runnable);
+}
+
+already_AddRefed<MediaDataDecoder>
+PDMFactory::CreateDecoder(const CreateDecoderParams& aParams)
+{
+ if (aParams.mUseBlankDecoder) {
+ MOZ_ASSERT(mBlankPDM);
+ return CreateDecoderWithPDM(mBlankPDM, aParams);
+ }
+
+ const TrackInfo& config = aParams.mConfig;
+ bool isEncrypted = mEMEPDM && config.mCrypto.mValid;
+
+ if (isEncrypted) {
+ return CreateDecoderWithPDM(mEMEPDM, aParams);
+ }
+
+ DecoderDoctorDiagnostics* diagnostics = aParams.mDiagnostics;
+ if (diagnostics) {
+ // If libraries failed to load, the following loop over mCurrentPDMs
+ // will not even try to use them. So we record failures now.
+ if (mWMFFailedToLoad) {
+ diagnostics->SetWMFFailedToLoad();
+ }
+ if (mFFmpegFailedToLoad) {
+ diagnostics->SetFFmpegFailedToLoad();
+ }
+ if (mGMPPDMFailedToStartup) {
+ diagnostics->SetGMPPDMFailedToStartup();
+ }
+ }
+
+ for (auto& current : mCurrentPDMs) {
+ if (!current->SupportsMimeType(config.mMimeType, diagnostics)) {
+ continue;
+ }
+ RefPtr<MediaDataDecoder> m = CreateDecoderWithPDM(current, aParams);
+ if (m) {
+ return m.forget();
+ }
+ }
+ NS_WARNING("Unable to create a decoder, no platform found.");
+ return nullptr;
+}
+
+already_AddRefed<MediaDataDecoder>
+PDMFactory::CreateDecoderWithPDM(PlatformDecoderModule* aPDM,
+ const CreateDecoderParams& aParams)
+{
+ MOZ_ASSERT(aPDM);
+ RefPtr<MediaDataDecoder> m;
+ MediaResult* result = aParams.mError;
+
+ SupportChecker supportChecker;
+ const TrackInfo& config = aParams.mConfig;
+ supportChecker.AddMediaFormatChecker(config);
+
+ auto checkResult = supportChecker.Check();
+ if (checkResult.mReason != SupportChecker::Reason::kSupported) {
+ DecoderDoctorDiagnostics* diagnostics = aParams.mDiagnostics;
+ if (checkResult.mReason == SupportChecker::Reason::kVideoFormatNotSupported) {
+ if (diagnostics) {
+ diagnostics->SetVideoNotSupported();
+ }
+ if (result) {
+ *result = checkResult.mMediaResult;
+ }
+ } else if (checkResult.mReason == SupportChecker::Reason::kAudioFormatNotSupported) {
+ if (diagnostics) {
+ diagnostics->SetAudioNotSupported();
+ }
+ if (result) {
+ *result = checkResult.mMediaResult;
+ }
+ }
+ return nullptr;
+ }
+
+ if (config.IsAudio()) {
+ m = aPDM->CreateAudioDecoder(aParams);
+ return m.forget();
+ }
+
+ if (!config.IsVideo()) {
+ *result = MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Decoder configuration error, expected audio or video."));
+ return nullptr;
+ }
+
+ MediaDataDecoderCallback* callback = aParams.mCallback;
+ RefPtr<DecoderCallbackFuzzingWrapper> callbackWrapper;
+ if (MediaPrefs::PDMFuzzingEnabled()) {
+ callbackWrapper = new DecoderCallbackFuzzingWrapper(callback);
+ callbackWrapper->SetVideoOutputMinimumInterval(
+ TimeDuration::FromMilliseconds(MediaPrefs::PDMFuzzingInterval()));
+ callbackWrapper->SetDontDelayInputExhausted(!MediaPrefs::PDMFuzzingDelayInputExhausted());
+ callback = callbackWrapper.get();
+ }
+
+ CreateDecoderParams params = aParams;
+ params.mCallback = callback;
+
+ if (MP4Decoder::IsH264(config.mMimeType) && !aParams.mUseBlankDecoder) {
+ RefPtr<H264Converter> h = new H264Converter(aPDM, params);
+ const nsresult rv = h->GetLastError();
+ if (NS_SUCCEEDED(rv) || rv == NS_ERROR_NOT_INITIALIZED) {
+ // The H264Converter either successfully created the wrapped decoder,
+ // or there wasn't enough AVCC data to do so. Otherwise, there was some
+ // problem, for example WMF DLLs were missing.
+ m = h.forget();
+ }
+ } else {
+ m = aPDM->CreateVideoDecoder(params);
+ }
+
+ if (callbackWrapper && m) {
+ m = new DecoderFuzzingWrapper(m.forget(), callbackWrapper.forget());
+ }
+
+ return m.forget();
+}
+
+bool
+PDMFactory::SupportsMimeType(const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const
+{
+ UniquePtr<TrackInfo> trackInfo = CreateTrackInfoWithMIMEType(aMimeType);
+ if (!trackInfo) {
+ return false;
+ }
+ return Supports(*trackInfo, aDiagnostics);
+}
+
+bool
+PDMFactory::Supports(const TrackInfo& aTrackInfo,
+ DecoderDoctorDiagnostics* aDiagnostics) const
+{
+ if (mEMEPDM) {
+ return mEMEPDM->Supports(aTrackInfo, aDiagnostics);
+ }
+ RefPtr<PlatformDecoderModule> current = GetDecoder(aTrackInfo, aDiagnostics);
+ return !!current;
+}
+
+void
+PDMFactory::CreatePDMs()
+{
+ RefPtr<PlatformDecoderModule> m;
+
+ if (MediaPrefs::PDMUseBlankDecoder()) {
+ m = CreateBlankDecoderModule();
+ StartupPDM(m);
+ // The Blank PDM's SupportsMimeType() reports true for all codecs and its
+ // decoder creation is infallible, so it will be used for all media; there
+ // is no point creating any further PDMs after it.
+ return;
+ }
+
+#ifdef MOZ_WIDGET_ANDROID
+ if (MediaPrefs::PDMAndroidMediaCodecPreferred() &&
+ MediaPrefs::PDMAndroidMediaCodecEnabled()) {
+ m = new AndroidDecoderModule();
+ StartupPDM(m);
+ }
+#endif
+#ifdef XP_WIN
+ if (MediaPrefs::PDMWMFEnabled() && IsVistaOrLater() && !IsWin7AndPre2000Compatible()) {
+ // *Only* use WMF on Vista and later, as if Firefox is run in Windows 95
+ // compatibility mode on Windows 7 (it does happen!) we may crash trying
+ // to startup WMF. So we need to detect the OS version here, as in
+ // compatibility mode IsVistaOrLater() and friends behave as if we're on
+ // the emulated version of Windows. See bug 1279171.
+ // Additionally, we don't want to start the RemoteDecoderModule if we
+ // expect it's not going to work (i.e. on Windows older than Vista).
+ // IsWin7AndPre2000Compatible() relies on GetVersionEx, which reports the
+ // OS version the user selected when a compatibility mode is in effect.
+ m = new WMFDecoderModule();
+ RefPtr<PlatformDecoderModule> remote = new dom::RemoteDecoderModule(m);
+ StartupPDM(remote);
+ mWMFFailedToLoad = !StartupPDM(m);
+ } else {
+ mWMFFailedToLoad = MediaPrefs::DecoderDoctorWMFDisabledIsFailure();
+ }
+#endif
+#ifdef MOZ_FFVPX
+ if (MediaPrefs::PDMFFVPXEnabled()) {
+ m = FFVPXRuntimeLinker::CreateDecoderModule();
+ StartupPDM(m);
+ }
+#endif
+#ifdef MOZ_FFMPEG
+ if (MediaPrefs::PDMFFmpegEnabled()) {
+ m = FFmpegRuntimeLinker::CreateDecoderModule();
+ mFFmpegFailedToLoad = !StartupPDM(m);
+ } else {
+ mFFmpegFailedToLoad = false;
+ }
+#endif
+#ifdef MOZ_APPLEMEDIA
+ m = new AppleDecoderModule();
+ StartupPDM(m);
+#endif
+#ifdef MOZ_GONK_MEDIACODEC
+ if (MediaPrefs::PDMGonkDecoderEnabled()) {
+ m = new GonkDecoderModule();
+ StartupPDM(m);
+ }
+#endif
+#ifdef MOZ_WIDGET_ANDROID
+ if (MediaPrefs::PDMAndroidMediaCodecEnabled()) {
+ m = new AndroidDecoderModule();
+ StartupPDM(m);
+ }
+#endif
+
+ m = new AgnosticDecoderModule();
+ StartupPDM(m);
+
+ if (MediaPrefs::PDMGMPEnabled()) {
+ m = new GMPDecoderModule();
+ mGMPPDMFailedToStartup = !StartupPDM(m);
+ } else {
+ mGMPPDMFailedToStartup = false;
+ }
+}
+
+void
+PDMFactory::CreateBlankPDM()
+{
+ mBlankPDM = CreateBlankDecoderModule();
+ MOZ_ASSERT(mBlankPDM && NS_SUCCEEDED(mBlankPDM->Startup()));
+}
+
+bool
+PDMFactory::StartupPDM(PlatformDecoderModule* aPDM)
+{
+ if (aPDM && NS_SUCCEEDED(aPDM->Startup())) {
+ mCurrentPDMs.AppendElement(aPDM);
+ return true;
+ }
+ return false;
+}
+
+already_AddRefed<PlatformDecoderModule>
+PDMFactory::GetDecoder(const TrackInfo& aTrackInfo,
+ DecoderDoctorDiagnostics* aDiagnostics) const
+{
+ if (aDiagnostics) {
+ // If libraries failed to load, the following loop over mCurrentPDMs
+ // will not even try to use them. So we record failures now.
+ if (mWMFFailedToLoad) {
+ aDiagnostics->SetWMFFailedToLoad();
+ }
+ if (mFFmpegFailedToLoad) {
+ aDiagnostics->SetFFmpegFailedToLoad();
+ }
+ if (mGMPPDMFailedToStartup) {
+ aDiagnostics->SetGMPPDMFailedToStartup();
+ }
+ }
+
+ RefPtr<PlatformDecoderModule> pdm;
+ for (auto& current : mCurrentPDMs) {
+ if (current->Supports(aTrackInfo, aDiagnostics)) {
+ pdm = current;
+ break;
+ }
+ }
+ return pdm.forget();
+}
+
+void
+PDMFactory::SetCDMProxy(CDMProxy* aProxy)
+{
+ RefPtr<PDMFactory> m = new PDMFactory();
+ mEMEPDM = new EMEDecoderModule(aProxy, m);
+}
+
+} // namespace mozilla
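
The SupportChecker above is simply an ordered list of deferred capability checks that runs before decoder creation and stops at the first failure. Below is a standalone sketch of the same pattern, using plain std:: types in place of nsTArray/mozilla::function; it is an illustration of the technique, not code from the tree.

// Standalone illustration of the SupportChecker pattern: queue up capability
// checks as callables, run them in order, stop at the first failure.
#include <cstdio>
#include <functional>
#include <string>
#include <utility>
#include <vector>

struct CheckResult {
  bool mSupported;
  std::string mReason;
};

class SupportCheckerSketch {
public:
  void AddToCheckList(std::function<CheckResult()> aChecker) {
    mCheckerList.push_back(std::move(aChecker));
  }
  CheckResult Check() const {
    for (const auto& checker : mCheckerList) {
      CheckResult result = checker();
      if (!result.mSupported) {
        return result;  // first failing check wins, as in PDMFactory
      }
    }
    return {true, std::string()};
  }
private:
  std::vector<std::function<CheckResult()>> mCheckerList;
};

int main() {
  SupportCheckerSketch checker;
  const int chromaFormatIdc = 3;  // pretend the H.264 SPS reported YUV444
  checker.AddToCheckList([chromaFormatIdc] {
    if (chromaFormatIdc == 3) {
      return CheckResult{false, "YUV444 chroma subsampling not supported"};
    }
    return CheckResult{true, std::string()};
  });
  const CheckResult r = checker.Check();
  std::printf("supported=%d reason=%s\n", int(r.mSupported), r.mReason.c_str());
  return 0;
}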
diff --git a/dom/media/platforms/PDMFactory.h b/dom/media/platforms/PDMFactory.h
new file mode 100644
index 000000000..94be7e928
--- /dev/null
+++ b/dom/media/platforms/PDMFactory.h
@@ -0,0 +1,84 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(PDMFactory_h_)
+#define PDMFactory_h_
+
+#include "PlatformDecoderModule.h"
+#include "mozilla/Function.h"
+#include "mozilla/StaticMutex.h"
+
+class CDMProxy;
+
+namespace mozilla {
+
+class DecoderDoctorDiagnostics;
+class PDMFactoryImpl;
+template<class T> class StaticAutoPtr;
+
+class PDMFactory final {
+public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(PDMFactory)
+
+ PDMFactory();
+
+ // Creates a MediaDataDecoder for the given track config, using the first
+ // PlatformDecoderModule in our list that supports it. The returned decoder
+ // is reference-counted; multiple decoders (and multiple PDMs) can be alive
+ // at the same time.
+ // This is called on the decode task queue.
+ already_AddRefed<MediaDataDecoder>
+ CreateDecoder(const CreateDecoderParams& aParams);
+
+ bool SupportsMimeType(const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const;
+ bool Supports(const TrackInfo& aTrackInfo,
+ DecoderDoctorDiagnostics* aDiagnostics) const;
+
+ // Creates a PlatformDecoderModule that uses a CDMProxy to decrypt or
+ // decrypt-and-decode EME encrypted content. If the CDM only decrypts and
+ // does not decode, we create a PDM and use that to create MediaDataDecoders
+ // that we use on the decode task queue to decode the decrypted stream.
+ // This is called on the decode task queue.
+ void SetCDMProxy(CDMProxy* aProxy);
+
+ static constexpr int kYUV400 = 0;
+ static constexpr int kYUV420 = 1;
+ static constexpr int kYUV422 = 2;
+ static constexpr int kYUV444 = 3;
+
+private:
+ virtual ~PDMFactory();
+ void CreatePDMs();
+ void CreateBlankPDM();
+ // Startup the provided PDM and add it to our list if successful.
+ bool StartupPDM(PlatformDecoderModule* aPDM);
+ // Returns the first PDM in our list supporting the mimetype.
+ already_AddRefed<PlatformDecoderModule>
+ GetDecoder(const TrackInfo& aTrackInfo,
+ DecoderDoctorDiagnostics* aDiagnostics) const;
+
+ already_AddRefed<MediaDataDecoder>
+ CreateDecoderWithPDM(PlatformDecoderModule* aPDM,
+ const CreateDecoderParams& aParams);
+
+ nsTArray<RefPtr<PlatformDecoderModule>> mCurrentPDMs;
+ RefPtr<PlatformDecoderModule> mEMEPDM;
+ RefPtr<PlatformDecoderModule> mBlankPDM;
+
+ bool mWMFFailedToLoad = false;
+ bool mFFmpegFailedToLoad = false;
+ bool mGMPPDMFailedToStartup = false;
+
+ void EnsureInit() const;
+ template<class T> friend class StaticAutoPtr;
+ static StaticAutoPtr<PDMFactoryImpl> sInstance;
+ static StaticMutex sMonitor;
+};
+
+} // namespace mozilla
+
+#endif /* PDMFactory_h_ */
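
A hedged sketch of the caller side (roughly what MediaFormatReader does with this class): build CreateDecoderParams from the track config plus whichever optional arguments are needed, then ask the factory for a decoder. This is written against the headers in this patch, not taken from the tree; the helper name is made up and error handling is elided.

// Sketch only: CreateDecoderParams' variadic constructor lets the caller pass
// any subset of optional parameters, in any order, after the mandatory config.
#include "PDMFactory.h"
#include "PlatformDecoderModule.h"

using namespace mozilla;

static already_AddRefed<MediaDataDecoder>
CreateVideoDecoderFor(const VideoInfo& aInfo,
                      TaskQueue* aTaskQueue,
                      MediaDataDecoderCallback* aCallback,
                      layers::ImageContainer* aImageContainer)
{
  RefPtr<PDMFactory> factory = new PDMFactory();
  if (!factory->SupportsMimeType(aInfo.mMimeType, /* aDiagnostics = */ nullptr)) {
    return nullptr;
  }
  // Each extra argument after the config is routed to the matching member by
  // the CreateDecoderParams::Set() overloads (task queue, callback, image
  // container, error out-param, ...).
  MediaResult error(NS_OK);
  return factory->CreateDecoder(
    { aInfo, aTaskQueue, aCallback, aImageContainer, &error });
}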
diff --git a/dom/media/platforms/PlatformDecoderModule.h b/dom/media/platforms/PlatformDecoderModule.h
new file mode 100644
index 000000000..7480d8ff9
--- /dev/null
+++ b/dom/media/platforms/PlatformDecoderModule.h
@@ -0,0 +1,309 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(PlatformDecoderModule_h_)
+#define PlatformDecoderModule_h_
+
+#include "MediaDecoderReader.h"
+#include "MediaInfo.h"
+#include "mozilla/MozPromise.h"
+#include "mozilla/layers/LayersTypes.h"
+#include "mozilla/layers/KnowsCompositor.h"
+#include "nsTArray.h"
+#include "mozilla/RefPtr.h"
+#include "GMPService.h"
+#include <queue>
+#include "MediaResult.h"
+
+namespace mozilla {
+class TrackInfo;
+class AudioInfo;
+class VideoInfo;
+class MediaRawData;
+class DecoderDoctorDiagnostics;
+
+namespace layers {
+class ImageContainer;
+} // namespace layers
+
+namespace dom {
+class RemoteDecoderModule;
+}
+
+class MediaDataDecoder;
+class MediaDataDecoderCallback;
+class TaskQueue;
+class CDMProxy;
+
+static LazyLogModule sPDMLog("PlatformDecoderModule");
+
+struct MOZ_STACK_CLASS CreateDecoderParams final {
+ explicit CreateDecoderParams(const TrackInfo& aConfig)
+ : mConfig(aConfig)
+ {}
+
+ template <typename T1, typename... Ts>
+ CreateDecoderParams(const TrackInfo& aConfig, T1&& a1, Ts&&... args)
+ : mConfig(aConfig)
+ {
+ Set(mozilla::Forward<T1>(a1), mozilla::Forward<Ts>(args)...);
+ }
+
+ const VideoInfo& VideoConfig() const
+ {
+ MOZ_ASSERT(mConfig.IsVideo());
+ return *mConfig.GetAsVideoInfo();
+ }
+
+ const AudioInfo& AudioConfig() const
+ {
+ MOZ_ASSERT(mConfig.IsAudio());
+ return *mConfig.GetAsAudioInfo();
+ }
+
+ layers::LayersBackend GetLayersBackend() const
+ {
+ if (mKnowsCompositor) {
+ return mKnowsCompositor->GetCompositorBackendType();
+ }
+ return layers::LayersBackend::LAYERS_NONE;
+ }
+
+ const TrackInfo& mConfig;
+ TaskQueue* mTaskQueue = nullptr;
+ MediaDataDecoderCallback* mCallback = nullptr;
+ DecoderDoctorDiagnostics* mDiagnostics = nullptr;
+ layers::ImageContainer* mImageContainer = nullptr;
+ MediaResult* mError = nullptr;
+ RefPtr<layers::KnowsCompositor> mKnowsCompositor;
+ RefPtr<GMPCrashHelper> mCrashHelper;
+ bool mUseBlankDecoder = false;
+
+private:
+ void Set(TaskQueue* aTaskQueue) { mTaskQueue = aTaskQueue; }
+ void Set(MediaDataDecoderCallback* aCallback) { mCallback = aCallback; }
+ void Set(DecoderDoctorDiagnostics* aDiagnostics) { mDiagnostics = aDiagnostics; }
+ void Set(layers::ImageContainer* aImageContainer) { mImageContainer = aImageContainer; }
+ void Set(MediaResult* aError) { mError = aError; }
+ void Set(GMPCrashHelper* aCrashHelper) { mCrashHelper = aCrashHelper; }
+ void Set(bool aUseBlankDecoder) { mUseBlankDecoder = aUseBlankDecoder; }
+ void Set(layers::KnowsCompositor* aKnowsCompositor) { mKnowsCompositor = aKnowsCompositor; }
+ template <typename T1, typename T2, typename... Ts>
+ void Set(T1&& a1, T2&& a2, Ts&&... args)
+ {
+ Set(mozilla::Forward<T1>(a1));
+ Set(mozilla::Forward<T2>(a2), mozilla::Forward<Ts>(args)...);
+ }
+};
+
+// The PlatformDecoderModule interface is used by the MediaFormatReader to
+// abstract access to decoders provided by various
+// platforms.
+// Each platform (Windows, MacOSX, Linux, B2G etc) must implement a
+// PlatformDecoderModule to provide access to its decoders in order to get
+// decompressed H.264/AAC from the MediaFormatReader.
+//
+// Decoding is asynchronous, and should be performed on the task queue
+// provided if the underlying platform isn't already exposing an async API.
+//
+// A cross-platform decoder module that discards input and produces "blank"
+// output samples exists for testing, and is created when the pref
+// "media.use-blank-decoder" is true.
+
+class PlatformDecoderModule {
+public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(PlatformDecoderModule)
+
+ // Perform any per-instance initialization.
+ // This is called on the decode task queue.
+ virtual nsresult Startup() { return NS_OK; };
+
+ // Indicates if the PlatformDecoderModule supports decoding of aMimeType.
+ virtual bool SupportsMimeType(const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const = 0;
+ virtual bool Supports(const TrackInfo& aTrackInfo,
+ DecoderDoctorDiagnostics* aDiagnostics) const
+ {
+ // By default, fall back to SupportsMimeType with just the MIME string.
+ // (So PDMs do not need to override this method -- yet.)
+ return SupportsMimeType(aTrackInfo.mMimeType, aDiagnostics);
+ }
+
+ enum class ConversionRequired : uint8_t {
+ kNeedNone,
+ kNeedAVCC,
+ kNeedAnnexB,
+ };
+
+ // Indicates that the decoder requires a specific format.
+ // The PlatformDecoderModule will convert the demuxed data accordingly before
+ // feeding it to MediaDataDecoder::Input.
+ virtual ConversionRequired DecoderNeedsConversion(const TrackInfo& aConfig) const = 0;
+
+protected:
+ PlatformDecoderModule() {}
+ virtual ~PlatformDecoderModule() {}
+
+ friend class H264Converter;
+ friend class PDMFactory;
+ friend class dom::RemoteDecoderModule;
+
+ // Creates a Video decoder. The layers backend is passed in so that
+ // decoders can determine whether hardware accelerated decoding can be used.
+ // Asynchronous decoding of video should be done in runnables dispatched
+ // to the task queue in aParams. If the task queue isn't needed, the decoder
+ // should not hold a reference to it.
+ // Output and errors should be returned to the reader via the callback in
+ // aParams. On Windows the task queue's threads will have MSCOM initialized
+ // with COINIT_MULTITHREADED.
+ // Returns nullptr if the decoder can't be created.
+ // It is safe to store a reference to the config in aParams.
+ // This is called on the decode task queue.
+ virtual already_AddRefed<MediaDataDecoder>
+ CreateVideoDecoder(const CreateDecoderParams& aParams) = 0;
+
+ // Creates an Audio decoder with the specified properties.
+ // Asynchronous decoding of audio should be done in runnables dispatched to
+ // the task queue in aParams. If the task queue isn't needed, the decoder
+ // should not hold a reference to it.
+ // Output and errors should be returned to the reader via the callback in aParams.
+ // Returns nullptr if the decoder can't be created.
+ // On Windows the task queue's threads will have MSCOM initialized with
+ // COINIT_MULTITHREADED.
+ // It is safe to store a reference to the config in aParams.
+ // This is called on the decode task queue.
+ virtual already_AddRefed<MediaDataDecoder>
+ CreateAudioDecoder(const CreateDecoderParams& aParams) = 0;
+};
+
+// A callback used by MediaDataDecoder to return output/errors to the
+// MediaFormatReader.
+// Implementation is threadsafe, and can be called on any thread.
+class MediaDataDecoderCallback {
+public:
+ virtual ~MediaDataDecoderCallback() {}
+
+ // Called by MediaDataDecoder when a sample has been decoded.
+ virtual void Output(MediaData* aData) = 0;
+
+ // Denotes an error in the decoding process. The reader will stop calling
+ // the decoder.
+ virtual void Error(const MediaResult& aError) = 0;
+
+ // Denotes that the last input sample has been inserted into the decoder,
+ // and no more output can be produced unless more input is sent.
+ // A frame decoding session is completed once InputExhausted has been called.
+ // MediaDataDecoder::Input will not be called again until InputExhausted has
+ // been called.
+ virtual void InputExhausted() = 0;
+
+ virtual void DrainComplete() = 0;
+
+ virtual void ReleaseMediaResources() {}
+
+ virtual bool OnReaderTaskQueue() = 0;
+
+ // Denotes that a pending encryption key is preventing more input being fed
+ // into the decoder. This only needs to be overridden for callbacks that
+ // handle encryption. E.g. benchmarking does not use eme, so this need
+ // not be overridden in that case.
+ virtual void WaitingForKey() {}
+};
+
+// MediaDataDecoder is the interface exposed by decoders created by the
+// PlatformDecoderModule's Create*Decoder() functions. The type of
+// media data that the decoder accepts as valid input and produces as
+// output is determined when the MediaDataDecoder is created.
+//
+// Unless otherwise noted, all functions are only called on the decode task
+// queue. An exception is the MediaDataDecoder in
+// MediaFormatReader::IsVideoAccelerated() for which all calls (Init(),
+// IsHardwareAccelerated(), and Shutdown()) are from the main thread.
+//
+// Don't block inside these functions, unless it's explicitly noted that you
+// should (like in Flush()).
+//
+// Decoding is done asynchronously. Any async work can be done on the
+// TaskQueue passed into the PlatformDecoderModules's Create*Decoder()
+// function. This may not be necessary for platforms with async APIs
+// for decoding.
+//
+// If an error occurs at any point after the Init promise has been
+// completed, then Error() must be called on the associated
+// MediaDataDecoderCallback.
+class MediaDataDecoder {
+protected:
+ virtual ~MediaDataDecoder() {};
+
+public:
+ typedef TrackInfo::TrackType TrackType;
+ typedef MozPromise<TrackType, MediaResult, /* IsExclusive = */ true> InitPromise;
+
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaDataDecoder)
+
+ // Initialize the decoder. The decoder should be ready to decode once
+ // promise resolves. The decoder should do any initialization here, rather
+ // than in its constructor or PlatformDecoderModule::Create*Decoder(),
+ // so that if the MediaFormatReader needs to shutdown during initialization,
+ // it can call Shutdown() to cancel this operation. Any initialization
+ // that requires blocking the calling thread in this function *must*
+ // be done here so that it can be canceled by calling Shutdown()!
+ virtual RefPtr<InitPromise> Init() = 0;
+
+ // Inserts a sample into the decoder's decode pipeline.
+ virtual void Input(MediaRawData* aSample) = 0;
+
+ // Causes all samples in the decoding pipeline to be discarded. When
+ // this function returns, the decoder must be ready to accept new input
+ // for decoding. This function is called when the demuxer seeks, before
+ // decoding resumes after the seek.
+ // While the reader calls Flush(), it ignores all output sent to it;
+ // it is safe (but pointless) to send output while Flush is called.
+ // The MediaFormatReader will not call Input() while it's calling Flush().
+ virtual void Flush() = 0;
+
+ // Causes all complete samples in the pipeline that can be decoded to be
+ // output. If the decoder can't produce samples from the current output,
+ // it drops the input samples. The decoder may be holding onto samples
+ // that are required to decode samples that it expects to get in future.
+ // This is called when the demuxer reaches end of stream.
+ // The MediaFormatReader will not call Input() while it's calling Drain().
+ // This function is asynchronous. The MediaDataDecoder must call
+ // MediaDataDecoderCallback::DrainComplete() once all remaining
+ // samples have been output.
+ virtual void Drain() = 0;
+
+ // Cancels all init/input/drain operations, and shuts down the
+ // decoder. The platform decoder should clean up any resources it's using
+ // and release memory etc. Shutdown() must block until the decoder has
+ // completed shutdown. The reader calls Flush() before calling Shutdown().
+ // The reader will delete the decoder once Shutdown() returns.
+ // The MediaDataDecoderCallback *must* not be called after Shutdown() has
+ // returned.
+ virtual void Shutdown() = 0;
+
+ // Called from the state machine task queue or main thread.
+ // The decoder must decide whether hardware acceleration is supported right
+ // after creation; the caller does not need to call Init() before calling this.
+ virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const { return false; }
+
+ // Return the name of the MediaDataDecoder, only used for decoding.
+ // Only return a static const string, as the information may be accessed
+ // in a non thread-safe fashion.
+ virtual const char* GetDescriptionName() const = 0;
+
+ // Set a hint of the seek target time on the decoder. The decoder will drop
+ // any decoded data whose pts is smaller than this value. The threshold must
+ // be cleared when the decoder is reset.
+ // A decoder is not required to honor this value, but video decoders should
+ // implement it to improve seek performance.
+ // Note: this should be called before Input() or after Flush().
+ virtual void SetSeekThreshold(const media::TimeUnit& aTime) {}
+};
+
+} // namespace mozilla
+
+#endif
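
To make the callback contract above concrete, here is a hedged sketch of a minimal MediaDataDecoderCallback. A real consumer (MediaFormatReader) does much more; this only annotates which method fires at which point in the Input/Drain lifecycle, and the class name is made up.

// Sketch only: a trivial callback that logs the events described in the
// interface comments above.
#include <inttypes.h>
#include "PlatformDecoderModule.h"

using namespace mozilla;

class LoggingDecoderCallback : public MediaDataDecoderCallback {
public:
  void Output(MediaData* aData) override {
    // A decoded sample, delivered in presentation order.
    MOZ_LOG(sPDMLog, LogLevel::Debug,
            ("decoded sample, pts=%lld us", (long long)aData->mTime));
  }
  void Error(const MediaResult& aError) override {
    // Fatal for this decoder instance; the reader stops calling Input().
    MOZ_LOG(sPDMLog, LogLevel::Error,
            ("decode error 0x%08" PRIx32, static_cast<uint32_t>(aError.Code())));
  }
  void InputExhausted() override {
    // The decoder wants more input before it can produce further output;
    // the reader answers by feeding the next sample to Input().
  }
  void DrainComplete() override {
    // All buffered output has been emitted in response to Drain().
  }
  bool OnReaderTaskQueue() override {
    // A real implementation checks the reader's task queue; assumed here.
    return true;
  }
};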
diff --git a/dom/media/platforms/ReorderQueue.h b/dom/media/platforms/ReorderQueue.h
new file mode 100644
index 000000000..f4a002220
--- /dev/null
+++ b/dom/media/platforms/ReorderQueue.h
@@ -0,0 +1,29 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Queue for ordering decoded video frames by presentation time.
+// Decoders often return frames out of order, which we need to
+// buffer so we can forward them in correct presentation order.
+
+#ifndef mozilla_ReorderQueue_h
+#define mozilla_ReorderQueue_h
+
+#include <MediaData.h>
+#include <nsTPriorityQueue.h>
+
+namespace mozilla {
+
+struct ReorderQueueComparator
+{
+ bool LessThan(MediaData* const& a, MediaData* const& b) const
+ {
+ return a->mTime < b->mTime;
+ }
+};
+
+typedef nsTPriorityQueue<RefPtr<MediaData>, ReorderQueueComparator> ReorderQueue;
+
+} // namespace mozilla
+
+#endif // mozilla_ReorderQueue_h
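
A small hedged sketch of how a decoder drives this queue: frames are pushed as they come out of the decoder (decode order) and popped in presentation order once the queue holds more entries than the stream's maximum reference frame count. The BlankMediaDataDecoder further down does exactly this with mMaxRefFrames; the helper name here is made up.

// Sketch only: emit reordered output once the queue is deeper than the
// stream's maximum number of reference frames (cf. BlankMediaDataDecoder).
#include "PlatformDecoderModule.h"
#include "ReorderQueue.h"

using namespace mozilla;

static void
PushAndEmit(ReorderQueue& aQueue,
            MediaData* aDecodedFrame,
            uint32_t aMaxRefFrames,
            MediaDataDecoderCallback* aCallback)
{
  // Frames arrive in decode (DTS) order...
  aQueue.Push(aDecodedFrame);
  // ...and leave in presentation (PTS) order, smallest mTime first, once we
  // are certain no earlier frame can still arrive.
  while (aQueue.Length() > aMaxRefFrames) {
    aCallback->Output(aQueue.Pop().get());
  }
}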
diff --git a/dom/media/platforms/agnostic/AgnosticDecoderModule.cpp b/dom/media/platforms/agnostic/AgnosticDecoderModule.cpp
new file mode 100644
index 000000000..7bd75b7fe
--- /dev/null
+++ b/dom/media/platforms/agnostic/AgnosticDecoderModule.cpp
@@ -0,0 +1,63 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "AgnosticDecoderModule.h"
+#include "mozilla/Logging.h"
+#include "OpusDecoder.h"
+#include "VorbisDecoder.h"
+#include "VPXDecoder.h"
+#include "WAVDecoder.h"
+#include "TheoraDecoder.h"
+
+namespace mozilla {
+
+bool
+AgnosticDecoderModule::SupportsMimeType(const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const
+{
+ bool supports =
+ VPXDecoder::IsVPX(aMimeType) ||
+ OpusDataDecoder::IsOpus(aMimeType) ||
+ VorbisDataDecoder::IsVorbis(aMimeType) ||
+ WaveDataDecoder::IsWave(aMimeType) ||
+ TheoraDecoder::IsTheora(aMimeType);
+ MOZ_LOG(sPDMLog, LogLevel::Debug, ("Agnostic decoder %s requested type",
+ supports ? "supports" : "rejects"));
+ return supports;
+}
+
+already_AddRefed<MediaDataDecoder>
+AgnosticDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
+{
+ RefPtr<MediaDataDecoder> m;
+
+ if (VPXDecoder::IsVPX(aParams.mConfig.mMimeType)) {
+ m = new VPXDecoder(aParams);
+ } else if (TheoraDecoder::IsTheora(aParams.mConfig.mMimeType)) {
+ m = new TheoraDecoder(aParams);
+ }
+
+ return m.forget();
+}
+
+already_AddRefed<MediaDataDecoder>
+AgnosticDecoderModule::CreateAudioDecoder(const CreateDecoderParams& aParams)
+{
+ RefPtr<MediaDataDecoder> m;
+
+ const TrackInfo& config = aParams.mConfig;
+ if (VorbisDataDecoder::IsVorbis(config.mMimeType)) {
+ m = new VorbisDataDecoder(aParams);
+ } else if (OpusDataDecoder::IsOpus(config.mMimeType)) {
+ m = new OpusDataDecoder(aParams);
+ } else if (WaveDataDecoder::IsWave(config.mMimeType)) {
+ m = new WaveDataDecoder(aParams);
+ }
+
+ return m.forget();
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/AgnosticDecoderModule.h b/dom/media/platforms/agnostic/AgnosticDecoderModule.h
new file mode 100644
index 000000000..d944b84ab
--- /dev/null
+++ b/dom/media/platforms/agnostic/AgnosticDecoderModule.h
@@ -0,0 +1,34 @@
+#if !defined(AgnosticDecoderModule_h_)
+#define AgnosticDecoderModule_h_
+
+#include "PlatformDecoderModule.h"
+
+namespace mozilla {
+
+class AgnosticDecoderModule : public PlatformDecoderModule {
+public:
+ AgnosticDecoderModule() = default;
+ virtual ~AgnosticDecoderModule() = default;
+
+ bool SupportsMimeType(const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const override;
+
+ ConversionRequired
+ DecoderNeedsConversion(const TrackInfo& aConfig) const override
+ {
+ return ConversionRequired::kNeedNone;
+ }
+
+protected:
+ // Decode thread.
+ already_AddRefed<MediaDataDecoder>
+ CreateVideoDecoder(const CreateDecoderParams& aParams) override;
+
+ // Decode thread.
+ already_AddRefed<MediaDataDecoder>
+ CreateAudioDecoder(const CreateDecoderParams& aParams) override;
+};
+
+} // namespace mozilla
+
+#endif /* AgnosticDecoderModule_h_ */
diff --git a/dom/media/platforms/agnostic/BlankDecoderModule.cpp b/dom/media/platforms/agnostic/BlankDecoderModule.cpp
new file mode 100644
index 000000000..7d7bae721
--- /dev/null
+++ b/dom/media/platforms/agnostic/BlankDecoderModule.cpp
@@ -0,0 +1,278 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ImageContainer.h"
+#include "MediaDecoderReader.h"
+#include "MediaInfo.h"
+#include "mozilla/CheckedInt.h"
+#include "mozilla/mozalloc.h" // for operator new, and new (fallible)
+#include "mozilla/RefPtr.h"
+#include "mozilla/TaskQueue.h"
+#include "mp4_demuxer/AnnexB.h"
+#include "mp4_demuxer/H264.h"
+#include "MP4Decoder.h"
+#include "nsAutoPtr.h"
+#include "nsRect.h"
+#include "PlatformDecoderModule.h"
+#include "ReorderQueue.h"
+#include "TimeUnits.h"
+#include "VideoUtils.h"
+
+namespace mozilla {
+
+// Decoder that uses a passed in object's Create function to create blank
+// MediaData objects.
+template<class BlankMediaDataCreator>
+class BlankMediaDataDecoder : public MediaDataDecoder {
+public:
+
+ BlankMediaDataDecoder(BlankMediaDataCreator* aCreator,
+ const CreateDecoderParams& aParams)
+ : mCreator(aCreator)
+ , mCallback(aParams.mCallback)
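+    // For H.264 video, size the reorder queue from the SPS max ref frames
+    // (defaulting to 16 when no SPS is present); other codecs and audio need
+    // no reordering, so use 0.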
+ , mMaxRefFrames(aParams.mConfig.GetType() == TrackInfo::kVideoTrack &&
+ MP4Decoder::IsH264(aParams.mConfig.mMimeType)
+ ? mp4_demuxer::AnnexB::HasSPS(aParams.VideoConfig().mExtraData)
+ ? mp4_demuxer::H264::ComputeMaxRefFrames(aParams.VideoConfig().mExtraData)
+ : 16
+ : 0)
+ , mType(aParams.mConfig.GetType())
+ {
+ }
+
+ RefPtr<InitPromise> Init() override {
+ return InitPromise::CreateAndResolve(mType, __func__);
+ }
+
+ void Shutdown() override {}
+
+ void Input(MediaRawData* aSample) override
+ {
+ RefPtr<MediaData> data =
+ mCreator->Create(media::TimeUnit::FromMicroseconds(aSample->mTime),
+ media::TimeUnit::FromMicroseconds(aSample->mDuration),
+ aSample->mOffset);
+
+ OutputFrame(data);
+ }
+
+ void Flush() override
+ {
+ mReorderQueue.Clear();
+ }
+
+ void Drain() override
+ {
+ while (!mReorderQueue.IsEmpty()) {
+ mCallback->Output(mReorderQueue.Pop().get());
+ }
+
+ mCallback->DrainComplete();
+ }
+
+ const char* GetDescriptionName() const override
+ {
+ return "blank media data decoder";
+ }
+
+private:
+ void OutputFrame(MediaData* aData)
+ {
+ if (!aData) {
+ mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
+ return;
+ }
+
+ // Frames come out in DTS order but we need to output them in PTS order.
+ mReorderQueue.Push(aData);
+
+ while (mReorderQueue.Length() > mMaxRefFrames) {
+ mCallback->Output(mReorderQueue.Pop().get());
+ }
+ mCallback->InputExhausted();
+ }
+
+private:
+ nsAutoPtr<BlankMediaDataCreator> mCreator;
+ MediaDataDecoderCallback* mCallback;
+ const uint32_t mMaxRefFrames;
+ ReorderQueue mReorderQueue;
+ TrackInfo::TrackType mType;
+};
+
+class BlankVideoDataCreator {
+public:
+ BlankVideoDataCreator(uint32_t aFrameWidth,
+ uint32_t aFrameHeight,
+ layers::ImageContainer* aImageContainer)
+ : mFrameWidth(aFrameWidth)
+ , mFrameHeight(aFrameHeight)
+ , mImageContainer(aImageContainer)
+ {
+ mInfo.mDisplay = nsIntSize(mFrameWidth, mFrameHeight);
+ mPicture = gfx::IntRect(0, 0, mFrameWidth, mFrameHeight);
+ }
+
+ already_AddRefed<MediaData>
+ Create(const media::TimeUnit& aDTS, const media::TimeUnit& aDuration, int64_t aOffsetInStream)
+ {
+    // Create a fake YUV buffer in a 4:2:0 format. That is, an 8bpp Y plane,
+    // with U and V planes that are 2x2 subsampled (half the width and height
+    // of the Y plane), also 8 bits per sample.
+ const int sizeY = mFrameWidth * mFrameHeight;
+ const int sizeCbCr = ((mFrameWidth + 1) / 2) * ((mFrameHeight + 1) / 2);
+ auto frame = MakeUnique<uint8_t[]>(sizeY + sizeCbCr);
+ VideoData::YCbCrBuffer buffer;
+
+ // Y plane.
+ buffer.mPlanes[0].mData = frame.get();
+ buffer.mPlanes[0].mStride = mFrameWidth;
+ buffer.mPlanes[0].mHeight = mFrameHeight;
+ buffer.mPlanes[0].mWidth = mFrameWidth;
+ buffer.mPlanes[0].mOffset = 0;
+ buffer.mPlanes[0].mSkip = 0;
+
+ // Cb plane.
+ buffer.mPlanes[1].mData = frame.get() + sizeY;
+ buffer.mPlanes[1].mStride = mFrameWidth / 2;
+ buffer.mPlanes[1].mHeight = mFrameHeight / 2;
+ buffer.mPlanes[1].mWidth = mFrameWidth / 2;
+ buffer.mPlanes[1].mOffset = 0;
+ buffer.mPlanes[1].mSkip = 0;
+
+ // Cr plane.
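+    // Note that Cb and Cr share the same memory: both planes read the
+    // 128-filled chroma buffer below, which is fine for a uniform frame.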
+ buffer.mPlanes[2].mData = frame.get() + sizeY;
+ buffer.mPlanes[2].mStride = mFrameWidth / 2;
+ buffer.mPlanes[2].mHeight = mFrameHeight / 2;
+ buffer.mPlanes[2].mWidth = mFrameWidth / 2;
+ buffer.mPlanes[2].mOffset = 0;
+ buffer.mPlanes[2].mSkip = 0;
+
+ // Set to color white.
+ memset(buffer.mPlanes[0].mData, 255, sizeY);
+ memset(buffer.mPlanes[1].mData, 128, sizeCbCr);
+
+ return VideoData::CreateAndCopyData(mInfo,
+ mImageContainer,
+ aOffsetInStream,
+ aDTS.ToMicroseconds(),
+ aDuration.ToMicroseconds(),
+ buffer,
+ true,
+ aDTS.ToMicroseconds(),
+ mPicture);
+ }
+
+private:
+ VideoInfo mInfo;
+ gfx::IntRect mPicture;
+ uint32_t mFrameWidth;
+ uint32_t mFrameHeight;
+ RefPtr<layers::ImageContainer> mImageContainer;
+};
+
+class BlankAudioDataCreator {
+public:
+ BlankAudioDataCreator(uint32_t aChannelCount, uint32_t aSampleRate)
+ : mFrameSum(0), mChannelCount(aChannelCount), mSampleRate(aSampleRate)
+ {
+ }
+
+ MediaData* Create(const media::TimeUnit& aDTS,
+ const media::TimeUnit& aDuration,
+ int64_t aOffsetInStream)
+ {
+ // Convert duration to frames. We add 1 to duration to account for
+ // rounding errors, so we get a consistent tone.
+ CheckedInt64 frames =
+ UsecsToFrames(aDuration.ToMicroseconds()+1, mSampleRate);
+ if (!frames.isValid() ||
+ !mChannelCount ||
+ !mSampleRate ||
+ frames.value() > (UINT32_MAX / mChannelCount)) {
+ return nullptr;
+ }
+ AlignedAudioBuffer samples(frames.value() * mChannelCount);
+ if (!samples) {
+ return nullptr;
+ }
+ // Fill the sound buffer with an A4 tone.
+ static const float pi = 3.14159265f;
+ static const float noteHz = 440.0f;
+ for (int i = 0; i < frames.value(); i++) {
+ float f = sin(2 * pi * noteHz * mFrameSum / mSampleRate);
+ for (unsigned c = 0; c < mChannelCount; c++) {
+ samples[i * mChannelCount + c] = AudioDataValue(f);
+ }
+ mFrameSum++;
+ }
+ return new AudioData(aOffsetInStream,
+ aDTS.ToMicroseconds(),
+ aDuration.ToMicroseconds(),
+ uint32_t(frames.value()),
+ Move(samples),
+ mChannelCount,
+ mSampleRate);
+ }
+
+private:
+ int64_t mFrameSum;
+ uint32_t mChannelCount;
+ uint32_t mSampleRate;
+};
+
+class BlankDecoderModule : public PlatformDecoderModule {
+public:
+
+ // Decode thread.
+ already_AddRefed<MediaDataDecoder>
+ CreateVideoDecoder(const CreateDecoderParams& aParams) override {
+ const VideoInfo& config = aParams.VideoConfig();
+ BlankVideoDataCreator* creator = new BlankVideoDataCreator(
+ config.mDisplay.width, config.mDisplay.height, aParams.mImageContainer);
+ RefPtr<MediaDataDecoder> decoder =
+ new BlankMediaDataDecoder<BlankVideoDataCreator>(creator, aParams);
+ return decoder.forget();
+ }
+
+ // Decode thread.
+ already_AddRefed<MediaDataDecoder>
+ CreateAudioDecoder(const CreateDecoderParams& aParams) override {
+ const AudioInfo& config = aParams.AudioConfig();
+ BlankAudioDataCreator* creator = new BlankAudioDataCreator(
+ config.mChannels, config.mRate);
+
+ RefPtr<MediaDataDecoder> decoder =
+ new BlankMediaDataDecoder<BlankAudioDataCreator>(creator, aParams);
+ return decoder.forget();
+ }
+
+ bool
+ SupportsMimeType(const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const override
+ {
+ return true;
+ }
+
+ ConversionRequired
+ DecoderNeedsConversion(const TrackInfo& aConfig) const override
+ {
+ if (aConfig.IsVideo() && MP4Decoder::IsH264(aConfig.mMimeType)) {
+ return ConversionRequired::kNeedAVCC;
+ } else {
+ return ConversionRequired::kNeedNone;
+ }
+ }
+
+};
+
+already_AddRefed<PlatformDecoderModule> CreateBlankDecoderModule()
+{
+ RefPtr<PlatformDecoderModule> pdm = new BlankDecoderModule();
+ return pdm.forget();
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/OpusDecoder.cpp b/dom/media/platforms/agnostic/OpusDecoder.cpp
new file mode 100644
index 000000000..9163ed058
--- /dev/null
+++ b/dom/media/platforms/agnostic/OpusDecoder.cpp
@@ -0,0 +1,356 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "OpusDecoder.h"
+#include "OpusParser.h"
+#include "TimeUnits.h"
+#include "VorbisUtils.h"
+#include "VorbisDecoder.h" // For VorbisLayout
+#include "mozilla/EndianUtils.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/SyncRunnable.h"
+
+#include <inttypes.h> // For PRId64
+
+#include "opus/opus.h"
+extern "C" {
+#include "opus/opus_multistream.h"
+}
+
+#define OPUS_DEBUG(arg, ...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, \
+ ("OpusDataDecoder(%p)::%s: " arg, this, __func__, ##__VA_ARGS__))
+
+namespace mozilla {
+
+OpusDataDecoder::OpusDataDecoder(const CreateDecoderParams& aParams)
+ : mInfo(aParams.AudioConfig())
+ , mTaskQueue(aParams.mTaskQueue)
+ , mCallback(aParams.mCallback)
+ , mOpusDecoder(nullptr)
+ , mSkip(0)
+ , mDecodedHeader(false)
+ , mPaddingDiscarded(false)
+ , mFrames(0)
+ , mIsFlushing(false)
+{
+}
+
+OpusDataDecoder::~OpusDataDecoder()
+{
+ if (mOpusDecoder) {
+ opus_multistream_decoder_destroy(mOpusDecoder);
+ mOpusDecoder = nullptr;
+ }
+}
+
+void
+OpusDataDecoder::Shutdown()
+{
+}
+
+void
+OpusDataDecoder::AppendCodecDelay(MediaByteBuffer* config, uint64_t codecDelayUS)
+{
+ uint8_t buffer[sizeof(uint64_t)];
+ BigEndian::writeUint64(buffer, codecDelayUS);
+ config->AppendElements(buffer, sizeof(uint64_t));
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+OpusDataDecoder::Init()
+{
+ size_t length = mInfo.mCodecSpecificConfig->Length();
+ uint8_t *p = mInfo.mCodecSpecificConfig->Elements();
+ if (length < sizeof(uint64_t)) {
+ OPUS_DEBUG("CodecSpecificConfig too short to read codecDelay!");
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+ int64_t codecDelay = BigEndian::readUint64(p);
+ length -= sizeof(uint64_t);
+ p += sizeof(uint64_t);
+ if (NS_FAILED(DecodeHeader(p, length))) {
+ OPUS_DEBUG("Error decoding header!");
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+ int r;
+ mOpusDecoder = opus_multistream_decoder_create(mOpusParser->mRate,
+ mOpusParser->mChannels,
+ mOpusParser->mStreams,
+ mOpusParser->mCoupledStreams,
+ mMappingTable,
+ &r);
+ mSkip = mOpusParser->mPreSkip;
+ mPaddingDiscarded = false;
+
+ if (codecDelay != FramesToUsecs(mOpusParser->mPreSkip,
+ mOpusParser->mRate).value()) {
+ NS_WARNING("Invalid Opus header: CodecDelay and pre-skip do not match!");
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+ if (mInfo.mRate != (uint32_t)mOpusParser->mRate) {
+ NS_WARNING("Invalid Opus header: container and codec rate do not match!");
+ }
+ if (mInfo.mChannels != (uint32_t)mOpusParser->mChannels) {
+ NS_WARNING("Invalid Opus header: container and codec channels do not match!");
+ }
+
+ return r == OPUS_OK ? InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__)
+ : InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+}
+
+nsresult
+OpusDataDecoder::DecodeHeader(const unsigned char* aData, size_t aLength)
+{
+ MOZ_ASSERT(!mOpusParser);
+ MOZ_ASSERT(!mOpusDecoder);
+ MOZ_ASSERT(!mDecodedHeader);
+ mDecodedHeader = true;
+
+ mOpusParser = new OpusParser;
+ if (!mOpusParser->DecodeHeader(const_cast<unsigned char*>(aData), aLength)) {
+ return NS_ERROR_FAILURE;
+ }
+ int channels = mOpusParser->mChannels;
+
+ AudioConfig::ChannelLayout layout(channels);
+ if (!layout.IsValid()) {
+ OPUS_DEBUG("Invalid channel mapping. Source is %d channels", channels);
+ return NS_ERROR_FAILURE;
+ }
+
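+  // Opus uses the Vorbis channel ordering. Compute the mapping from the
+  // Vorbis layout to the SMPTE default layout and re-route the Opus channel
+  // mapping table through it, so decoded audio comes out in SMPTE order.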
+ AudioConfig::ChannelLayout vorbisLayout(
+ channels, VorbisDataDecoder::VorbisLayout(channels));
+ AudioConfig::ChannelLayout smpteLayout(channels);
+ static_assert(sizeof(mOpusParser->mMappingTable) / sizeof(mOpusParser->mMappingTable[0]) >= MAX_AUDIO_CHANNELS,
+ "Invalid size set");
+ uint8_t map[sizeof(mOpusParser->mMappingTable) / sizeof(mOpusParser->mMappingTable[0])];
+ if (vorbisLayout.MappingTable(smpteLayout, map)) {
+ for (int i = 0; i < channels; i++) {
+ mMappingTable[i] = mOpusParser->mMappingTable[map[i]];
+ }
+ } else {
+ // Should never get here as vorbis layout is always convertible to SMPTE
+ // default layout.
+ PodCopy(mMappingTable, mOpusParser->mMappingTable, MAX_AUDIO_CHANNELS);
+ }
+
+ return NS_OK;
+}
+
+void
+OpusDataDecoder::Input(MediaRawData* aSample)
+{
+ mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
+ this, &OpusDataDecoder::ProcessDecode, aSample));
+}
+
+void
+OpusDataDecoder::ProcessDecode(MediaRawData* aSample)
+{
+ if (mIsFlushing) {
+ return;
+ }
+
+ MediaResult rv = DoDecode(aSample);
+ if (NS_FAILED(rv)) {
+ mCallback->Error(rv);
+ return;
+ }
+ mCallback->InputExhausted();
+}
+
+MediaResult
+OpusDataDecoder::DoDecode(MediaRawData* aSample)
+{
+ uint32_t channels = mOpusParser->mChannels;
+
+ if (mPaddingDiscarded) {
+ // Discard padding should be used only on the final packet, so
+ // decoding after a padding discard is invalid.
+ OPUS_DEBUG("Opus error, discard padding on interstitial packet");
+ return MediaResult(
+ NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Discard padding on interstitial packet"));
+ }
+
+ if (!mLastFrameTime || mLastFrameTime.ref() != aSample->mTime) {
+ // We are starting a new block.
+ mFrames = 0;
+ mLastFrameTime = Some(aSample->mTime);
+ }
+
+ // Maximum value is 63*2880, so there's no chance of overflow.
+ int frames_number =
+ opus_packet_get_nb_frames(aSample->Data(), aSample->Size());
+ if (frames_number <= 0) {
+ OPUS_DEBUG("Invalid packet header: r=%d length=%u",
+ frames_number, uint32_t(aSample->Size()));
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("Invalid packet header: r=%d length=%u",
+ frames_number, uint32_t(aSample->Size())));
+ }
+
+ int samples = opus_packet_get_samples_per_frame(
+ aSample->Data(), opus_int32(mOpusParser->mRate));
+
+
+ // A valid Opus packet must be between 2.5 and 120 ms long (48kHz).
+ CheckedInt32 totalFrames =
+ CheckedInt32(frames_number) * CheckedInt32(samples);
+ if (!totalFrames.isValid()) {
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("Frames count overflow"));
+ }
+
+ int frames = totalFrames.value();
+ if (frames < 120 || frames > 5760) {
+ OPUS_DEBUG("Invalid packet frames: %d", frames);
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("Invalid packet frames:%d", frames));
+ }
+
+ AlignedAudioBuffer buffer(frames * channels);
+ if (!buffer) {
+ return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+ }
+
+ // Decode to the appropriate sample type.
+#ifdef MOZ_SAMPLE_TYPE_FLOAT32
+ int ret = opus_multistream_decode_float(mOpusDecoder,
+ aSample->Data(), aSample->Size(),
+ buffer.get(), frames, false);
+#else
+ int ret = opus_multistream_decode(mOpusDecoder,
+ aSample->Data(), aSample->Size(),
+ buffer.get(), frames, false);
+#endif
+ if (ret < 0) {
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("Opus decoding error:%d", ret));
+ }
+ NS_ASSERTION(ret == frames, "Opus decoded too few audio samples");
+ CheckedInt64 startTime = aSample->mTime;
+
+ // Trim the initial frames while the decoder is settling.
+ if (mSkip > 0) {
+ int32_t skipFrames = std::min<int32_t>(mSkip, frames);
+ int32_t keepFrames = frames - skipFrames;
+ OPUS_DEBUG(
+ "Opus decoder skipping %d of %d frames", skipFrames, frames);
+ PodMove(buffer.get(),
+ buffer.get() + skipFrames * channels,
+ keepFrames * channels);
+ startTime = startTime + FramesToUsecs(skipFrames, mOpusParser->mRate);
+ frames = keepFrames;
+ mSkip -= skipFrames;
+ }
+
+ if (aSample->mDiscardPadding > 0) {
+ OPUS_DEBUG("Opus decoder discarding %u of %d frames",
+ aSample->mDiscardPadding, frames);
+ // Padding discard is only supposed to happen on the final packet.
+ // Record the discard so we can return an error if another packet is
+ // decoded.
+ if (aSample->mDiscardPadding > uint32_t(frames)) {
+ // Discarding more than the entire packet is invalid.
+ OPUS_DEBUG("Opus error, discard padding larger than packet");
+ return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Discard padding larger than packet"));
+ }
+
+ mPaddingDiscarded = true;
+ frames = frames - aSample->mDiscardPadding;
+ }
+
+ // Apply the header gain if one was specified.
+#ifdef MOZ_SAMPLE_TYPE_FLOAT32
+ if (mOpusParser->mGain != 1.0f) {
+ float gain = mOpusParser->mGain;
+ uint32_t samples = frames * channels;
+ for (uint32_t i = 0; i < samples; i++) {
+ buffer[i] *= gain;
+ }
+ }
+#else
+ if (mOpusParser->mGain_Q16 != 65536) {
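+    // mGain_Q16 is a Q16 fixed-point gain (65536 == unity). Multiply, round
+    // by adding 32768, shift right by 16, then clamp to the signed 16-bit
+    // sample range.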
+ int64_t gain_Q16 = mOpusParser->mGain_Q16;
+ uint32_t samples = frames * channels;
+ for (uint32_t i = 0; i < samples; i++) {
+ int32_t val = static_cast<int32_t>((gain_Q16*buffer[i] + 32768)>>16);
+ buffer[i] = static_cast<AudioDataValue>(MOZ_CLIP_TO_15(val));
+ }
+ }
+#endif
+
+ CheckedInt64 duration = FramesToUsecs(frames, mOpusParser->mRate);
+ if (!duration.isValid()) {
+ return MediaResult(
+ NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+ RESULT_DETAIL("Overflow converting WebM audio duration"));
+ }
+ CheckedInt64 time =
+ startTime - FramesToUsecs(mOpusParser->mPreSkip, mOpusParser->mRate) +
+ FramesToUsecs(mFrames, mOpusParser->mRate);
+ if (!time.isValid()) {
+ return MediaResult(
+ NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+ RESULT_DETAIL("Overflow shifting tstamp by codec delay"));
+ };
+
+ mCallback->Output(new AudioData(aSample->mOffset,
+ time.value(),
+ duration.value(),
+ frames,
+ Move(buffer),
+ mOpusParser->mChannels,
+ mOpusParser->mRate));
+ mFrames += frames;
+ return NS_OK;
+}
+
+void
+OpusDataDecoder::ProcessDrain()
+{
+ mCallback->DrainComplete();
+}
+
+void
+OpusDataDecoder::Drain()
+{
+ mTaskQueue->Dispatch(NewRunnableMethod(this, &OpusDataDecoder::ProcessDrain));
+}
+
+void
+OpusDataDecoder::Flush()
+{
+ if (!mOpusDecoder) {
+ return;
+ }
+ mIsFlushing = true;
+ RefPtr<OpusDataDecoder> self = this;
+ nsCOMPtr<nsIRunnable> runnable = NS_NewRunnableFunction([self] () {
+ MOZ_ASSERT(self->mOpusDecoder);
+ // Reset the decoder.
+ opus_multistream_decoder_ctl(self->mOpusDecoder, OPUS_RESET_STATE);
+ self->mSkip = self->mOpusParser->mPreSkip;
+ self->mPaddingDiscarded = false;
+ self->mLastFrameTime.reset();
+ });
+ SyncRunnable::DispatchToThread(mTaskQueue, runnable);
+ mIsFlushing = false;
+}
+
+/* static */
+bool
+OpusDataDecoder::IsOpus(const nsACString& aMimeType)
+{
+ return aMimeType.EqualsLiteral("audio/opus");
+}
+
+} // namespace mozilla
+#undef OPUS_DEBUG
diff --git a/dom/media/platforms/agnostic/OpusDecoder.h b/dom/media/platforms/agnostic/OpusDecoder.h
new file mode 100644
index 000000000..6d82ae502
--- /dev/null
+++ b/dom/media/platforms/agnostic/OpusDecoder.h
@@ -0,0 +1,76 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#if !defined(OpusDecoder_h_)
+#define OpusDecoder_h_
+
+#include "PlatformDecoderModule.h"
+
+#include "mozilla/Maybe.h"
+#include "nsAutoPtr.h"
+
+struct OpusMSDecoder;
+
+namespace mozilla {
+
+class OpusParser;
+
+class OpusDataDecoder : public MediaDataDecoder
+{
+public:
+ explicit OpusDataDecoder(const CreateDecoderParams& aParams);
+ ~OpusDataDecoder();
+
+ RefPtr<InitPromise> Init() override;
+ void Input(MediaRawData* aSample) override;
+ void Flush() override;
+ void Drain() override;
+ void Shutdown() override;
+ const char* GetDescriptionName() const override
+ {
+ return "opus audio decoder";
+ }
+
+ // Return true if mimetype is Opus
+ static bool IsOpus(const nsACString& aMimeType);
+
+  // Pack the pre-skip/CodecDelay value, given in microseconds, into a
+  // MediaByteBuffer. The decoder expects this value to come from the
+  // container (if any) and to precede the OpusHead block in the
+  // CodecSpecificConfig buffer, so that the two values can be
+  // cross-checked at Init() time.
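+  //
+  // A minimal usage sketch (illustrative; |config|, |opusHeadData|,
+  // |opusHeadLength| and |codecDelayNs| are hypothetical caller-side names,
+  // with the container delay converted from nanoseconds to microseconds):
+  //   OpusDataDecoder::AppendCodecDelay(config, codecDelayNs / 1000);
+  //   config->AppendElements(opusHeadData, opusHeadLength);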
+ static void AppendCodecDelay(MediaByteBuffer* config, uint64_t codecDelayUS);
+
+private:
+ nsresult DecodeHeader(const unsigned char* aData, size_t aLength);
+
+ void ProcessDecode(MediaRawData* aSample);
+ MediaResult DoDecode(MediaRawData* aSample);
+ void ProcessDrain();
+
+ const AudioInfo& mInfo;
+ const RefPtr<TaskQueue> mTaskQueue;
+ MediaDataDecoderCallback* mCallback;
+
+ // Opus decoder state
+ nsAutoPtr<OpusParser> mOpusParser;
+ OpusMSDecoder* mOpusDecoder;
+
+ uint16_t mSkip; // Samples left to trim before playback.
+ bool mDecodedHeader;
+
+ // Opus padding should only be discarded on the final packet. Once this
+ // is set to true, if the reader attempts to decode any further packets it
+ // will raise an error so we can indicate that the file is invalid.
+ bool mPaddingDiscarded;
+ int64_t mFrames;
+ Maybe<int64_t> mLastFrameTime;
+ uint8_t mMappingTable[MAX_AUDIO_CHANNELS]; // Channel mapping table.
+
+ Atomic<bool> mIsFlushing;
+};
+
+} // namespace mozilla
+#endif
diff --git a/dom/media/platforms/agnostic/TheoraDecoder.cpp b/dom/media/platforms/agnostic/TheoraDecoder.cpp
new file mode 100644
index 000000000..b216791e4
--- /dev/null
+++ b/dom/media/platforms/agnostic/TheoraDecoder.cpp
@@ -0,0 +1,240 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "TheoraDecoder.h"
+#include "XiphExtradata.h"
+#include "gfx2DGlue.h"
+#include "nsError.h"
+#include "TimeUnits.h"
+#include "mozilla/PodOperations.h"
+
+#include <algorithm>
+
+#undef LOG
+#define LOG(arg, ...) MOZ_LOG(gMediaDecoderLog, mozilla::LogLevel::Debug, ("TheoraDecoder(%p)::%s: " arg, this, __func__, ##__VA_ARGS__))
+
+namespace mozilla {
+
+using namespace gfx;
+using namespace layers;
+
+extern LazyLogModule gMediaDecoderLog;
+
+ogg_packet InitTheoraPacket(const unsigned char* aData, size_t aLength,
+ bool aBOS, bool aEOS,
+ int64_t aGranulepos, int64_t aPacketNo)
+{
+ ogg_packet packet;
+ packet.packet = const_cast<unsigned char*>(aData);
+ packet.bytes = aLength;
+ packet.b_o_s = aBOS;
+ packet.e_o_s = aEOS;
+ packet.granulepos = aGranulepos;
+ packet.packetno = aPacketNo;
+ return packet;
+}
+
+TheoraDecoder::TheoraDecoder(const CreateDecoderParams& aParams)
+ : mImageContainer(aParams.mImageContainer)
+ , mTaskQueue(aParams.mTaskQueue)
+ , mCallback(aParams.mCallback)
+ , mIsFlushing(false)
+ , mTheoraSetupInfo(nullptr)
+ , mTheoraDecoderContext(nullptr)
+ , mPacketCount(0)
+ , mInfo(aParams.VideoConfig())
+{
+ MOZ_COUNT_CTOR(TheoraDecoder);
+}
+
+TheoraDecoder::~TheoraDecoder()
+{
+ MOZ_COUNT_DTOR(TheoraDecoder);
+ th_setup_free(mTheoraSetupInfo);
+ th_comment_clear(&mTheoraComment);
+ th_info_clear(&mTheoraInfo);
+}
+
+void
+TheoraDecoder::Shutdown()
+{
+ if (mTheoraDecoderContext) {
+ th_decode_free(mTheoraDecoderContext);
+ mTheoraDecoderContext = nullptr;
+ }
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+TheoraDecoder::Init()
+{
+ th_comment_init(&mTheoraComment);
+ th_info_init(&mTheoraInfo);
+
+ nsTArray<unsigned char*> headers;
+ nsTArray<size_t> headerLens;
+ if (!XiphExtradataToHeaders(headers, headerLens,
+ mInfo.mCodecSpecificConfig->Elements(),
+ mInfo.mCodecSpecificConfig->Length())) {
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+ for (size_t i = 0; i < headers.Length(); i++) {
+ if (NS_FAILED(DoDecodeHeader(headers[i], headerLens[i]))) {
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+ }
+ if (mPacketCount != 3) {
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+ mTheoraDecoderContext = th_decode_alloc(&mTheoraInfo, mTheoraSetupInfo);
+ if (mTheoraDecoderContext) {
+ return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__);
+ } else {
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+}
+
+void
+TheoraDecoder::Flush()
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ mIsFlushing = true;
+ nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([this] () {
+ // nothing to do for now.
+ });
+ SyncRunnable::DispatchToThread(mTaskQueue, r);
+ mIsFlushing = false;
+}
+
+nsresult
+TheoraDecoder::DoDecodeHeader(const unsigned char* aData, size_t aLength)
+{
+ bool bos = mPacketCount == 0;
+ ogg_packet pkt = InitTheoraPacket(aData, aLength, bos, false, 0, mPacketCount++);
+
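+  // th_decode_headerin returns a positive value when it consumes a header
+  // packet; zero indicates the first video data packet and negative values
+  // are errors, both of which we treat as failure here.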
+ int r = th_decode_headerin(&mTheoraInfo,
+ &mTheoraComment,
+ &mTheoraSetupInfo,
+ &pkt);
+ return r > 0 ? NS_OK : NS_ERROR_FAILURE;
+}
+
+MediaResult
+TheoraDecoder::DoDecode(MediaRawData* aSample)
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+ const unsigned char* aData = aSample->Data();
+ size_t aLength = aSample->Size();
+
+ bool bos = mPacketCount == 0;
+ ogg_packet pkt = InitTheoraPacket(aData, aLength, bos, false, aSample->mTimecode, mPacketCount++);
+
+ int ret = th_decode_packetin(mTheoraDecoderContext, &pkt, nullptr);
+ if (ret == 0 || ret == TH_DUPFRAME) {
+ th_ycbcr_buffer ycbcr;
+ th_decode_ycbcr_out(mTheoraDecoderContext, ycbcr);
+
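+    // Work out the chroma decimation from the pixel format: 4:2:0 halves both
+    // chroma dimensions, 4:2:2 halves only the width, 4:4:4 halves neither.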
+ int hdec = !(mTheoraInfo.pixel_fmt & 1);
+ int vdec = !(mTheoraInfo.pixel_fmt & 2);
+
+ VideoData::YCbCrBuffer b;
+ b.mPlanes[0].mData = ycbcr[0].data;
+ b.mPlanes[0].mStride = ycbcr[0].stride;
+ b.mPlanes[0].mHeight = mTheoraInfo.frame_height;
+ b.mPlanes[0].mWidth = mTheoraInfo.frame_width;
+ b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;
+
+ b.mPlanes[1].mData = ycbcr[1].data;
+ b.mPlanes[1].mStride = ycbcr[1].stride;
+ b.mPlanes[1].mHeight = mTheoraInfo.frame_height >> vdec;
+ b.mPlanes[1].mWidth = mTheoraInfo.frame_width >> hdec;
+ b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;
+
+ b.mPlanes[2].mData = ycbcr[2].data;
+ b.mPlanes[2].mStride = ycbcr[2].stride;
+ b.mPlanes[2].mHeight = mTheoraInfo.frame_height >> vdec;
+ b.mPlanes[2].mWidth = mTheoraInfo.frame_width >> hdec;
+ b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;
+
+ IntRect pictureArea(mTheoraInfo.pic_x, mTheoraInfo.pic_y,
+ mTheoraInfo.pic_width, mTheoraInfo.pic_height);
+
+ VideoInfo info;
+ info.mDisplay = mInfo.mDisplay;
+ RefPtr<VideoData> v =
+ VideoData::CreateAndCopyData(info,
+ mImageContainer,
+ aSample->mOffset,
+ aSample->mTime,
+ aSample->mDuration,
+ b,
+ aSample->mKeyframe,
+ aSample->mTimecode,
+ mInfo.ScaledImageRect(mTheoraInfo.frame_width,
+ mTheoraInfo.frame_height));
+ if (!v) {
+ LOG("Image allocation error source %ldx%ld display %ldx%ld picture %ldx%ld",
+ mTheoraInfo.frame_width, mTheoraInfo.frame_height, mInfo.mDisplay.width, mInfo.mDisplay.height,
+ mInfo.mImage.width, mInfo.mImage.height);
+ return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+ }
+ mCallback->Output(v);
+ return NS_OK;
+ } else {
+ LOG("Theora Decode error: %d", ret);
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("Theora decode error:%d", ret));
+ }
+}
+
+void
+TheoraDecoder::ProcessDecode(MediaRawData* aSample)
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ if (mIsFlushing) {
+ return;
+ }
+ MediaResult rv = DoDecode(aSample);
+ if (NS_FAILED(rv)) {
+ mCallback->Error(rv);
+ } else {
+ mCallback->InputExhausted();
+ }
+}
+
+void
+TheoraDecoder::Input(MediaRawData* aSample)
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
+ this, &TheoraDecoder::ProcessDecode, aSample));
+}
+
+void
+TheoraDecoder::ProcessDrain()
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ mCallback->DrainComplete();
+}
+
+void
+TheoraDecoder::Drain()
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ mTaskQueue->Dispatch(NewRunnableMethod(this, &TheoraDecoder::ProcessDrain));
+}
+
+/* static */
+bool
+TheoraDecoder::IsTheora(const nsACString& aMimeType)
+{
+ return aMimeType.EqualsLiteral("video/theora");
+}
+
+} // namespace mozilla
+#undef LOG
diff --git a/dom/media/platforms/agnostic/TheoraDecoder.h b/dom/media/platforms/agnostic/TheoraDecoder.h
new file mode 100644
index 000000000..4a5daebb9
--- /dev/null
+++ b/dom/media/platforms/agnostic/TheoraDecoder.h
@@ -0,0 +1,64 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#if !defined(TheoraDecoder_h_)
+#define TheoraDecoder_h_
+
+#include "PlatformDecoderModule.h"
+
+#include <stdint.h>
+#include "ogg/ogg.h"
+#include "theora/theoradec.h"
+
+namespace mozilla {
+
+using namespace layers;
+
+class TheoraDecoder : public MediaDataDecoder
+{
+public:
+ explicit TheoraDecoder(const CreateDecoderParams& aParams);
+
+ ~TheoraDecoder();
+
+ RefPtr<InitPromise> Init() override;
+ void Input(MediaRawData* aSample) override;
+ void Flush() override;
+ void Drain() override;
+ void Shutdown() override;
+
+ // Return true if mimetype is a Theora codec
+ static bool IsTheora(const nsACString& aMimeType);
+
+ const char* GetDescriptionName() const override
+ {
+ return "theora video decoder";
+ }
+
+private:
+ nsresult DoDecodeHeader(const unsigned char* aData, size_t aLength);
+
+ void ProcessDecode(MediaRawData* aSample);
+ MediaResult DoDecode(MediaRawData* aSample);
+ void ProcessDrain();
+
+ RefPtr<ImageContainer> mImageContainer;
+ RefPtr<TaskQueue> mTaskQueue;
+ MediaDataDecoderCallback* mCallback;
+ Atomic<bool> mIsFlushing;
+
+ // Theora header & decoder state
+ th_info mTheoraInfo;
+ th_comment mTheoraComment;
+ th_setup_info *mTheoraSetupInfo;
+ th_dec_ctx *mTheoraDecoderContext;
+ int mPacketCount;
+
+ const VideoInfo& mInfo;
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/platforms/agnostic/VPXDecoder.cpp b/dom/media/platforms/agnostic/VPXDecoder.cpp
new file mode 100644
index 000000000..77c81b51b
--- /dev/null
+++ b/dom/media/platforms/agnostic/VPXDecoder.cpp
@@ -0,0 +1,253 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "VPXDecoder.h"
+#include "gfx2DGlue.h"
+#include "nsError.h"
+#include "TimeUnits.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/SyncRunnable.h"
+#include "prsystem.h"
+
+#include <algorithm>
+
+#undef LOG
+#define LOG(arg, ...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, ("VPXDecoder(%p)::%s: " arg, this, __func__, ##__VA_ARGS__))
+
+namespace mozilla {
+
+using namespace gfx;
+using namespace layers;
+
+static int MimeTypeToCodec(const nsACString& aMimeType)
+{
+ if (aMimeType.EqualsLiteral("video/webm; codecs=vp8")) {
+ return VPXDecoder::Codec::VP8;
+ } else if (aMimeType.EqualsLiteral("video/webm; codecs=vp9")) {
+ return VPXDecoder::Codec::VP9;
+ } else if (aMimeType.EqualsLiteral("video/vp9")) {
+ return VPXDecoder::Codec::VP9;
+ }
+ return -1;
+}
+
+VPXDecoder::VPXDecoder(const CreateDecoderParams& aParams)
+ : mImageContainer(aParams.mImageContainer)
+ , mTaskQueue(aParams.mTaskQueue)
+ , mCallback(aParams.mCallback)
+ , mIsFlushing(false)
+ , mInfo(aParams.VideoConfig())
+ , mCodec(MimeTypeToCodec(aParams.VideoConfig().mMimeType))
+{
+ MOZ_COUNT_CTOR(VPXDecoder);
+ PodZero(&mVPX);
+}
+
+VPXDecoder::~VPXDecoder()
+{
+ MOZ_COUNT_DTOR(VPXDecoder);
+}
+
+void
+VPXDecoder::Shutdown()
+{
+ vpx_codec_destroy(&mVPX);
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+VPXDecoder::Init()
+{
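+  // Choose a decode thread count below based on the codec and frame width,
+  // then cap it at the number of processors reported by NSPR.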
+ int decode_threads = 2;
+
+ vpx_codec_iface_t* dx = nullptr;
+ if (mCodec == Codec::VP8) {
+ dx = vpx_codec_vp8_dx();
+ } else if (mCodec == Codec::VP9) {
+ dx = vpx_codec_vp9_dx();
+ if (mInfo.mDisplay.width >= 2048) {
+ decode_threads = 8;
+ } else if (mInfo.mDisplay.width >= 1024) {
+ decode_threads = 4;
+ }
+ }
+ decode_threads = std::min(decode_threads, PR_GetNumberOfProcessors());
+
+ vpx_codec_dec_cfg_t config;
+ config.threads = decode_threads;
+ config.w = config.h = 0; // set after decode
+
+ if (!dx || vpx_codec_dec_init(&mVPX, dx, &config, 0)) {
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+ return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__);
+}
+
+void
+VPXDecoder::Flush()
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ mIsFlushing = true;
+ nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([this] () {
+ // nothing to do for now.
+ });
+ SyncRunnable::DispatchToThread(mTaskQueue, r);
+ mIsFlushing = false;
+}
+
+MediaResult
+VPXDecoder::DoDecode(MediaRawData* aSample)
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+#if defined(DEBUG)
+ vpx_codec_stream_info_t si;
+ PodZero(&si);
+ si.sz = sizeof(si);
+ if (mCodec == Codec::VP8) {
+ vpx_codec_peek_stream_info(vpx_codec_vp8_dx(), aSample->Data(), aSample->Size(), &si);
+ } else if (mCodec == Codec::VP9) {
+ vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), aSample->Data(), aSample->Size(), &si);
+ }
+ NS_ASSERTION(bool(si.is_kf) == aSample->mKeyframe,
+ "VPX Decode Keyframe error sample->mKeyframe and si.si_kf out of sync");
+#endif
+
+ if (vpx_codec_err_t r = vpx_codec_decode(&mVPX, aSample->Data(), aSample->Size(), nullptr, 0)) {
+ LOG("VPX Decode error: %s", vpx_codec_err_to_string(r));
+ return MediaResult(
+ NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("VPX error: %s", vpx_codec_err_to_string(r)));
+ }
+
+ vpx_codec_iter_t iter = nullptr;
+ vpx_image_t *img;
+
+ while ((img = vpx_codec_get_frame(&mVPX, &iter))) {
+ NS_ASSERTION(img->fmt == VPX_IMG_FMT_I420 ||
+ img->fmt == VPX_IMG_FMT_I444,
+ "WebM image format not I420 or I444");
+
+ // Chroma shifts are rounded down as per the decoding examples in the SDK
+ VideoData::YCbCrBuffer b;
+ b.mPlanes[0].mData = img->planes[0];
+ b.mPlanes[0].mStride = img->stride[0];
+ b.mPlanes[0].mHeight = img->d_h;
+ b.mPlanes[0].mWidth = img->d_w;
+ b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;
+
+ b.mPlanes[1].mData = img->planes[1];
+ b.mPlanes[1].mStride = img->stride[1];
+ b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;
+
+ b.mPlanes[2].mData = img->planes[2];
+ b.mPlanes[2].mStride = img->stride[2];
+ b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;
+
+ if (img->fmt == VPX_IMG_FMT_I420) {
+ b.mPlanes[1].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
+ b.mPlanes[1].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
+
+ b.mPlanes[2].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
+ b.mPlanes[2].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
+ } else if (img->fmt == VPX_IMG_FMT_I444) {
+ b.mPlanes[1].mHeight = img->d_h;
+ b.mPlanes[1].mWidth = img->d_w;
+
+ b.mPlanes[2].mHeight = img->d_h;
+ b.mPlanes[2].mWidth = img->d_w;
+ } else {
+ LOG("VPX Unknown image format");
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("VPX Unknown image format"));
+ }
+
+ RefPtr<VideoData> v =
+ VideoData::CreateAndCopyData(mInfo,
+ mImageContainer,
+ aSample->mOffset,
+ aSample->mTime,
+ aSample->mDuration,
+ b,
+ aSample->mKeyframe,
+ aSample->mTimecode,
+ mInfo.ScaledImageRect(img->d_w,
+ img->d_h));
+
+ if (!v) {
+ LOG("Image allocation error source %ldx%ld display %ldx%ld picture %ldx%ld",
+ img->d_w, img->d_h, mInfo.mDisplay.width, mInfo.mDisplay.height,
+ mInfo.mImage.width, mInfo.mImage.height);
+ return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+ }
+ mCallback->Output(v);
+ }
+ return NS_OK;
+}
+
+void
+VPXDecoder::ProcessDecode(MediaRawData* aSample)
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ if (mIsFlushing) {
+ return;
+ }
+ MediaResult rv = DoDecode(aSample);
+ if (NS_FAILED(rv)) {
+ mCallback->Error(rv);
+ } else {
+ mCallback->InputExhausted();
+ }
+}
+
+void
+VPXDecoder::Input(MediaRawData* aSample)
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
+ this, &VPXDecoder::ProcessDecode, aSample));
+}
+
+void
+VPXDecoder::ProcessDrain()
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ mCallback->DrainComplete();
+}
+
+void
+VPXDecoder::Drain()
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ mTaskQueue->Dispatch(NewRunnableMethod(this, &VPXDecoder::ProcessDrain));
+}
+
+/* static */
+bool
+VPXDecoder::IsVPX(const nsACString& aMimeType, uint8_t aCodecMask)
+{
+ return ((aCodecMask & VPXDecoder::VP8) &&
+ aMimeType.EqualsLiteral("video/webm; codecs=vp8")) ||
+ ((aCodecMask & VPXDecoder::VP9) &&
+ aMimeType.EqualsLiteral("video/webm; codecs=vp9")) ||
+ ((aCodecMask & VPXDecoder::VP9) &&
+ aMimeType.EqualsLiteral("video/vp9"));
+}
+
+/* static */
+bool
+VPXDecoder::IsVP8(const nsACString& aMimeType)
+{
+ return IsVPX(aMimeType, VPXDecoder::VP8);
+}
+
+/* static */
+bool
+VPXDecoder::IsVP9(const nsACString& aMimeType)
+{
+ return IsVPX(aMimeType, VPXDecoder::VP9);
+}
+
+} // namespace mozilla
+#undef LOG
diff --git a/dom/media/platforms/agnostic/VPXDecoder.h b/dom/media/platforms/agnostic/VPXDecoder.h
new file mode 100644
index 000000000..d420ec069
--- /dev/null
+++ b/dom/media/platforms/agnostic/VPXDecoder.h
@@ -0,0 +1,69 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#if !defined(VPXDecoder_h_)
+#define VPXDecoder_h_
+
+#include "PlatformDecoderModule.h"
+
+#include <stdint.h>
+#define VPX_DONT_DEFINE_STDINT_TYPES
+#include "vpx/vp8dx.h"
+#include "vpx/vpx_codec.h"
+#include "vpx/vpx_decoder.h"
+
+namespace mozilla {
+
+using namespace layers;
+
+class VPXDecoder : public MediaDataDecoder
+{
+public:
+ explicit VPXDecoder(const CreateDecoderParams& aParams);
+ ~VPXDecoder();
+
+ RefPtr<InitPromise> Init() override;
+ void Input(MediaRawData* aSample) override;
+ void Flush() override;
+ void Drain() override;
+ void Shutdown() override;
+ const char* GetDescriptionName() const override
+ {
+ return "libvpx video decoder";
+ }
+
+ enum Codec: uint8_t {
+ VP8 = 1 << 0,
+ VP9 = 1 << 1
+ };
+
+  // Return true if aMimeType is one of the strings used by our demuxers to
+ // identify VPX of the specified type. Does not parse general content type
+ // strings, i.e. white space matters.
+ static bool IsVPX(const nsACString& aMimeType, uint8_t aCodecMask=VP8|VP9);
+ static bool IsVP8(const nsACString& aMimeType);
+ static bool IsVP9(const nsACString& aMimeType);
+
+private:
+ void ProcessDecode(MediaRawData* aSample);
+ MediaResult DoDecode(MediaRawData* aSample);
+ void ProcessDrain();
+
+ const RefPtr<ImageContainer> mImageContainer;
+ const RefPtr<TaskQueue> mTaskQueue;
+ MediaDataDecoderCallback* mCallback;
+ Atomic<bool> mIsFlushing;
+
+ // VPx decoder state
+ vpx_codec_ctx_t mVPX;
+
+ const VideoInfo& mInfo;
+
+ const int mCodec;
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/platforms/agnostic/VorbisDecoder.cpp b/dom/media/platforms/agnostic/VorbisDecoder.cpp
new file mode 100644
index 000000000..ed8b90dbd
--- /dev/null
+++ b/dom/media/platforms/agnostic/VorbisDecoder.cpp
@@ -0,0 +1,349 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "VorbisDecoder.h"
+#include "VorbisUtils.h"
+#include "XiphExtradata.h"
+
+#include "mozilla/PodOperations.h"
+#include "mozilla/SyncRunnable.h"
+
+#undef LOG
+#define LOG(type, msg) MOZ_LOG(sPDMLog, type, msg)
+
+namespace mozilla {
+
+ogg_packet InitVorbisPacket(const unsigned char* aData, size_t aLength,
+ bool aBOS, bool aEOS,
+ int64_t aGranulepos, int64_t aPacketNo)
+{
+ ogg_packet packet;
+ packet.packet = const_cast<unsigned char*>(aData);
+ packet.bytes = aLength;
+ packet.b_o_s = aBOS;
+ packet.e_o_s = aEOS;
+ packet.granulepos = aGranulepos;
+ packet.packetno = aPacketNo;
+ return packet;
+}
+
+VorbisDataDecoder::VorbisDataDecoder(const CreateDecoderParams& aParams)
+ : mInfo(aParams.AudioConfig())
+ , mTaskQueue(aParams.mTaskQueue)
+ , mCallback(aParams.mCallback)
+ , mPacketCount(0)
+ , mFrames(0)
+ , mIsFlushing(false)
+{
+ // Zero these member vars to avoid crashes in Vorbis clear functions when
+ // destructor is called before |Init|.
+ PodZero(&mVorbisBlock);
+ PodZero(&mVorbisDsp);
+ PodZero(&mVorbisInfo);
+ PodZero(&mVorbisComment);
+}
+
+VorbisDataDecoder::~VorbisDataDecoder()
+{
+ vorbis_block_clear(&mVorbisBlock);
+ vorbis_dsp_clear(&mVorbisDsp);
+ vorbis_info_clear(&mVorbisInfo);
+ vorbis_comment_clear(&mVorbisComment);
+}
+
+void
+VorbisDataDecoder::Shutdown()
+{
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+VorbisDataDecoder::Init()
+{
+ vorbis_info_init(&mVorbisInfo);
+ vorbis_comment_init(&mVorbisComment);
+ PodZero(&mVorbisDsp);
+ PodZero(&mVorbisBlock);
+
+ AutoTArray<unsigned char*,4> headers;
+ AutoTArray<size_t,4> headerLens;
+ if (!XiphExtradataToHeaders(headers, headerLens,
+ mInfo.mCodecSpecificConfig->Elements(),
+ mInfo.mCodecSpecificConfig->Length())) {
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+ for (size_t i = 0; i < headers.Length(); i++) {
+ if (NS_FAILED(DecodeHeader(headers[i], headerLens[i]))) {
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+ }
+
+ MOZ_ASSERT(mPacketCount == 3);
+
+ int r = vorbis_synthesis_init(&mVorbisDsp, &mVorbisInfo);
+ if (r) {
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+ r = vorbis_block_init(&mVorbisDsp, &mVorbisBlock);
+ if (r) {
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+ if (mInfo.mRate != (uint32_t)mVorbisDsp.vi->rate) {
+ LOG(LogLevel::Warning,
+ ("Invalid Vorbis header: container and codec rate do not match!"));
+ }
+ if (mInfo.mChannels != (uint32_t)mVorbisDsp.vi->channels) {
+ LOG(LogLevel::Warning,
+ ("Invalid Vorbis header: container and codec channels do not match!"));
+ }
+
+ AudioConfig::ChannelLayout layout(mVorbisDsp.vi->channels);
+ if (!layout.IsValid()) {
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+ return InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__);
+}
+
+nsresult
+VorbisDataDecoder::DecodeHeader(const unsigned char* aData, size_t aLength)
+{
+ bool bos = mPacketCount == 0;
+ ogg_packet pkt = InitVorbisPacket(aData, aLength, bos, false, 0, mPacketCount++);
+ MOZ_ASSERT(mPacketCount <= 3);
+
+ int r = vorbis_synthesis_headerin(&mVorbisInfo,
+ &mVorbisComment,
+ &pkt);
+ return r == 0 ? NS_OK : NS_ERROR_FAILURE;
+}
+
+void
+VorbisDataDecoder::Input(MediaRawData* aSample)
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
+ this, &VorbisDataDecoder::ProcessDecode, aSample));
+}
+
+void
+VorbisDataDecoder::ProcessDecode(MediaRawData* aSample)
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ if (mIsFlushing) {
+ return;
+ }
+
+ MediaResult rv = DoDecode(aSample);
+ if (NS_FAILED(rv)) {
+ mCallback->Error(rv);
+ } else {
+ mCallback->InputExhausted();
+ }
+}
+
+MediaResult
+VorbisDataDecoder::DoDecode(MediaRawData* aSample)
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+ const unsigned char* aData = aSample->Data();
+ size_t aLength = aSample->Size();
+ int64_t aOffset = aSample->mOffset;
+ int64_t aTstampUsecs = aSample->mTime;
+ int64_t aTotalFrames = 0;
+
+ MOZ_ASSERT(mPacketCount >= 3);
+
+ if (!mLastFrameTime || mLastFrameTime.ref() != aSample->mTime) {
+ // We are starting a new block.
+ mFrames = 0;
+ mLastFrameTime = Some(aSample->mTime);
+ }
+
+ ogg_packet pkt = InitVorbisPacket(aData, aLength, false, aSample->mEOS,
+ aSample->mTimecode, mPacketCount++);
+
+ int err = vorbis_synthesis(&mVorbisBlock, &pkt);
+ if (err) {
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("vorbis_synthesis:%d", err));
+ }
+
+ err = vorbis_synthesis_blockin(&mVorbisDsp, &mVorbisBlock);
+ if (err) {
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("vorbis_synthesis_blockin:%d", err));
+ }
+
+ VorbisPCMValue** pcm = 0;
+ int32_t frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
+ if (frames == 0) {
+ return NS_OK;
+ }
+ while (frames > 0) {
+ uint32_t channels = mVorbisDsp.vi->channels;
+ uint32_t rate = mVorbisDsp.vi->rate;
+ AlignedAudioBuffer buffer(frames*channels);
+ if (!buffer) {
+ return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+ }
+ for (uint32_t j = 0; j < channels; ++j) {
+ VorbisPCMValue* channel = pcm[j];
+ for (uint32_t i = 0; i < uint32_t(frames); ++i) {
+ buffer[i*channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
+ }
+ }
+
+ CheckedInt64 duration = FramesToUsecs(frames, rate);
+ if (!duration.isValid()) {
+ return MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+ RESULT_DETAIL("Overflow converting audio duration"));
+ }
+ CheckedInt64 total_duration = FramesToUsecs(mFrames, rate);
+ if (!total_duration.isValid()) {
+ return MediaResult(
+ NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+ RESULT_DETAIL("Overflow converting audio total_duration"));
+ }
+
+ CheckedInt64 time = total_duration + aTstampUsecs;
+ if (!time.isValid()) {
+ return MediaResult(
+ NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+ RESULT_DETAIL("Overflow adding total_duration and aTstampUsecs"));
+ };
+
+ if (!mAudioConverter) {
+ AudioConfig in(AudioConfig::ChannelLayout(channels, VorbisLayout(channels)),
+ rate);
+ AudioConfig out(channels, rate);
+ if (!in.IsValid() || !out.IsValid()) {
+ return MediaResult(
+ NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Invalid channel layout:%u", channels));
+ }
+ mAudioConverter = MakeUnique<AudioConverter>(in, out);
+ }
+ MOZ_ASSERT(mAudioConverter->CanWorkInPlace());
+ AudioSampleBuffer data(Move(buffer));
+ data = mAudioConverter->Process(Move(data));
+
+ aTotalFrames += frames;
+ mCallback->Output(new AudioData(aOffset,
+ time.value(),
+ duration.value(),
+ frames,
+ data.Forget(),
+ channels,
+ rate));
+ mFrames += frames;
+ err = vorbis_synthesis_read(&mVorbisDsp, frames);
+ if (err) {
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("vorbis_synthesis_read:%d", err));
+ }
+
+ frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
+ }
+
+ return NS_OK;
+}
+
+void
+VorbisDataDecoder::ProcessDrain()
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ mCallback->DrainComplete();
+}
+
+void
+VorbisDataDecoder::Drain()
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ mTaskQueue->Dispatch(NewRunnableMethod(this, &VorbisDataDecoder::ProcessDrain));
+}
+
+void
+VorbisDataDecoder::Flush()
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ mIsFlushing = true;
+ RefPtr<VorbisDataDecoder> self = this;
+ nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([self] () {
+ // Ignore failed results from vorbis_synthesis_restart. They
+ // aren't fatal and it fails when ResetDecode is called at a
+ // time when no vorbis data has been read.
+ vorbis_synthesis_restart(&self->mVorbisDsp);
+ self->mLastFrameTime.reset();
+ });
+ SyncRunnable::DispatchToThread(mTaskQueue, r);
+ mIsFlushing = false;
+}
+
+/* static */
+bool
+VorbisDataDecoder::IsVorbis(const nsACString& aMimeType)
+{
+ return aMimeType.EqualsLiteral("audio/vorbis");
+}
+
+/* static */ const AudioConfig::Channel*
+VorbisDataDecoder::VorbisLayout(uint32_t aChannels)
+{
+ // From https://www.xiph.org/vorbis/doc/Vorbis_I_spec.html
+ // Section 4.3.9.
+ typedef AudioConfig::Channel Channel;
+
+ switch (aChannels) {
+ case 1: // the stream is monophonic
+ {
+ static const Channel config[] = { AudioConfig::CHANNEL_MONO };
+ return config;
+ }
+ case 2: // the stream is stereo. channel order: left, right
+ {
+ static const Channel config[] = { AudioConfig::CHANNEL_LEFT, AudioConfig::CHANNEL_RIGHT };
+ return config;
+ }
+ case 3: // the stream is a 1d-surround encoding. channel order: left, center, right
+ {
+ static const Channel config[] = { AudioConfig::CHANNEL_LEFT, AudioConfig::CHANNEL_CENTER, AudioConfig::CHANNEL_RIGHT };
+ return config;
+ }
+ case 4: // the stream is quadraphonic surround. channel order: front left, front right, rear left, rear right
+ {
+ static const Channel config[] = { AudioConfig::CHANNEL_LEFT, AudioConfig::CHANNEL_RIGHT, AudioConfig::CHANNEL_LS, AudioConfig::CHANNEL_RS };
+ return config;
+ }
+ case 5: // the stream is five-channel surround. channel order: front left, center, front right, rear left, rear right
+ {
+ static const Channel config[] = { AudioConfig::CHANNEL_LEFT, AudioConfig::CHANNEL_CENTER, AudioConfig::CHANNEL_RIGHT, AudioConfig::CHANNEL_LS, AudioConfig::CHANNEL_RS };
+ return config;
+ }
+ case 6: // the stream is 5.1 surround. channel order: front left, center, front right, rear left, rear right, LFE
+ {
+ static const Channel config[] = { AudioConfig::CHANNEL_LEFT, AudioConfig::CHANNEL_CENTER, AudioConfig::CHANNEL_RIGHT, AudioConfig::CHANNEL_LS, AudioConfig::CHANNEL_RS, AudioConfig::CHANNEL_LFE };
+ return config;
+ }
+ case 7: // surround. channel order: front left, center, front right, side left, side right, rear center, LFE
+ {
+ static const Channel config[] = { AudioConfig::CHANNEL_LEFT, AudioConfig::CHANNEL_CENTER, AudioConfig::CHANNEL_RIGHT, AudioConfig::CHANNEL_LS, AudioConfig::CHANNEL_RS, AudioConfig::CHANNEL_RCENTER, AudioConfig::CHANNEL_LFE };
+ return config;
+ }
+ case 8: // the stream is 7.1 surround. channel order: front left, center, front right, side left, side right, rear left, rear right, LFE
+ {
+ static const Channel config[] = { AudioConfig::CHANNEL_LEFT, AudioConfig::CHANNEL_CENTER, AudioConfig::CHANNEL_RIGHT, AudioConfig::CHANNEL_LS, AudioConfig::CHANNEL_RS, AudioConfig::CHANNEL_RLS, AudioConfig::CHANNEL_RRS, AudioConfig::CHANNEL_LFE };
+ return config;
+ }
+ default:
+ return nullptr;
+ }
+}
+
+} // namespace mozilla
+#undef LOG
diff --git a/dom/media/platforms/agnostic/VorbisDecoder.h b/dom/media/platforms/agnostic/VorbisDecoder.h
new file mode 100644
index 000000000..0ed7bb645
--- /dev/null
+++ b/dom/media/platforms/agnostic/VorbisDecoder.h
@@ -0,0 +1,66 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#if !defined(VorbisDecoder_h_)
+#define VorbisDecoder_h_
+
+#include "PlatformDecoderModule.h"
+#include "mozilla/Maybe.h"
+#include "AudioConverter.h"
+
+#ifdef MOZ_TREMOR
+#include "tremor/ivorbiscodec.h"
+#else
+#include "vorbis/codec.h"
+#endif
+
+namespace mozilla {
+
+class VorbisDataDecoder : public MediaDataDecoder
+{
+public:
+ explicit VorbisDataDecoder(const CreateDecoderParams& aParams);
+ ~VorbisDataDecoder();
+
+ RefPtr<InitPromise> Init() override;
+ void Input(MediaRawData* aSample) override;
+ void Flush() override;
+ void Drain() override;
+ void Shutdown() override;
+ const char* GetDescriptionName() const override
+ {
+ return "vorbis audio decoder";
+ }
+
+ // Return true if mimetype is Vorbis
+ static bool IsVorbis(const nsACString& aMimeType);
+ static const AudioConfig::Channel* VorbisLayout(uint32_t aChannels);
+
+private:
+ nsresult DecodeHeader(const unsigned char* aData, size_t aLength);
+
+ void ProcessDecode(MediaRawData* aSample);
+ MediaResult DoDecode(MediaRawData* aSample);
+ void ProcessDrain();
+
+ const AudioInfo& mInfo;
+ const RefPtr<TaskQueue> mTaskQueue;
+ MediaDataDecoderCallback* mCallback;
+
+ // Vorbis decoder state
+ vorbis_info mVorbisInfo;
+ vorbis_comment mVorbisComment;
+ vorbis_dsp_state mVorbisDsp;
+ vorbis_block mVorbisBlock;
+
+ int64_t mPacketCount;
+ int64_t mFrames;
+ Maybe<int64_t> mLastFrameTime;
+ UniquePtr<AudioConverter> mAudioConverter;
+ Atomic<bool> mIsFlushing;
+};
+
+} // namespace mozilla
+#endif
diff --git a/dom/media/platforms/agnostic/WAVDecoder.cpp b/dom/media/platforms/agnostic/WAVDecoder.cpp
new file mode 100644
index 000000000..67a045287
--- /dev/null
+++ b/dom/media/platforms/agnostic/WAVDecoder.cpp
@@ -0,0 +1,158 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WAVDecoder.h"
+#include "AudioSampleFormat.h"
+#include "mozilla/SyncRunnable.h"
+
+using mp4_demuxer::ByteReader;
+
+namespace mozilla {
+
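+// Expand an 8-bit A-law code into a 16-bit linear PCM sample (ITU-T G.711):
+// undo the even-bit inversion (XOR with 0x55), then rebuild the sample from
+// the sign bit, 3-bit exponent and 4-bit mantissa.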
+int16_t
+DecodeALawSample(uint8_t aValue)
+{
+ aValue = aValue ^ 0x55;
+ int8_t sign = (aValue & 0x80) ? -1 : 1;
+ uint8_t exponent = (aValue & 0x70) >> 4;
+ uint8_t mantissa = aValue & 0x0F;
+ int16_t sample = mantissa << 4;
+ switch (exponent) {
+ case 0:
+ sample += 8;
+ break;
+ case 1:
+ sample += 0x108;
+ break;
+ default:
+ sample += 0x108;
+ sample <<= exponent - 1;
+ }
+ return sign * sample;
+}
+
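+// Expand an 8-bit mu-law code into a 16-bit linear PCM sample (ITU-T G.711):
+// mu-law stores the code bit-inverted, so flip all bits, then rebuild the
+// sample from the sign, exponent and mantissa, subtracting the bias of 33.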
+int16_t
+DecodeULawSample(uint8_t aValue)
+{
+ aValue = aValue ^ 0xFF;
+ int8_t sign = (aValue & 0x80) ? -1 : 1;
+ uint8_t exponent = (aValue & 0x70) >> 4;
+ uint8_t mantissa = aValue & 0x0F;
+ int16_t sample = (33 + 2 * mantissa) * (2 << (exponent + 1)) - 33;
+ return sign * sample;
+}
+
+WaveDataDecoder::WaveDataDecoder(const CreateDecoderParams& aParams)
+ : mInfo(aParams.AudioConfig())
+ , mCallback(aParams.mCallback)
+{
+}
+
+void
+WaveDataDecoder::Shutdown()
+{
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+WaveDataDecoder::Init()
+{
+ return InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__);
+}
+
+void
+WaveDataDecoder::Input(MediaRawData* aSample)
+{
+ MediaResult rv = DoDecode(aSample);
+ if (NS_FAILED(rv)) {
+ mCallback->Error(rv);
+ } else {
+ mCallback->InputExhausted();
+ }
+}
+
+MediaResult
+WaveDataDecoder::DoDecode(MediaRawData* aSample)
+{
+ size_t aLength = aSample->Size();
+ ByteReader aReader(aSample->Data(), aLength);
+ int64_t aOffset = aSample->mOffset;
+ uint64_t aTstampUsecs = aSample->mTime;
+
+ int32_t frames = aLength * 8 / mInfo.mBitDepth / mInfo.mChannels;
+
+ AlignedAudioBuffer buffer(frames * mInfo.mChannels);
+ if (!buffer) {
+ return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+ }
+ for (int i = 0; i < frames; ++i) {
+ for (unsigned int j = 0; j < mInfo.mChannels; ++j) {
+ if (mInfo.mProfile == 6) { //ALAW Data
+ uint8_t v = aReader.ReadU8();
+ int16_t decoded = DecodeALawSample(v);
+ buffer[i * mInfo.mChannels + j] =
+ IntegerToAudioSample<AudioDataValue>(decoded);
+ } else if (mInfo.mProfile == 7) { //ULAW Data
+ uint8_t v = aReader.ReadU8();
+ int16_t decoded = DecodeULawSample(v);
+ buffer[i * mInfo.mChannels + j] =
+ IntegerToAudioSample<AudioDataValue>(decoded);
+ } else { //PCM Data
+ if (mInfo.mBitDepth == 8) {
+ uint8_t v = aReader.ReadU8();
+ buffer[i * mInfo.mChannels + j] =
+ UInt8bitToAudioSample<AudioDataValue>(v);
+ } else if (mInfo.mBitDepth == 16) {
+ int16_t v = aReader.ReadLE16();
+ buffer[i * mInfo.mChannels + j] =
+ IntegerToAudioSample<AudioDataValue>(v);
+ } else if (mInfo.mBitDepth == 24) {
+ int32_t v = aReader.ReadLE24();
+ buffer[i * mInfo.mChannels + j] =
+ Int24bitToAudioSample<AudioDataValue>(v);
+ }
+ }
+ }
+ }
+
+  // The AudioData duration is expressed in microseconds.
+  int64_t duration = int64_t(frames) * 1000000 / mInfo.mRate;
+
+ mCallback->Output(new AudioData(aOffset,
+ aTstampUsecs,
+ duration,
+ frames,
+ Move(buffer),
+ mInfo.mChannels,
+ mInfo.mRate));
+
+ return NS_OK;
+}
+
+void
+WaveDataDecoder::Drain()
+{
+ mCallback->DrainComplete();
+}
+
+void
+WaveDataDecoder::Flush()
+{
+}
+
+/* static */
+bool
+WaveDataDecoder::IsWave(const nsACString& aMimeType)
+{
+  // Some WebAudio content uses "audio/x-wav"; the WAV demuxer uses
+  // "audio/wave; codecs=<n>", where <n> is the WAVE format tag.
+ return aMimeType.EqualsLiteral("audio/x-wav") ||
+ aMimeType.EqualsLiteral("audio/wave; codecs=1") ||
+ aMimeType.EqualsLiteral("audio/wave; codecs=6") ||
+ aMimeType.EqualsLiteral("audio/wave; codecs=7") ||
+ aMimeType.EqualsLiteral("audio/wave; codecs=65534");
+}
+
+} // namespace mozilla
+#undef LOG
diff --git a/dom/media/platforms/agnostic/WAVDecoder.h b/dom/media/platforms/agnostic/WAVDecoder.h
new file mode 100644
index 000000000..dea980168
--- /dev/null
+++ b/dom/media/platforms/agnostic/WAVDecoder.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(WaveDecoder_h_)
+#define WaveDecoder_h_
+
+#include "PlatformDecoderModule.h"
+#include "mp4_demuxer/ByteReader.h"
+
+namespace mozilla {
+
+class WaveDataDecoder : public MediaDataDecoder
+{
+public:
+ explicit WaveDataDecoder(const CreateDecoderParams& aParams);
+
+ // Returns true if the MIME type is one the WAV decoder handles.
+ static bool IsWave(const nsACString& aMimeType);
+
+ RefPtr<InitPromise> Init() override;
+ void Input(MediaRawData* aSample) override;
+ void Flush() override;
+ void Drain() override;
+ void Shutdown() override;
+ const char* GetDescriptionName() const override
+ {
+ return "wave audio decoder";
+ }
+
+private:
+ MediaResult DoDecode(MediaRawData* aSample);
+
+ const AudioInfo& mInfo;
+ MediaDataDecoderCallback* mCallback;
+};
+
+} // namespace mozilla
+#endif
diff --git a/dom/media/platforms/agnostic/eme/EMEAudioDecoder.cpp b/dom/media/platforms/agnostic/eme/EMEAudioDecoder.cpp
new file mode 100644
index 000000000..25e502025
--- /dev/null
+++ b/dom/media/platforms/agnostic/eme/EMEAudioDecoder.cpp
@@ -0,0 +1,44 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "EMEAudioDecoder.h"
+#include "mozilla/CDMProxy.h"
+
+namespace mozilla {
+
+void
+EMEAudioCallbackAdapter::Error(GMPErr aErr)
+{
+ if (aErr == GMPNoKeyErr) {
+ // The GMP failed to decrypt a frame due to not having a key. This can
+ // happen if a key expires or a session is closed during playback.
+ NS_WARNING("GMP failed to decrypt due to lack of key");
+ return;
+ }
+ AudioCallbackAdapter::Error(aErr);
+}
+
+EMEAudioDecoder::EMEAudioDecoder(CDMProxy* aProxy,
+ const GMPAudioDecoderParams& aParams)
+ : GMPAudioDecoder(GMPAudioDecoderParams(aParams).WithAdapter(
+ new EMEAudioCallbackAdapter(aParams.mCallback)))
+ , mProxy(aProxy)
+{}
+
+void
+EMEAudioDecoder::InitTags(nsTArray<nsCString>& aTags)
+{
+ aTags.AppendElement(NS_LITERAL_CSTRING("aac"));
+ aTags.AppendElement(NS_ConvertUTF16toUTF8(mProxy->KeySystem()));
+}
+
+nsCString
+EMEAudioDecoder::GetNodeId()
+{
+ return mProxy->GetNodeId();
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/eme/EMEAudioDecoder.h b/dom/media/platforms/agnostic/eme/EMEAudioDecoder.h
new file mode 100644
index 000000000..c5944cf8c
--- /dev/null
+++ b/dom/media/platforms/agnostic/eme/EMEAudioDecoder.h
@@ -0,0 +1,37 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef EMEAudioDecoder_h_
+#define EMEAudioDecoder_h_
+
+#include "GMPAudioDecoder.h"
+#include "PlatformDecoderModule.h"
+
+namespace mozilla {
+
+class EMEAudioCallbackAdapter : public AudioCallbackAdapter {
+public:
+ explicit EMEAudioCallbackAdapter(MediaDataDecoderCallbackProxy* aCallback)
+ : AudioCallbackAdapter(aCallback)
+ {}
+
+ void Error(GMPErr aErr) override;
+};
+
+class EMEAudioDecoder : public GMPAudioDecoder {
+public:
+ EMEAudioDecoder(CDMProxy* aProxy, const GMPAudioDecoderParams& aParams);
+
+private:
+ void InitTags(nsTArray<nsCString>& aTags) override;
+ nsCString GetNodeId() override;
+
+ RefPtr<CDMProxy> mProxy;
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp b/dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp
new file mode 100644
index 000000000..7d523d926
--- /dev/null
+++ b/dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp
@@ -0,0 +1,307 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "EMEDecoderModule.h"
+#include "EMEAudioDecoder.h"
+#include "EMEVideoDecoder.h"
+#include "MediaDataDecoderProxy.h"
+#include "mozIGeckoMediaPluginService.h"
+#include "mozilla/CDMProxy.h"
+#include "mozilla/Unused.h"
+#include "nsAutoPtr.h"
+#include "nsServiceManagerUtils.h"
+#include "MediaInfo.h"
+#include "nsClassHashtable.h"
+#include "GMPDecoderModule.h"
+#include "MP4Decoder.h"
+
+namespace mozilla {
+
+typedef MozPromiseRequestHolder<CDMProxy::DecryptPromise> DecryptPromiseRequestHolder;
+
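+// MediaDataDecoder wrapper used when the CDM can only decrypt: each input
+// sample is sent to the CDM for decryption and, once decrypted, forwarded
+// to the wrapped (platform) decoder for decoding.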
+class EMEDecryptor : public MediaDataDecoder {
+
+public:
+
+ EMEDecryptor(MediaDataDecoder* aDecoder,
+ MediaDataDecoderCallback* aCallback,
+ CDMProxy* aProxy,
+ TaskQueue* aDecodeTaskQueue)
+ : mDecoder(aDecoder)
+ , mCallback(aCallback)
+ , mTaskQueue(aDecodeTaskQueue)
+ , mProxy(aProxy)
+ , mSamplesWaitingForKey(new SamplesWaitingForKey(this, this->mCallback,
+ mTaskQueue, mProxy))
+ , mIsShutdown(false)
+ {
+ }
+
+ RefPtr<InitPromise> Init() override {
+ MOZ_ASSERT(!mIsShutdown);
+ return mDecoder->Init();
+ }
+
+ void Input(MediaRawData* aSample) override {
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ if (mIsShutdown) {
+ NS_WARNING("EME encrypted sample arrived after shutdown");
+ return;
+ }
+ if (mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)) {
+ return;
+ }
+
+ nsAutoPtr<MediaRawDataWriter> writer(aSample->CreateWriter());
+ mProxy->GetSessionIdsForKeyId(aSample->mCrypto.mKeyId,
+ writer->mCrypto.mSessionIds);
+
+ mDecrypts.Put(aSample, new DecryptPromiseRequestHolder());
+ mDecrypts.Get(aSample)->Begin(mProxy->Decrypt(aSample)->Then(
+ mTaskQueue, __func__, this,
+ &EMEDecryptor::Decrypted,
+ &EMEDecryptor::Decrypted));
+ return;
+ }
+
+ void Decrypted(const DecryptResult& aDecrypted) {
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ MOZ_ASSERT(aDecrypted.mSample);
+
+ nsAutoPtr<DecryptPromiseRequestHolder> holder;
+ mDecrypts.RemoveAndForget(aDecrypted.mSample, holder);
+ if (holder) {
+ holder->Complete();
+ } else {
+ // Decryption is not in the list of decrypt operations waiting
+ // for a result. It must have been flushed or drained. Ignore result.
+ return;
+ }
+
+ if (mIsShutdown) {
+ NS_WARNING("EME decrypted sample arrived after shutdown");
+ return;
+ }
+
+ if (aDecrypted.mStatus == NoKeyErr) {
+ // Key became unusable after we sent the sample to CDM to decrypt.
+ // Call Input() again, so that the sample is enqueued for decryption
+ // if the key becomes usable again.
+ Input(aDecrypted.mSample);
+ } else if (aDecrypted.mStatus != Ok) {
+ if (mCallback) {
+ mCallback->Error(MediaResult(
+ NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("decrypted.mStatus=%u", uint32_t(aDecrypted.mStatus))));
+ }
+ } else {
+ MOZ_ASSERT(!mIsShutdown);
+ // The Adobe GMP AAC decoder gets confused if we pass it non-encrypted
+ // samples with valid crypto data. So clear the crypto data, since the
+ // sample should be decrypted now anyway. If we don't do this and we're
+ // using the Adobe GMP for unencrypted decoding of data that is decrypted
+ // by gmp-clearkey, decoding will fail.
+ UniquePtr<MediaRawDataWriter> writer(aDecrypted.mSample->CreateWriter());
+ writer->mCrypto = CryptoSample();
+ mDecoder->Input(aDecrypted.mSample);
+ }
+ }
+
+ void Flush() override {
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ MOZ_ASSERT(!mIsShutdown);
+ for (auto iter = mDecrypts.Iter(); !iter.Done(); iter.Next()) {
+ nsAutoPtr<DecryptPromiseRequestHolder>& holder = iter.Data();
+ holder->DisconnectIfExists();
+ iter.Remove();
+ }
+ mDecoder->Flush();
+ mSamplesWaitingForKey->Flush();
+ }
+
+ void Drain() override {
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ MOZ_ASSERT(!mIsShutdown);
+ for (auto iter = mDecrypts.Iter(); !iter.Done(); iter.Next()) {
+ nsAutoPtr<DecryptPromiseRequestHolder>& holder = iter.Data();
+ holder->DisconnectIfExists();
+ iter.Remove();
+ }
+ mDecoder->Drain();
+ }
+
+ void Shutdown() override {
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ MOZ_ASSERT(!mIsShutdown);
+ mIsShutdown = true;
+ mDecoder->Shutdown();
+ mSamplesWaitingForKey->BreakCycles();
+ mSamplesWaitingForKey = nullptr;
+ mDecoder = nullptr;
+ mProxy = nullptr;
+ mCallback = nullptr;
+ }
+
+ const char* GetDescriptionName() const override {
+ return mDecoder->GetDescriptionName();
+ }
+
+private:
+
+ RefPtr<MediaDataDecoder> mDecoder;
+ MediaDataDecoderCallback* mCallback;
+ RefPtr<TaskQueue> mTaskQueue;
+ RefPtr<CDMProxy> mProxy;
+ nsClassHashtable<nsRefPtrHashKey<MediaRawData>, DecryptPromiseRequestHolder> mDecrypts;
+ RefPtr<SamplesWaitingForKey> mSamplesWaitingForKey;
+ bool mIsShutdown;
+};
+
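+// Proxy used when the GMP both decrypts and decodes: samples are queued
+// until their key becomes usable, then handed to the GMP decoder on the
+// GMP thread.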
+class EMEMediaDataDecoderProxy : public MediaDataDecoderProxy {
+public:
+ EMEMediaDataDecoderProxy(already_AddRefed<AbstractThread> aProxyThread,
+ MediaDataDecoderCallback* aCallback,
+ CDMProxy* aProxy,
+ TaskQueue* aTaskQueue)
+ : MediaDataDecoderProxy(Move(aProxyThread), aCallback)
+ , mSamplesWaitingForKey(new SamplesWaitingForKey(this, aCallback,
+ aTaskQueue, aProxy))
+ , mProxy(aProxy)
+ {
+ }
+
+ void Input(MediaRawData* aSample) override;
+ void Shutdown() override;
+
+private:
+ RefPtr<SamplesWaitingForKey> mSamplesWaitingForKey;
+ RefPtr<CDMProxy> mProxy;
+};
+
+void
+EMEMediaDataDecoderProxy::Input(MediaRawData* aSample)
+{
+ if (mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)) {
+ return;
+ }
+
+ nsAutoPtr<MediaRawDataWriter> writer(aSample->CreateWriter());
+ mProxy->GetSessionIdsForKeyId(aSample->mCrypto.mKeyId,
+ writer->mCrypto.mSessionIds);
+
+ MediaDataDecoderProxy::Input(aSample);
+}
+
+void
+EMEMediaDataDecoderProxy::Shutdown()
+{
+ MediaDataDecoderProxy::Shutdown();
+
+ mSamplesWaitingForKey->BreakCycles();
+ mSamplesWaitingForKey = nullptr;
+ mProxy = nullptr;
+}
+
+EMEDecoderModule::EMEDecoderModule(CDMProxy* aProxy, PDMFactory* aPDM)
+ : mProxy(aProxy)
+ , mPDM(aPDM)
+{
+}
+
+EMEDecoderModule::~EMEDecoderModule()
+{
+}
+
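+// Builds an EMEMediaDataDecoderProxy running on the GMP thread. Returns
+// nullptr if the GMP service or its thread is unavailable.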
+static already_AddRefed<MediaDataDecoderProxy>
+CreateDecoderWrapper(MediaDataDecoderCallback* aCallback, CDMProxy* aProxy, TaskQueue* aTaskQueue)
+{
+ RefPtr<gmp::GeckoMediaPluginService> s(gmp::GeckoMediaPluginService::GetGeckoMediaPluginService());
+ if (!s) {
+ return nullptr;
+ }
+ RefPtr<AbstractThread> thread(s->GetAbstractGMPThread());
+ if (!thread) {
+ return nullptr;
+ }
+ RefPtr<MediaDataDecoderProxy> decoder(
+ new EMEMediaDataDecoderProxy(thread.forget(), aCallback, aProxy, aTaskQueue));
+ return decoder.forget();
+}
+
+already_AddRefed<MediaDataDecoder>
+EMEDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
+{
+ MOZ_ASSERT(aParams.mConfig.mCrypto.mValid);
+
+ if (SupportsMimeType(aParams.mConfig.mMimeType, nullptr)) {
+ // GMP decodes. Assume that means it can decrypt too.
+ RefPtr<MediaDataDecoderProxy> wrapper =
+ CreateDecoderWrapper(aParams.mCallback, mProxy, aParams.mTaskQueue);
+ auto params = GMPVideoDecoderParams(aParams).WithCallback(wrapper);
+ wrapper->SetProxyTarget(new EMEVideoDecoder(mProxy, params));
+ return wrapper.forget();
+ }
+
+ MOZ_ASSERT(mPDM);
+ RefPtr<MediaDataDecoder> decoder(mPDM->CreateDecoder(aParams));
+ if (!decoder) {
+ return nullptr;
+ }
+
+ RefPtr<MediaDataDecoder> emeDecoder(new EMEDecryptor(decoder,
+ aParams.mCallback,
+ mProxy,
+ AbstractThread::GetCurrent()->AsTaskQueue()));
+ return emeDecoder.forget();
+}
+
+already_AddRefed<MediaDataDecoder>
+EMEDecoderModule::CreateAudioDecoder(const CreateDecoderParams& aParams)
+{
+ MOZ_ASSERT(aParams.mConfig.mCrypto.mValid);
+
+ if (SupportsMimeType(aParams.mConfig.mMimeType, nullptr)) {
+ // GMP decodes. Assume that means it can decrypt too.
+ RefPtr<MediaDataDecoderProxy> wrapper =
+ CreateDecoderWrapper(aParams.mCallback, mProxy, aParams.mTaskQueue);
+ auto gmpParams = GMPAudioDecoderParams(aParams).WithCallback(wrapper);
+ wrapper->SetProxyTarget(new EMEAudioDecoder(mProxy, gmpParams));
+ return wrapper.forget();
+ }
+
+ MOZ_ASSERT(mPDM);
+ RefPtr<MediaDataDecoder> decoder(mPDM->CreateDecoder(aParams));
+ if (!decoder) {
+ return nullptr;
+ }
+
+ RefPtr<MediaDataDecoder> emeDecoder(new EMEDecryptor(decoder,
+ aParams.mCallback,
+ mProxy,
+ AbstractThread::GetCurrent()->AsTaskQueue()));
+ return emeDecoder.forget();
+}
+
+PlatformDecoderModule::ConversionRequired
+EMEDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
+{
+ if (aConfig.IsVideo() && MP4Decoder::IsH264(aConfig.mMimeType)) {
+ return ConversionRequired::kNeedAVCC;
+ } else {
+ return ConversionRequired::kNeedNone;
+ }
+}
+
+bool
+EMEDecoderModule::SupportsMimeType(const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const
+{
+ Maybe<nsCString> gmp;
+ gmp.emplace(NS_ConvertUTF16toUTF8(mProxy->KeySystem()));
+ return GMPDecoderModule::SupportsMimeType(aMimeType, gmp);
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/eme/EMEDecoderModule.h b/dom/media/platforms/agnostic/eme/EMEDecoderModule.h
new file mode 100644
index 000000000..32074ae8c
--- /dev/null
+++ b/dom/media/platforms/agnostic/eme/EMEDecoderModule.h
@@ -0,0 +1,52 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(EMEDecoderModule_h_)
+#define EMEDecoderModule_h_
+
+#include "PlatformDecoderModule.h"
+#include "PDMFactory.h"
+#include "gmp-decryption.h"
+
+namespace mozilla {
+
+class CDMProxy;
+
+class EMEDecoderModule : public PlatformDecoderModule {
+public:
+ EMEDecoderModule(CDMProxy* aProxy, PDMFactory* aPDM);
+
+ virtual ~EMEDecoderModule();
+
+protected:
+ // Decode thread.
+ already_AddRefed<MediaDataDecoder>
+ CreateVideoDecoder(const CreateDecoderParams& aParams) override;
+
+ // Decode thread.
+ already_AddRefed<MediaDataDecoder>
+ CreateAudioDecoder(const CreateDecoderParams& aParams) override;
+
+ ConversionRequired
+ DecoderNeedsConversion(const TrackInfo& aConfig) const override;
+
+ bool
+ SupportsMimeType(const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const override;
+
+private:
+ RefPtr<CDMProxy> mProxy;
+ // Will be null if the CDM has decoding capability.
+ RefPtr<PDMFactory> mPDM;
+ // We run the PDM on its own task queue.
+ RefPtr<TaskQueue> mTaskQueue;
+};
+
+} // namespace mozilla
+
+#endif // EMEDecoderModule_h_
diff --git a/dom/media/platforms/agnostic/eme/EMEVideoDecoder.cpp b/dom/media/platforms/agnostic/eme/EMEVideoDecoder.cpp
new file mode 100644
index 000000000..bddb89660
--- /dev/null
+++ b/dom/media/platforms/agnostic/eme/EMEVideoDecoder.cpp
@@ -0,0 +1,68 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "EMEVideoDecoder.h"
+#include "GMPVideoEncodedFrameImpl.h"
+#include "mozilla/CDMProxy.h"
+#include "MediaData.h"
+#include "MP4Decoder.h"
+#include "VPXDecoder.h"
+
+namespace mozilla {
+
+void
+EMEVideoCallbackAdapter::Error(GMPErr aErr)
+{
+ if (aErr == GMPNoKeyErr) {
+ // The GMP failed to decrypt a frame due to not having a key. This can
+ // happen if a key expires or a session is closed during playback.
+ NS_WARNING("GMP failed to decrypt due to lack of key");
+ return;
+ }
+ VideoCallbackAdapter::Error(aErr);
+}
+
+EMEVideoDecoder::EMEVideoDecoder(CDMProxy* aProxy,
+ const GMPVideoDecoderParams& aParams)
+ : GMPVideoDecoder(GMPVideoDecoderParams(aParams).WithAdapter(
+ new EMEVideoCallbackAdapter(aParams.mCallback,
+ VideoInfo(aParams.mConfig.mDisplay),
+ aParams.mImageContainer)))
+ , mProxy(aProxy)
+ , mDecryptorId(aProxy->GetDecryptorId())
+{}
+
+void
+EMEVideoDecoder::InitTags(nsTArray<nsCString>& aTags)
+{
+ VideoInfo config = GetConfig();
+ if (MP4Decoder::IsH264(config.mMimeType)) {
+ aTags.AppendElement(NS_LITERAL_CSTRING("h264"));
+ } else if (VPXDecoder::IsVP8(config.mMimeType)) {
+ aTags.AppendElement(NS_LITERAL_CSTRING("vp8"));
+ } else if (VPXDecoder::IsVP9(config.mMimeType)) {
+ aTags.AppendElement(NS_LITERAL_CSTRING("vp9"));
+ }
+ aTags.AppendElement(NS_ConvertUTF16toUTF8(mProxy->KeySystem()));
+}
+
+nsCString
+EMEVideoDecoder::GetNodeId()
+{
+ return mProxy->GetNodeId();
+}
+
+GMPUniquePtr<GMPVideoEncodedFrame>
+EMEVideoDecoder::CreateFrame(MediaRawData* aSample)
+{
+ GMPUniquePtr<GMPVideoEncodedFrame> frame = GMPVideoDecoder::CreateFrame(aSample);
+ if (frame && aSample->mCrypto.mValid) {
+ static_cast<gmp::GMPVideoEncodedFrameImpl*>(frame.get())->InitCrypto(aSample->mCrypto);
+ }
+ return frame;
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/eme/EMEVideoDecoder.h b/dom/media/platforms/agnostic/eme/EMEVideoDecoder.h
new file mode 100644
index 000000000..a0f23f867
--- /dev/null
+++ b/dom/media/platforms/agnostic/eme/EMEVideoDecoder.h
@@ -0,0 +1,45 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef EMEVideoDecoder_h_
+#define EMEVideoDecoder_h_
+
+#include "GMPVideoDecoder.h"
+#include "PlatformDecoderModule.h"
+
+namespace mozilla {
+
+class CDMProxy;
+class TaskQueue;
+
+class EMEVideoCallbackAdapter : public VideoCallbackAdapter {
+public:
+ EMEVideoCallbackAdapter(MediaDataDecoderCallbackProxy* aCallback,
+ VideoInfo aVideoInfo,
+ layers::ImageContainer* aImageContainer)
+ : VideoCallbackAdapter(aCallback, aVideoInfo, aImageContainer)
+ {}
+
+ void Error(GMPErr aErr) override;
+};
+
+class EMEVideoDecoder : public GMPVideoDecoder {
+public:
+ EMEVideoDecoder(CDMProxy* aProxy, const GMPVideoDecoderParams& aParams);
+
+private:
+ void InitTags(nsTArray<nsCString>& aTags) override;
+ nsCString GetNodeId() override;
+ uint32_t DecryptorId() const override { return mDecryptorId; }
+ GMPUniquePtr<GMPVideoEncodedFrame> CreateFrame(MediaRawData* aSample) override;
+
+ RefPtr<CDMProxy> mProxy;
+ uint32_t mDecryptorId;
+};
+
+} // namespace mozilla
+
+#endif // EMEVideoDecoder_h_
diff --git a/dom/media/platforms/agnostic/eme/SamplesWaitingForKey.cpp b/dom/media/platforms/agnostic/eme/SamplesWaitingForKey.cpp
new file mode 100644
index 000000000..58098c2b3
--- /dev/null
+++ b/dom/media/platforms/agnostic/eme/SamplesWaitingForKey.cpp
@@ -0,0 +1,86 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "SamplesWaitingForKey.h"
+#include "mozilla/CDMProxy.h"
+#include "mozilla/CDMCaps.h"
+#include "MediaData.h"
+
+namespace mozilla {
+
+SamplesWaitingForKey::SamplesWaitingForKey(MediaDataDecoder* aDecoder,
+ MediaDataDecoderCallback* aCallback,
+ TaskQueue* aTaskQueue,
+ CDMProxy* aProxy)
+ : mMutex("SamplesWaitingForKey")
+ , mDecoder(aDecoder)
+ , mDecoderCallback(aCallback)
+ , mTaskQueue(aTaskQueue)
+ , mProxy(aProxy)
+{
+}
+
+SamplesWaitingForKey::~SamplesWaitingForKey()
+{
+}
+
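+// If the sample's key is not yet usable, queues the sample, notifies the
+// callback that we are waiting for a key, and registers for NotifyUsable().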
+bool
+SamplesWaitingForKey::WaitIfKeyNotUsable(MediaRawData* aSample)
+{
+ if (!aSample || !aSample->mCrypto.mValid || !mProxy) {
+ return false;
+ }
+ CDMCaps::AutoLock caps(mProxy->Capabilites());
+ const auto& keyid = aSample->mCrypto.mKeyId;
+ if (!caps.IsKeyUsable(keyid)) {
+ {
+ MutexAutoLock lock(mMutex);
+ mSamples.AppendElement(aSample);
+ }
+ mDecoderCallback->WaitingForKey();
+ caps.NotifyWhenKeyIdUsable(aSample->mCrypto.mKeyId, this);
+ return true;
+ }
+ return false;
+}
+
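+// Called once aKeyId becomes usable; re-dispatches every queued sample
+// carrying that key id back to the decoder's Input() on the task queue.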
+void
+SamplesWaitingForKey::NotifyUsable(const CencKeyId& aKeyId)
+{
+ MutexAutoLock lock(mMutex);
+ size_t i = 0;
+ while (i < mSamples.Length()) {
+ if (aKeyId == mSamples[i]->mCrypto.mKeyId) {
+ RefPtr<nsIRunnable> task;
+ task = NewRunnableMethod<RefPtr<MediaRawData>>(mDecoder,
+ &MediaDataDecoder::Input,
+ RefPtr<MediaRawData>(mSamples[i]));
+ mSamples.RemoveElementAt(i);
+ mTaskQueue->Dispatch(task.forget());
+ } else {
+ i++;
+ }
+ }
+}
+
+void
+SamplesWaitingForKey::Flush()
+{
+ MutexAutoLock lock(mMutex);
+ mSamples.Clear();
+}
+
+void
+SamplesWaitingForKey::BreakCycles()
+{
+ MutexAutoLock lock(mMutex);
+ mDecoder = nullptr;
+ mTaskQueue = nullptr;
+ mProxy = nullptr;
+ mSamples.Clear();
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/eme/SamplesWaitingForKey.h b/dom/media/platforms/agnostic/eme/SamplesWaitingForKey.h
new file mode 100644
index 000000000..65bb14403
--- /dev/null
+++ b/dom/media/platforms/agnostic/eme/SamplesWaitingForKey.h
@@ -0,0 +1,58 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef SamplesWaitingForKey_h_
+#define SamplesWaitingForKey_h_
+
+#include "mozilla/TaskQueue.h"
+
+#include "PlatformDecoderModule.h"
+
+namespace mozilla {
+
+typedef nsTArray<uint8_t> CencKeyId;
+
+class CDMProxy;
+
+// Encapsulates the task of waiting for the CDMProxy to have the necessary
+// keys to decrypt a given sample.
+class SamplesWaitingForKey {
+public:
+
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SamplesWaitingForKey)
+
+ explicit SamplesWaitingForKey(MediaDataDecoder* aDecoder,
+ MediaDataDecoderCallback* aCallback,
+ TaskQueue* aTaskQueue,
+ CDMProxy* aProxy);
+
+ // Returns true if we need to wait for a key to become usable.
+ // Once the sample can be decrypted, MediaDataDecoder::Input(aSample) is
+ // called back on mDecoder. The order of input samples is preserved.
+ bool WaitIfKeyNotUsable(MediaRawData* aSample);
+
+ void NotifyUsable(const CencKeyId& aKeyId);
+
+ void Flush();
+
+ void BreakCycles();
+
+protected:
+ ~SamplesWaitingForKey();
+
+private:
+ Mutex mMutex;
+ RefPtr<MediaDataDecoder> mDecoder;
+ MediaDataDecoderCallback* mDecoderCallback;
+ RefPtr<TaskQueue> mTaskQueue;
+ RefPtr<CDMProxy> mProxy;
+ nsTArray<RefPtr<MediaRawData>> mSamples;
+};
+
+} // namespace mozilla
+
+#endif // SamplesWaitingForKey_h_
diff --git a/dom/media/platforms/agnostic/eme/moz.build b/dom/media/platforms/agnostic/eme/moz.build
new file mode 100644
index 000000000..78b76d4d4
--- /dev/null
+++ b/dom/media/platforms/agnostic/eme/moz.build
@@ -0,0 +1,23 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+EXPORTS += [
+ 'EMEAudioDecoder.h',
+ 'EMEDecoderModule.h',
+ 'EMEVideoDecoder.h',
+ 'SamplesWaitingForKey.h',
+]
+
+UNIFIED_SOURCES += [
+ 'EMEAudioDecoder.cpp',
+ 'EMEDecoderModule.cpp',
+ 'EMEVideoDecoder.cpp',
+ 'SamplesWaitingForKey.cpp',
+]
+
+include('/ipc/chromium/chromium-config.mozbuild')
+
+FINAL_LIBRARY = 'xul'
diff --git a/dom/media/platforms/agnostic/gmp/GMPAudioDecoder.cpp b/dom/media/platforms/agnostic/gmp/GMPAudioDecoder.cpp
new file mode 100644
index 000000000..d863d44d4
--- /dev/null
+++ b/dom/media/platforms/agnostic/gmp/GMPAudioDecoder.cpp
@@ -0,0 +1,306 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "GMPAudioDecoder.h"
+#include "nsServiceManagerUtils.h"
+#include "MediaInfo.h"
+#include "GMPDecoderModule.h"
+#include "nsPrintfCString.h"
+
+namespace mozilla {
+
+#if defined(DEBUG)
+bool IsOnGMPThread()
+{
+ nsCOMPtr<mozIGeckoMediaPluginService> mps = do_GetService("@mozilla.org/gecko-media-plugin-service;1");
+ MOZ_ASSERT(mps);
+
+ nsCOMPtr<nsIThread> gmpThread;
+ nsresult rv = mps->GetThread(getter_AddRefs(gmpThread));
+ MOZ_ASSERT(NS_SUCCEEDED(rv) && gmpThread);
+ return NS_GetCurrentThread() == gmpThread;
+}
+#endif
+
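+// Converts the GMP's interleaved 16-bit PCM to the internal sample format
+// and derives timestamps and durations from the running frame count and
+// sample rate.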
+void
+AudioCallbackAdapter::Decoded(const nsTArray<int16_t>& aPCM, uint64_t aTimeStamp, uint32_t aChannels, uint32_t aRate)
+{
+ MOZ_ASSERT(IsOnGMPThread());
+
+ if (aRate == 0 || aChannels == 0) {
+ mCallback->Error(MediaResult(
+ NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL(
+ "Invalid rate or num channels returned on GMP audio samples")));
+ return;
+ }
+
+ size_t numFrames = aPCM.Length() / aChannels;
+ MOZ_ASSERT((aPCM.Length() % aChannels) == 0);
+ AlignedAudioBuffer audioData(aPCM.Length());
+ if (!audioData) {
+ mCallback->Error(
+ MediaResult(NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("Unable to allocate audio buffer")));
+ return;
+ }
+
+ for (size_t i = 0; i < aPCM.Length(); ++i) {
+ audioData[i] = AudioSampleToFloat(aPCM[i]);
+ }
+
+ if (mMustRecaptureAudioPosition) {
+ mAudioFrameSum = 0;
+ auto timestamp = UsecsToFrames(aTimeStamp, aRate);
+ if (!timestamp.isValid()) {
+ mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+ RESULT_DETAIL("Invalid timestamp")));
+ return;
+ }
+ mAudioFrameOffset = timestamp.value();
+ mMustRecaptureAudioPosition = false;
+ }
+
+ auto timestamp = FramesToUsecs(mAudioFrameOffset + mAudioFrameSum, aRate);
+ if (!timestamp.isValid()) {
+ mCallback->Error(
+ MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+ RESULT_DETAIL("Invalid timestamp on audio samples")));
+ return;
+ }
+ mAudioFrameSum += numFrames;
+
+ auto duration = FramesToUsecs(numFrames, aRate);
+ if (!duration.isValid()) {
+ mCallback->Error(
+ MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+ RESULT_DETAIL("Invalid duration on audio samples")));
+ return;
+ }
+
+ RefPtr<AudioData> audio(new AudioData(mLastStreamOffset,
+ timestamp.value(),
+ duration.value(),
+ numFrames,
+ Move(audioData),
+ aChannels,
+ aRate));
+
+#ifdef LOG_SAMPLE_DECODE
+ LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",
+ timestamp, duration, currentLength);
+#endif
+
+ mCallback->Output(audio);
+}
+
+void
+AudioCallbackAdapter::InputDataExhausted()
+{
+ MOZ_ASSERT(IsOnGMPThread());
+ mCallback->InputExhausted();
+}
+
+void
+AudioCallbackAdapter::DrainComplete()
+{
+ MOZ_ASSERT(IsOnGMPThread());
+ mCallback->DrainComplete();
+}
+
+void
+AudioCallbackAdapter::ResetComplete()
+{
+ MOZ_ASSERT(IsOnGMPThread());
+ mMustRecaptureAudioPosition = true;
+ mCallback->FlushComplete();
+}
+
+void
+AudioCallbackAdapter::Error(GMPErr aErr)
+{
+ MOZ_ASSERT(IsOnGMPThread());
+ mCallback->Error(MediaResult(aErr == GMPDecodeErr
+ ? NS_ERROR_DOM_MEDIA_DECODE_ERR
+ : NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("GMPErr:%x", aErr)));
+}
+
+void
+AudioCallbackAdapter::Terminated()
+{
+ mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Audio GMP decoder terminated.")));
+}
+
+GMPAudioDecoderParams::GMPAudioDecoderParams(const CreateDecoderParams& aParams)
+ : mConfig(aParams.AudioConfig())
+ , mTaskQueue(aParams.mTaskQueue)
+ , mCallback(nullptr)
+ , mAdapter(nullptr)
+ , mCrashHelper(aParams.mCrashHelper)
+{}
+
+GMPAudioDecoderParams&
+GMPAudioDecoderParams::WithCallback(MediaDataDecoderProxy* aWrapper)
+{
+ MOZ_ASSERT(aWrapper);
+ MOZ_ASSERT(!mCallback); // Should only be called once per instance.
+ mCallback = aWrapper->Callback();
+ mAdapter = nullptr;
+ return *this;
+}
+
+GMPAudioDecoderParams&
+GMPAudioDecoderParams::WithAdapter(AudioCallbackAdapter* aAdapter)
+{
+ MOZ_ASSERT(aAdapter);
+ MOZ_ASSERT(!mAdapter); // Should only be called once per instance.
+ mCallback = aAdapter->Callback();
+ mAdapter = aAdapter;
+ return *this;
+}
+
+GMPAudioDecoder::GMPAudioDecoder(const GMPAudioDecoderParams& aParams)
+ : mConfig(aParams.mConfig)
+ , mCallback(aParams.mCallback)
+ , mGMP(nullptr)
+ , mAdapter(aParams.mAdapter)
+ , mCrashHelper(aParams.mCrashHelper)
+{
+ MOZ_ASSERT(!mAdapter || mCallback == mAdapter->Callback());
+ if (!mAdapter) {
+ mAdapter = new AudioCallbackAdapter(mCallback);
+ }
+}
+
+void
+GMPAudioDecoder::InitTags(nsTArray<nsCString>& aTags)
+{
+ aTags.AppendElement(NS_LITERAL_CSTRING("aac"));
+ const Maybe<nsCString> gmp(
+ GMPDecoderModule::PreferredGMP(NS_LITERAL_CSTRING("audio/mp4a-latm")));
+ if (gmp.isSome()) {
+ aTags.AppendElement(gmp.value());
+ }
+}
+
+nsCString
+GMPAudioDecoder::GetNodeId()
+{
+ return SHARED_GMP_DECODING_NODE_ID;
+}
+
+void
+GMPAudioDecoder::GMPInitDone(GMPAudioDecoderProxy* aGMP)
+{
+ MOZ_ASSERT(IsOnGMPThread());
+
+ if (!aGMP) {
+ mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ return;
+ }
+ if (mInitPromise.IsEmpty()) {
+ // The GMP must have been shut down while we were waiting for the Init
+ // operation to complete.
+ aGMP->Close();
+ return;
+ }
+ nsTArray<uint8_t> codecSpecific;
+ codecSpecific.AppendElements(mConfig.mCodecSpecificConfig->Elements(),
+ mConfig.mCodecSpecificConfig->Length());
+
+ nsresult rv = aGMP->InitDecode(kGMPAudioCodecAAC,
+ mConfig.mChannels,
+ mConfig.mBitDepth,
+ mConfig.mRate,
+ codecSpecific,
+ mAdapter);
+ if (NS_FAILED(rv)) {
+ aGMP->Close();
+ mInitPromise.Reject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ return;
+ }
+
+ mGMP = aGMP;
+ mInitPromise.Resolve(TrackInfo::kAudioTrack, __func__);
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+GMPAudioDecoder::Init()
+{
+ MOZ_ASSERT(IsOnGMPThread());
+
+ mMPS = do_GetService("@mozilla.org/gecko-media-plugin-service;1");
+ MOZ_ASSERT(mMPS);
+
+ RefPtr<InitPromise> promise(mInitPromise.Ensure(__func__));
+
+ nsTArray<nsCString> tags;
+ InitTags(tags);
+ UniquePtr<GetGMPAudioDecoderCallback> callback(new GMPInitDoneCallback(this));
+ if (NS_FAILED(mMPS->GetGMPAudioDecoder(mCrashHelper, &tags, GetNodeId(), Move(callback)))) {
+ mInitPromise.Reject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+ return promise;
+}
+
+void
+GMPAudioDecoder::Input(MediaRawData* aSample)
+{
+ MOZ_ASSERT(IsOnGMPThread());
+
+ RefPtr<MediaRawData> sample(aSample);
+ if (!mGMP) {
+ mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("mGMP not initialized")));
+ return;
+ }
+
+ mAdapter->SetLastStreamOffset(sample->mOffset);
+
+ gmp::GMPAudioSamplesImpl samples(sample, mConfig.mChannels, mConfig.mRate);
+ nsresult rv = mGMP->Decode(samples);
+ if (NS_FAILED(rv)) {
+ mCallback->Error(MediaResult(rv, __func__));
+ }
+}
+
+void
+GMPAudioDecoder::Flush()
+{
+ MOZ_ASSERT(IsOnGMPThread());
+
+ if (!mGMP || NS_FAILED(mGMP->Reset())) {
+ // Abort the flush.
+ mCallback->FlushComplete();
+ }
+}
+
+void
+GMPAudioDecoder::Drain()
+{
+ MOZ_ASSERT(IsOnGMPThread());
+
+ if (!mGMP || NS_FAILED(mGMP->Drain())) {
+ mCallback->DrainComplete();
+ }
+}
+
+void
+GMPAudioDecoder::Shutdown()
+{
+ mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+ if (!mGMP) {
+ return;
+ }
+ // Note this unblocks flush and drain operations waiting for callbacks.
+ mGMP->Close();
+ mGMP = nullptr;
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/gmp/GMPAudioDecoder.h b/dom/media/platforms/agnostic/gmp/GMPAudioDecoder.h
new file mode 100644
index 000000000..90e3ebdb6
--- /dev/null
+++ b/dom/media/platforms/agnostic/gmp/GMPAudioDecoder.h
@@ -0,0 +1,112 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(GMPAudioDecoder_h_)
+#define GMPAudioDecoder_h_
+
+#include "GMPAudioDecoderProxy.h"
+#include "MediaDataDecoderProxy.h"
+#include "PlatformDecoderModule.h"
+#include "mozIGeckoMediaPluginService.h"
+#include "nsAutoPtr.h"
+
+namespace mozilla {
+
+class AudioCallbackAdapter : public GMPAudioDecoderCallbackProxy {
+public:
+ explicit AudioCallbackAdapter(MediaDataDecoderCallbackProxy* aCallback)
+ : mCallback(aCallback)
+ , mLastStreamOffset(0)
+ , mAudioFrameSum(0)
+ , mAudioFrameOffset(0)
+ , mMustRecaptureAudioPosition(true)
+ {}
+
+ MediaDataDecoderCallbackProxy* Callback() const { return mCallback; }
+
+ // GMPAudioDecoderCallbackProxy
+ void Decoded(const nsTArray<int16_t>& aPCM, uint64_t aTimeStamp, uint32_t aChannels, uint32_t aRate) override;
+ void InputDataExhausted() override;
+ void DrainComplete() override;
+ void ResetComplete() override;
+ void Error(GMPErr aErr) override;
+ void Terminated() override;
+
+ void SetLastStreamOffset(int64_t aStreamOffset) {
+ mLastStreamOffset = aStreamOffset;
+ }
+
+private:
+ MediaDataDecoderCallbackProxy* mCallback;
+ int64_t mLastStreamOffset;
+
+ int64_t mAudioFrameSum;
+ int64_t mAudioFrameOffset;
+ bool mMustRecaptureAudioPosition;
+};
+
+struct GMPAudioDecoderParams {
+ explicit GMPAudioDecoderParams(const CreateDecoderParams& aParams);
+ GMPAudioDecoderParams& WithCallback(MediaDataDecoderProxy* aWrapper);
+ GMPAudioDecoderParams& WithAdapter(AudioCallbackAdapter* aAdapter);
+
+ const AudioInfo& mConfig;
+ TaskQueue* mTaskQueue;
+ MediaDataDecoderCallbackProxy* mCallback;
+ AudioCallbackAdapter* mAdapter;
+ RefPtr<GMPCrashHelper> mCrashHelper;
+};
+
+class GMPAudioDecoder : public MediaDataDecoder {
+public:
+ explicit GMPAudioDecoder(const GMPAudioDecoderParams& aParams);
+
+ RefPtr<InitPromise> Init() override;
+ void Input(MediaRawData* aSample) override;
+ void Flush() override;
+ void Drain() override;
+ void Shutdown() override;
+ const char* GetDescriptionName() const override
+ {
+ return "GMP audio decoder";
+ }
+
+protected:
+ virtual void InitTags(nsTArray<nsCString>& aTags);
+ virtual nsCString GetNodeId();
+
+private:
+
+ class GMPInitDoneCallback : public GetGMPAudioDecoderCallback
+ {
+ public:
+ explicit GMPInitDoneCallback(GMPAudioDecoder* aDecoder)
+ : mDecoder(aDecoder)
+ {
+ }
+
+ void Done(GMPAudioDecoderProxy* aGMP) override
+ {
+ mDecoder->GMPInitDone(aGMP);
+ }
+
+ private:
+ RefPtr<GMPAudioDecoder> mDecoder;
+ };
+ void GMPInitDone(GMPAudioDecoderProxy* aGMP);
+
+ const AudioInfo mConfig;
+ MediaDataDecoderCallbackProxy* mCallback;
+ nsCOMPtr<mozIGeckoMediaPluginService> mMPS;
+ GMPAudioDecoderProxy* mGMP;
+ nsAutoPtr<AudioCallbackAdapter> mAdapter;
+ MozPromiseHolder<InitPromise> mInitPromise;
+ RefPtr<GMPCrashHelper> mCrashHelper;
+};
+
+} // namespace mozilla
+
+#endif // GMPAudioDecoder_h_
diff --git a/dom/media/platforms/agnostic/gmp/GMPDecoderModule.cpp b/dom/media/platforms/agnostic/gmp/GMPDecoderModule.cpp
new file mode 100644
index 000000000..cc53d2c93
--- /dev/null
+++ b/dom/media/platforms/agnostic/gmp/GMPDecoderModule.cpp
@@ -0,0 +1,172 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "GMPDecoderModule.h"
+#include "DecoderDoctorDiagnostics.h"
+#include "GMPAudioDecoder.h"
+#include "GMPVideoDecoder.h"
+#include "GMPUtils.h"
+#include "MediaDataDecoderProxy.h"
+#include "MediaPrefs.h"
+#include "VideoUtils.h"
+#include "mozIGeckoMediaPluginService.h"
+#include "nsServiceManagerUtils.h"
+#include "mozilla/StaticMutex.h"
+#include "gmp-audio-decode.h"
+#include "gmp-video-decode.h"
+#include "MP4Decoder.h"
+#include "VPXDecoder.h"
+#ifdef XP_WIN
+#include "WMFDecoderModule.h"
+#endif
+
+namespace mozilla {
+
+GMPDecoderModule::GMPDecoderModule()
+{
+}
+
+GMPDecoderModule::~GMPDecoderModule()
+{
+}
+
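+// Creates a MediaDataDecoderProxy bound to the GMP thread. Returns nullptr
+// if the GMP service or its abstract thread cannot be obtained.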
+static already_AddRefed<MediaDataDecoderProxy>
+CreateDecoderWrapper(MediaDataDecoderCallback* aCallback)
+{
+ RefPtr<gmp::GeckoMediaPluginService> s(gmp::GeckoMediaPluginService::GetGeckoMediaPluginService());
+ if (!s) {
+ return nullptr;
+ }
+ RefPtr<AbstractThread> thread(s->GetAbstractGMPThread());
+ if (!thread) {
+ return nullptr;
+ }
+ RefPtr<MediaDataDecoderProxy> decoder(new MediaDataDecoderProxy(thread.forget(), aCallback));
+ return decoder.forget();
+}
+
+already_AddRefed<MediaDataDecoder>
+GMPDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
+{
+ if (!MP4Decoder::IsH264(aParams.mConfig.mMimeType) &&
+ !VPXDecoder::IsVP8(aParams.mConfig.mMimeType) &&
+ !VPXDecoder::IsVP9(aParams.mConfig.mMimeType)) {
+ return nullptr;
+ }
+
+ if (aParams.mDiagnostics) {
+ const Maybe<nsCString> preferredGMP = PreferredGMP(aParams.mConfig.mMimeType);
+ if (preferredGMP.isSome()) {
+ aParams.mDiagnostics->SetGMP(preferredGMP.value());
+ }
+ }
+
+ RefPtr<MediaDataDecoderProxy> wrapper = CreateDecoderWrapper(aParams.mCallback);
+ auto params = GMPVideoDecoderParams(aParams).WithCallback(wrapper);
+ wrapper->SetProxyTarget(new GMPVideoDecoder(params));
+ return wrapper.forget();
+}
+
+already_AddRefed<MediaDataDecoder>
+GMPDecoderModule::CreateAudioDecoder(const CreateDecoderParams& aParams)
+{
+ if (!aParams.mConfig.mMimeType.EqualsLiteral("audio/mp4a-latm")) {
+ return nullptr;
+ }
+
+ if (aParams.mDiagnostics) {
+ const Maybe<nsCString> preferredGMP = PreferredGMP(aParams.mConfig.mMimeType);
+ if (preferredGMP.isSome()) {
+ aParams.mDiagnostics->SetGMP(preferredGMP.value());
+ }
+ }
+
+ RefPtr<MediaDataDecoderProxy> wrapper = CreateDecoderWrapper(aParams.mCallback);
+ auto params = GMPAudioDecoderParams(aParams).WithCallback(wrapper);
+ wrapper->SetProxyTarget(new GMPAudioDecoder(params));
+ return wrapper.forget();
+}
+
+PlatformDecoderModule::ConversionRequired
+GMPDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
+{
+ // GMPVideoCodecType::kGMPVideoCodecH264 specifies that encoded frames must be in AVCC format.
+ if (aConfig.IsVideo() && MP4Decoder::IsH264(aConfig.mMimeType)) {
+ return ConversionRequired::kNeedAVCC;
+ } else {
+ return ConversionRequired::kNeedNone;
+ }
+}
+
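+// Returns the GMP (ClearKey or Primetime) that prefs select for decoding
+// this MIME type, if any.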
+/* static */
+const Maybe<nsCString>
+GMPDecoderModule::PreferredGMP(const nsACString& aMimeType)
+{
+ Maybe<nsCString> rv;
+ if (aMimeType.EqualsLiteral("audio/mp4a-latm")) {
+ switch (MediaPrefs::GMPAACPreferred()) {
+ case 1: rv.emplace(kEMEKeySystemClearkey); break;
+ case 2: rv.emplace(kEMEKeySystemPrimetime); break;
+ default: break;
+ }
+ }
+
+ if (MP4Decoder::IsH264(aMimeType)) {
+ switch (MediaPrefs::GMPH264Preferred()) {
+ case 1: rv.emplace(kEMEKeySystemClearkey); break;
+ case 2: rv.emplace(kEMEKeySystemPrimetime); break;
+ default: break;
+ }
+ }
+
+ return rv;
+}
+
+/* static */
+bool
+GMPDecoderModule::SupportsMimeType(const nsACString& aMimeType,
+ const Maybe<nsCString>& aGMP)
+{
+ if (aGMP.isNothing()) {
+ return false;
+ }
+
+ if (MP4Decoder::IsH264(aMimeType)) {
+ return HaveGMPFor(NS_LITERAL_CSTRING(GMP_API_VIDEO_DECODER),
+ { NS_LITERAL_CSTRING("h264"), aGMP.value()});
+ }
+
+ if (VPXDecoder::IsVP9(aMimeType)) {
+ return HaveGMPFor(NS_LITERAL_CSTRING(GMP_API_VIDEO_DECODER),
+ { NS_LITERAL_CSTRING("vp9"), aGMP.value()});
+ }
+
+ if (VPXDecoder::IsVP8(aMimeType)) {
+ return HaveGMPFor(NS_LITERAL_CSTRING(GMP_API_VIDEO_DECODER),
+ { NS_LITERAL_CSTRING("vp8"), aGMP.value()});
+ }
+
+ if (MP4Decoder::IsAAC(aMimeType)) {
+ return HaveGMPFor(NS_LITERAL_CSTRING(GMP_API_AUDIO_DECODER),
+ { NS_LITERAL_CSTRING("aac"), aGMP.value()});
+ }
+
+ return false;
+}
+
+bool
+GMPDecoderModule::SupportsMimeType(const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const
+{
+ const Maybe<nsCString> preferredGMP = PreferredGMP(aMimeType);
+ bool rv = SupportsMimeType(aMimeType, preferredGMP);
+ if (rv && aDiagnostics && preferredGMP.isSome()) {
+ aDiagnostics->SetGMP(preferredGMP.value());
+ }
+ return rv;
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/gmp/GMPDecoderModule.h b/dom/media/platforms/agnostic/gmp/GMPDecoderModule.h
new file mode 100644
index 000000000..b501ecb54
--- /dev/null
+++ b/dom/media/platforms/agnostic/gmp/GMPDecoderModule.h
@@ -0,0 +1,57 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(GMPDecoderModule_h_)
+#define GMPDecoderModule_h_
+
+#include "PlatformDecoderModule.h"
+#include "mozilla/Maybe.h"
+
+// The special NodeId we use when doing unencrypted decoding using the GMP's
+// decoder. This ensures that each GMP MediaDataDecoder we create doesn't
+// require spinning up a new process, but instead we run all instances of
+// GMP decoders in the one process, to reduce overhead.
+//
+// Note: GMP storage is isolated by NodeId and is non-persistent for this
+// special NodeId. The only way a GMP can communicate with the outside world
+// is through the EME GMP APIs, and we never run EME with this NodeId
+// (because NodeIds are random strings which can't contain the '-' character),
+// so there's no way a malicious GMP can harvest, store, and then report
+// privacy-sensitive data about what users are watching.
+#define SHARED_GMP_DECODING_NODE_ID NS_LITERAL_CSTRING("gmp-shared-decoding")
+
+namespace mozilla {
+
+class GMPDecoderModule : public PlatformDecoderModule {
+public:
+ GMPDecoderModule();
+
+ virtual ~GMPDecoderModule();
+
+ // Decode thread.
+ already_AddRefed<MediaDataDecoder>
+ CreateVideoDecoder(const CreateDecoderParams& aParams) override;
+
+ // Decode thread.
+ already_AddRefed<MediaDataDecoder>
+ CreateAudioDecoder(const CreateDecoderParams& aParams) override;
+
+ ConversionRequired
+ DecoderNeedsConversion(const TrackInfo& aConfig) const override;
+
+ bool
+ SupportsMimeType(const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const override;
+
+ static const Maybe<nsCString> PreferredGMP(const nsACString& aMimeType);
+
+ static bool SupportsMimeType(const nsACString& aMimeType,
+ const Maybe<nsCString>& aGMP);
+};
+
+} // namespace mozilla
+
+#endif // GMPDecoderModule_h_
diff --git a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
new file mode 100644
index 000000000..912b88ce1
--- /dev/null
+++ b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
@@ -0,0 +1,387 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "GMPVideoDecoder.h"
+#include "GMPVideoHost.h"
+#include "mozilla/EndianUtils.h"
+#include "prsystem.h"
+#include "MediaData.h"
+#include "GMPDecoderModule.h"
+#include "VPXDecoder.h"
+
+namespace mozilla {
+
+#if defined(DEBUG)
+extern bool IsOnGMPThread();
+#endif
+
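+// Wraps the GMP's decoded I420 frame in a VideoData by copying its three
+// planes (chroma planes are half the luma dimensions) into a new image.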
+void
+VideoCallbackAdapter::Decoded(GMPVideoi420Frame* aDecodedFrame)
+{
+ GMPUniquePtr<GMPVideoi420Frame> decodedFrame(aDecodedFrame);
+
+ MOZ_ASSERT(IsOnGMPThread());
+
+ VideoData::YCbCrBuffer b;
+ for (int i = 0; i < kGMPNumOfPlanes; ++i) {
+ b.mPlanes[i].mData = decodedFrame->Buffer(GMPPlaneType(i));
+ b.mPlanes[i].mStride = decodedFrame->Stride(GMPPlaneType(i));
+ if (i == kGMPYPlane) {
+ b.mPlanes[i].mWidth = decodedFrame->Width();
+ b.mPlanes[i].mHeight = decodedFrame->Height();
+ } else {
+ b.mPlanes[i].mWidth = (decodedFrame->Width() + 1) / 2;
+ b.mPlanes[i].mHeight = (decodedFrame->Height() + 1) / 2;
+ }
+ b.mPlanes[i].mOffset = 0;
+ b.mPlanes[i].mSkip = 0;
+ }
+
+ gfx::IntRect pictureRegion(0, 0, decodedFrame->Width(), decodedFrame->Height());
+ RefPtr<VideoData> v =
+ VideoData::CreateAndCopyData(mVideoInfo,
+ mImageContainer,
+ mLastStreamOffset,
+ decodedFrame->Timestamp(),
+ decodedFrame->Duration(),
+ b,
+ false,
+ -1,
+ pictureRegion);
+ if (v) {
+ mCallback->Output(v);
+ } else {
+ mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
+ }
+}
+
+void
+VideoCallbackAdapter::ReceivedDecodedReferenceFrame(const uint64_t aPictureId)
+{
+ MOZ_ASSERT(IsOnGMPThread());
+}
+
+void
+VideoCallbackAdapter::ReceivedDecodedFrame(const uint64_t aPictureId)
+{
+ MOZ_ASSERT(IsOnGMPThread());
+}
+
+void
+VideoCallbackAdapter::InputDataExhausted()
+{
+ MOZ_ASSERT(IsOnGMPThread());
+ mCallback->InputExhausted();
+}
+
+void
+VideoCallbackAdapter::DrainComplete()
+{
+ MOZ_ASSERT(IsOnGMPThread());
+ mCallback->DrainComplete();
+}
+
+void
+VideoCallbackAdapter::ResetComplete()
+{
+ MOZ_ASSERT(IsOnGMPThread());
+ mCallback->FlushComplete();
+}
+
+void
+VideoCallbackAdapter::Error(GMPErr aErr)
+{
+ MOZ_ASSERT(IsOnGMPThread());
+ mCallback->Error(MediaResult(aErr == GMPDecodeErr
+ ? NS_ERROR_DOM_MEDIA_DECODE_ERR
+ : NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("GMPErr:%x", aErr)));
+}
+
+void
+VideoCallbackAdapter::Terminated()
+{
+ // Note that this *may* be called from the proxy thread also.
+ mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Video GMP decoder terminated.")));
+}
+
+GMPVideoDecoderParams::GMPVideoDecoderParams(const CreateDecoderParams& aParams)
+ : mConfig(aParams.VideoConfig())
+ , mTaskQueue(aParams.mTaskQueue)
+ , mCallback(nullptr)
+ , mAdapter(nullptr)
+ , mImageContainer(aParams.mImageContainer)
+ , mLayersBackend(aParams.GetLayersBackend())
+ , mCrashHelper(aParams.mCrashHelper)
+{}
+
+GMPVideoDecoderParams&
+GMPVideoDecoderParams::WithCallback(MediaDataDecoderProxy* aWrapper)
+{
+ MOZ_ASSERT(aWrapper);
+ MOZ_ASSERT(!mCallback); // Should only be called once per instance.
+ mCallback = aWrapper->Callback();
+ mAdapter = nullptr;
+ return *this;
+}
+
+GMPVideoDecoderParams&
+GMPVideoDecoderParams::WithAdapter(VideoCallbackAdapter* aAdapter)
+{
+ MOZ_ASSERT(aAdapter);
+ MOZ_ASSERT(!mAdapter); // Should only be called once per instance.
+ mCallback = aAdapter->Callback();
+ mAdapter = aAdapter;
+ return *this;
+}
+
+GMPVideoDecoder::GMPVideoDecoder(const GMPVideoDecoderParams& aParams)
+ : mConfig(aParams.mConfig)
+ , mCallback(aParams.mCallback)
+ , mGMP(nullptr)
+ , mHost(nullptr)
+ , mAdapter(aParams.mAdapter)
+ , mConvertNALUnitLengths(false)
+ , mCrashHelper(aParams.mCrashHelper)
+{
+ MOZ_ASSERT(!mAdapter || mCallback == mAdapter->Callback());
+ if (!mAdapter) {
+ mAdapter = new VideoCallbackAdapter(mCallback,
+ VideoInfo(mConfig.mDisplay.width,
+ mConfig.mDisplay.height),
+ aParams.mImageContainer);
+ }
+}
+
+void
+GMPVideoDecoder::InitTags(nsTArray<nsCString>& aTags)
+{
+ if (MP4Decoder::IsH264(mConfig.mMimeType)) {
+ aTags.AppendElement(NS_LITERAL_CSTRING("h264"));
+ const Maybe<nsCString> gmp(
+ GMPDecoderModule::PreferredGMP(NS_LITERAL_CSTRING("video/avc")));
+ if (gmp.isSome()) {
+ aTags.AppendElement(gmp.value());
+ }
+ } else if (VPXDecoder::IsVP8(mConfig.mMimeType)) {
+ aTags.AppendElement(NS_LITERAL_CSTRING("vp8"));
+ } else if (VPXDecoder::IsVP9(mConfig.mMimeType)) {
+ aTags.AppendElement(NS_LITERAL_CSTRING("vp9"));
+ }
+}
+
+nsCString
+GMPVideoDecoder::GetNodeId()
+{
+ return SHARED_GMP_DECODING_NODE_ID;
+}
+
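+// Copies the compressed sample into a GMP-owned encoded frame and tags it
+// with the timing, keyframe and buffer-format metadata the plugin expects.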
+GMPUniquePtr<GMPVideoEncodedFrame>
+GMPVideoDecoder::CreateFrame(MediaRawData* aSample)
+{
+ GMPVideoFrame* ftmp = nullptr;
+ GMPErr err = mHost->CreateFrame(kGMPEncodedVideoFrame, &ftmp);
+ if (GMP_FAILED(err)) {
+ mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("Host::CreateFrame:%x", err)));
+ return nullptr;
+ }
+
+ GMPUniquePtr<GMPVideoEncodedFrame> frame(static_cast<GMPVideoEncodedFrame*>(ftmp));
+ err = frame->CreateEmptyFrame(aSample->Size());
+ if (GMP_FAILED(err)) {
+ mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("GMPVideoEncodedFrame::CreateEmptyFrame:%x", err)));
+ return nullptr;
+ }
+
+ memcpy(frame->Buffer(), aSample->Data(), frame->Size());
+
+ // Convert 4-byte NAL unit lengths to host-endian 4-byte buffer lengths to
+ // suit the GMP API.
+ if (mConvertNALUnitLengths) {
+ const int kNALLengthSize = 4;
+ uint8_t* buf = frame->Buffer();
+ while (buf < frame->Buffer() + frame->Size() - kNALLengthSize) {
+ uint32_t length = BigEndian::readUint32(buf) + kNALLengthSize;
+ *reinterpret_cast<uint32_t *>(buf) = length;
+ buf += length;
+ }
+ }
+
+ frame->SetBufferType(GMP_BufferLength32);
+
+ frame->SetEncodedWidth(mConfig.mDisplay.width);
+ frame->SetEncodedHeight(mConfig.mDisplay.height);
+ frame->SetTimeStamp(aSample->mTime);
+ frame->SetCompleteFrame(true);
+ frame->SetDuration(aSample->mDuration);
+ frame->SetFrameType(aSample->mKeyframe ? kGMPKeyFrame : kGMPDeltaFrame);
+
+ return frame;
+}
+
+const VideoInfo&
+GMPVideoDecoder::GetConfig() const
+{
+ return mConfig;
+}
+
+void
+GMPVideoDecoder::GMPInitDone(GMPVideoDecoderProxy* aGMP, GMPVideoHost* aHost)
+{
+ MOZ_ASSERT(IsOnGMPThread());
+
+ if (!aGMP) {
+ mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ return;
+ }
+ MOZ_ASSERT(aHost);
+
+ if (mInitPromise.IsEmpty()) {
+ // The GMP must have been shut down while we were waiting for the Init
+ // operation to complete.
+ aGMP->Close();
+ return;
+ }
+
+ GMPVideoCodec codec;
+ memset(&codec, 0, sizeof(codec));
+
+ codec.mGMPApiVersion = kGMPVersion33;
+ nsTArray<uint8_t> codecSpecific;
+ if (MP4Decoder::IsH264(mConfig.mMimeType)) {
+ codec.mCodecType = kGMPVideoCodecH264;
+ codecSpecific.AppendElement(0); // mPacketizationMode.
+ codecSpecific.AppendElements(mConfig.mExtraData->Elements(),
+ mConfig.mExtraData->Length());
+ } else if (VPXDecoder::IsVP8(mConfig.mMimeType)) {
+ codec.mCodecType = kGMPVideoCodecVP8;
+ } else if (VPXDecoder::IsVP9(mConfig.mMimeType)) {
+ codec.mCodecType = kGMPVideoCodecVP9;
+ } else {
+ // Unrecognized mime type
+ aGMP->Close();
+ mInitPromise.Reject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ return;
+ }
+ codec.mWidth = mConfig.mImage.width;
+ codec.mHeight = mConfig.mImage.height;
+
+ nsresult rv = aGMP->InitDecode(codec,
+ codecSpecific,
+ mAdapter,
+ PR_GetNumberOfProcessors());
+ if (NS_FAILED(rv)) {
+ aGMP->Close();
+ mInitPromise.Reject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ return;
+ }
+
+ mGMP = aGMP;
+ mHost = aHost;
+
+ // GMP implementations have interpreted the meaning of GMP_BufferLength32
+ // differently. The OpenH264 GMP expects GMP_BufferLength32 to behave as
+ // specified in the GMP API, where each buffer is prefixed by a 32-bit
+ // host-endian buffer length that includes the size of the buffer length
+ // field. Other existing GMPs currently expect GMP_BufferLength32 (when
+ // combined with kGMPVideoCodecH264) to mean "like AVCC but restricted to
+ // 4-byte NAL lengths" (i.e. buffer lengths are specified in big-endian
+ // and do not include the length of the buffer length field.
+ mConvertNALUnitLengths = mGMP->GetDisplayName().EqualsLiteral("gmpopenh264");
+
+ mInitPromise.Resolve(TrackInfo::kVideoTrack, __func__);
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+GMPVideoDecoder::Init()
+{
+ MOZ_ASSERT(IsOnGMPThread());
+
+ mMPS = do_GetService("@mozilla.org/gecko-media-plugin-service;1");
+ MOZ_ASSERT(mMPS);
+
+ RefPtr<InitPromise> promise(mInitPromise.Ensure(__func__));
+
+ nsTArray<nsCString> tags;
+ InitTags(tags);
+ UniquePtr<GetGMPVideoDecoderCallback> callback(new GMPInitDoneCallback(this));
+ if (NS_FAILED(mMPS->GetDecryptingGMPVideoDecoder(mCrashHelper,
+ &tags,
+ GetNodeId(),
+ Move(callback),
+ DecryptorId()))) {
+ mInitPromise.Reject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+ return promise;
+}
+
+void
+GMPVideoDecoder::Input(MediaRawData* aSample)
+{
+ MOZ_ASSERT(IsOnGMPThread());
+
+ RefPtr<MediaRawData> sample(aSample);
+ if (!mGMP) {
+ mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("mGMP not initialized")));
+ return;
+ }
+
+ mAdapter->SetLastStreamOffset(sample->mOffset);
+
+ GMPUniquePtr<GMPVideoEncodedFrame> frame = CreateFrame(sample);
+ if (!frame) {
+ mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("CreateFrame returned null")));
+ return;
+ }
+ nsTArray<uint8_t> info; // No codec specific per-frame info to pass.
+ nsresult rv = mGMP->Decode(Move(frame), false, info, 0);
+ if (NS_FAILED(rv)) {
+ mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("mGMP->Decode:%x", rv)));
+ }
+}
+
+void
+GMPVideoDecoder::Flush()
+{
+ MOZ_ASSERT(IsOnGMPThread());
+
+ if (!mGMP || NS_FAILED(mGMP->Reset())) {
+ // Abort the flush.
+ mCallback->FlushComplete();
+ }
+}
+
+void
+GMPVideoDecoder::Drain()
+{
+ MOZ_ASSERT(IsOnGMPThread());
+
+ if (!mGMP || NS_FAILED(mGMP->Drain())) {
+ mCallback->DrainComplete();
+ }
+}
+
+void
+GMPVideoDecoder::Shutdown()
+{
+ mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+ // Note that this *may* be called from the proxy thread also.
+ if (!mGMP) {
+ return;
+ }
+ // Note this unblocks flush and drain operations waiting for callbacks.
+ mGMP->Close();
+ mGMP = nullptr;
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h
new file mode 100644
index 000000000..900ef4553
--- /dev/null
+++ b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h
@@ -0,0 +1,122 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(GMPVideoDecoder_h_)
+#define GMPVideoDecoder_h_
+
+#include "GMPVideoDecoderProxy.h"
+#include "ImageContainer.h"
+#include "MediaDataDecoderProxy.h"
+#include "PlatformDecoderModule.h"
+#include "mozIGeckoMediaPluginService.h"
+#include "MediaInfo.h"
+
+namespace mozilla {
+
+class VideoCallbackAdapter : public GMPVideoDecoderCallbackProxy {
+public:
+ VideoCallbackAdapter(MediaDataDecoderCallbackProxy* aCallback,
+ VideoInfo aVideoInfo,
+ layers::ImageContainer* aImageContainer)
+ : mCallback(aCallback)
+ , mLastStreamOffset(0)
+ , mVideoInfo(aVideoInfo)
+ , mImageContainer(aImageContainer)
+ {}
+
+ MediaDataDecoderCallbackProxy* Callback() const { return mCallback; }
+
+ // GMPVideoDecoderCallbackProxy
+ void Decoded(GMPVideoi420Frame* aDecodedFrame) override;
+ void ReceivedDecodedReferenceFrame(const uint64_t aPictureId) override;
+ void ReceivedDecodedFrame(const uint64_t aPictureId) override;
+ void InputDataExhausted() override;
+ void DrainComplete() override;
+ void ResetComplete() override;
+ void Error(GMPErr aErr) override;
+ void Terminated() override;
+
+ void SetLastStreamOffset(int64_t aStreamOffset) {
+ mLastStreamOffset = aStreamOffset;
+ }
+
+private:
+ MediaDataDecoderCallbackProxy* mCallback;
+ int64_t mLastStreamOffset;
+
+ VideoInfo mVideoInfo;
+ RefPtr<layers::ImageContainer> mImageContainer;
+};
+
+struct GMPVideoDecoderParams {
+ explicit GMPVideoDecoderParams(const CreateDecoderParams& aParams);
+ GMPVideoDecoderParams& WithCallback(MediaDataDecoderProxy* aWrapper);
+ GMPVideoDecoderParams& WithAdapter(VideoCallbackAdapter* aAdapter);
+
+ const VideoInfo& mConfig;
+ TaskQueue* mTaskQueue;
+ MediaDataDecoderCallbackProxy* mCallback;
+ VideoCallbackAdapter* mAdapter;
+ layers::ImageContainer* mImageContainer;
+ layers::LayersBackend mLayersBackend;
+ RefPtr<GMPCrashHelper> mCrashHelper;
+};
+
+class GMPVideoDecoder : public MediaDataDecoder {
+public:
+ explicit GMPVideoDecoder(const GMPVideoDecoderParams& aParams);
+
+ RefPtr<InitPromise> Init() override;
+ void Input(MediaRawData* aSample) override;
+ void Flush() override;
+ void Drain() override;
+ void Shutdown() override;
+ const char* GetDescriptionName() const override
+ {
+ return "GMP video decoder";
+ }
+
+protected:
+ virtual void InitTags(nsTArray<nsCString>& aTags);
+ virtual nsCString GetNodeId();
+ virtual uint32_t DecryptorId() const { return 0; }
+ virtual GMPUniquePtr<GMPVideoEncodedFrame> CreateFrame(MediaRawData* aSample);
+ virtual const VideoInfo& GetConfig() const;
+
+private:
+
+ class GMPInitDoneCallback : public GetGMPVideoDecoderCallback
+ {
+ public:
+ explicit GMPInitDoneCallback(GMPVideoDecoder* aDecoder)
+ : mDecoder(aDecoder)
+ {
+ }
+
+ void Done(GMPVideoDecoderProxy* aGMP, GMPVideoHost* aHost) override
+ {
+ mDecoder->GMPInitDone(aGMP, aHost);
+ }
+
+ private:
+ RefPtr<GMPVideoDecoder> mDecoder;
+ };
+ void GMPInitDone(GMPVideoDecoderProxy* aGMP, GMPVideoHost* aHost);
+
+ const VideoInfo mConfig;
+ MediaDataDecoderCallbackProxy* mCallback;
+ nsCOMPtr<mozIGeckoMediaPluginService> mMPS;
+ GMPVideoDecoderProxy* mGMP;
+ GMPVideoHost* mHost;
+ nsAutoPtr<VideoCallbackAdapter> mAdapter;
+ bool mConvertNALUnitLengths;
+ MozPromiseHolder<InitPromise> mInitPromise;
+ RefPtr<GMPCrashHelper> mCrashHelper;
+};
+
+} // namespace mozilla
+
+#endif // GMPVideoDecoder_h_
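
GMPInitDoneCallback above exists so the asynchronous GetGMPVideoDecoder request can hold the decoder alive (via RefPtr) until the plugin proxy is ready, then hand the result back through GMPInitDone(). A minimal standalone sketch of that completion-callback pattern, using standard C++ types instead of the Gecko RefPtr/GMP interfaces (all names below are illustrative, not part of this tree):

#include <functional>
#include <iostream>
#include <memory>

// Hypothetical asynchronous service: it accepts a completion callback and
// invokes it once the "plugin" instance is available.
class FakeGmpService {
public:
  void GetDecoderAsync(std::function<void(int /*proxyHandle*/)> aOnDone) {
    // The real service completes later on another thread; here we complete
    // immediately for illustration.
    aOnDone(42);
  }
};

class SketchDecoder : public std::enable_shared_from_this<SketchDecoder> {
public:
  void Init(FakeGmpService& aService) {
    // Capture a strong reference so the decoder outlives the async request,
    // mirroring the RefPtr<GMPVideoDecoder> held by GMPInitDoneCallback.
    auto self = shared_from_this();
    aService.GetDecoderAsync([self](int aProxyHandle) {
      self->InitDone(aProxyHandle);
    });
  }

private:
  void InitDone(int aProxyHandle) {
    std::cout << "init done, proxy handle " << aProxyHandle << "\n";
  }
};

int main() {
  FakeGmpService service;
  auto decoder = std::make_shared<SketchDecoder>();
  decoder->Init(service);
}
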
diff --git a/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.cpp b/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.cpp
new file mode 100644
index 000000000..5a196f8e6
--- /dev/null
+++ b/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.cpp
@@ -0,0 +1,90 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MediaDataDecoderProxy.h"
+#include "MediaData.h"
+
+namespace mozilla {
+
+void
+MediaDataDecoderCallbackProxy::Error(const MediaResult& aError)
+{
+ mProxyCallback->Error(aError);
+}
+
+void
+MediaDataDecoderCallbackProxy::FlushComplete()
+{
+ mProxyDecoder->FlushComplete();
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+MediaDataDecoderProxy::InternalInit()
+{
+ return mProxyDecoder->Init();
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+MediaDataDecoderProxy::Init()
+{
+ MOZ_ASSERT(!mIsShutdown);
+
+ return InvokeAsync(mProxyThread, this, __func__,
+ &MediaDataDecoderProxy::InternalInit);
+}
+
+void
+MediaDataDecoderProxy::Input(MediaRawData* aSample)
+{
+ MOZ_ASSERT(!IsOnProxyThread());
+ MOZ_ASSERT(!mIsShutdown);
+
+ nsCOMPtr<nsIRunnable> task(new InputTask(mProxyDecoder, aSample));
+ mProxyThread->Dispatch(task.forget());
+}
+
+void
+MediaDataDecoderProxy::Flush()
+{
+ MOZ_ASSERT(!IsOnProxyThread());
+ MOZ_ASSERT(!mIsShutdown);
+
+ mFlushComplete.Set(false);
+
+ mProxyThread->Dispatch(NewRunnableMethod(mProxyDecoder, &MediaDataDecoder::Flush));
+
+ mFlushComplete.WaitUntil(true);
+}
+
+void
+MediaDataDecoderProxy::Drain()
+{
+ MOZ_ASSERT(!IsOnProxyThread());
+ MOZ_ASSERT(!mIsShutdown);
+
+ mProxyThread->Dispatch(NewRunnableMethod(mProxyDecoder, &MediaDataDecoder::Drain));
+}
+
+void
+MediaDataDecoderProxy::Shutdown()
+{
+ // Note that this *may* be called from the proxy thread also.
+ MOZ_ASSERT(!mIsShutdown);
+#if defined(DEBUG)
+ mIsShutdown = true;
+#endif
+ mProxyThread->AsXPCOMThread()->Dispatch(NewRunnableMethod(mProxyDecoder,
+ &MediaDataDecoder::Shutdown),
+ NS_DISPATCH_SYNC);
+}
+
+void
+MediaDataDecoderProxy::FlushComplete()
+{
+ mFlushComplete.Set(true);
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.h b/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.h
new file mode 100644
index 000000000..735b6126e
--- /dev/null
+++ b/dom/media/platforms/agnostic/gmp/MediaDataDecoderProxy.h
@@ -0,0 +1,181 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(MediaDataDecoderProxy_h_)
+#define MediaDataDecoderProxy_h_
+
+#include "PlatformDecoderModule.h"
+#include "mozilla/RefPtr.h"
+#include "nsThreadUtils.h"
+#include "nscore.h"
+#include "GMPService.h"
+
+namespace mozilla {
+
+class InputTask : public Runnable {
+public:
+ InputTask(MediaDataDecoder* aDecoder,
+ MediaRawData* aSample)
+ : mDecoder(aDecoder)
+ , mSample(aSample)
+ {}
+
+ NS_IMETHOD Run() override {
+ mDecoder->Input(mSample);
+ return NS_OK;
+ }
+
+private:
+ RefPtr<MediaDataDecoder> mDecoder;
+ RefPtr<MediaRawData> mSample;
+};
+
+template<typename T>
+class Condition {
+public:
+ explicit Condition(T aValue)
+ : mMonitor("Condition")
+ , mCondition(aValue)
+ {}
+
+ void Set(T aValue) {
+ MonitorAutoLock mon(mMonitor);
+ mCondition = aValue;
+ mon.NotifyAll();
+ }
+
+ void WaitUntil(T aValue) {
+ MonitorAutoLock mon(mMonitor);
+ while (mCondition != aValue) {
+ mon.Wait();
+ }
+ }
+
+private:
+ Monitor mMonitor;
+ T mCondition;
+};
+
+class MediaDataDecoderProxy;
+
+class MediaDataDecoderCallbackProxy : public MediaDataDecoderCallback {
+public:
+ MediaDataDecoderCallbackProxy(MediaDataDecoderProxy* aProxyDecoder,
+ MediaDataDecoderCallback* aCallback)
+ : mProxyDecoder(aProxyDecoder)
+ , mProxyCallback(aCallback)
+ {
+ }
+
+ void Output(MediaData* aData) override {
+ mProxyCallback->Output(aData);
+ }
+
+ void Error(const MediaResult& aError) override;
+
+ void InputExhausted() override {
+ mProxyCallback->InputExhausted();
+ }
+
+ void DrainComplete() override {
+ mProxyCallback->DrainComplete();
+ }
+
+ void ReleaseMediaResources() override {
+ mProxyCallback->ReleaseMediaResources();
+ }
+
+ void FlushComplete();
+
+ bool OnReaderTaskQueue() override
+ {
+ return mProxyCallback->OnReaderTaskQueue();
+ }
+
+ void WaitingForKey() override
+ {
+ mProxyCallback->WaitingForKey();
+ }
+
+private:
+ MediaDataDecoderProxy* mProxyDecoder;
+ MediaDataDecoderCallback* mProxyCallback;
+};
+
+class MediaDataDecoderProxy : public MediaDataDecoder {
+public:
+ MediaDataDecoderProxy(already_AddRefed<AbstractThread> aProxyThread,
+ MediaDataDecoderCallback* aCallback)
+ : mProxyThread(aProxyThread)
+ , mProxyCallback(this, aCallback)
+ , mFlushComplete(false)
+#if defined(DEBUG)
+ , mIsShutdown(false)
+#endif
+ {
+ }
+
+ // Ideally, this would return a regular MediaDataDecoderCallback pointer
+ // to retain the clean abstraction, but until MediaDataDecoderCallback
+ // supports the FlushComplete interface, this will have to do. When MDDC
+ // supports FlushComplete, this, the GMP*Decoders, and the
+ // *CallbackAdapters can be reverted to accepting a regular
+ // MediaDataDecoderCallback pointer.
+ MediaDataDecoderCallbackProxy* Callback()
+ {
+ return &mProxyCallback;
+ }
+
+ void SetProxyTarget(MediaDataDecoder* aProxyDecoder)
+ {
+ MOZ_ASSERT(aProxyDecoder);
+ mProxyDecoder = aProxyDecoder;
+ }
+
+ // These are called from the decoder thread pool.
+ // Init and Shutdown run synchronously on the proxy thread; all others run
+ // asynchronously and are responded to via the MediaDataDecoderCallback.
+ // Note: the nsresults returned by the proxied decoder are lost.
+ RefPtr<InitPromise> Init() override;
+ void Input(MediaRawData* aSample) override;
+ void Flush() override;
+ void Drain() override;
+ void Shutdown() override;
+
+ const char* GetDescriptionName() const override
+ {
+ return "GMP proxy data decoder";
+ }
+
+ // Called by MediaDataDecoderCallbackProxy.
+ void FlushComplete();
+
+private:
+ RefPtr<InitPromise> InternalInit();
+
+#ifdef DEBUG
+ bool IsOnProxyThread() {
+ return mProxyThread && mProxyThread->IsCurrentThreadIn();
+ }
+#endif
+
+ friend class InputTask;
+ friend class InitTask;
+
+ RefPtr<MediaDataDecoder> mProxyDecoder;
+ RefPtr<AbstractThread> mProxyThread;
+
+ MediaDataDecoderCallbackProxy mProxyCallback;
+
+ Condition<bool> mFlushComplete;
+#if defined(DEBUG)
+ bool mIsShutdown;
+#endif
+};
+
+} // namespace mozilla
+
+#endif // MediaDataDecoderProxy_h_
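
MediaDataDecoderProxy::Flush() blocks the calling thread on the Condition<bool> helper declared above until the proxied decoder reports FlushComplete() from the proxy thread. A standalone sketch of that set/wait-until handshake, written with std::mutex and std::condition_variable instead of mozilla::Monitor (names are illustrative only):

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

// Simplified analogue of the Condition<T> helper: Set() stores a value and
// wakes waiters; WaitUntil() blocks until the stored value matches.
template <typename T>
class Condition {
public:
  explicit Condition(T aValue) : mValue(aValue) {}

  void Set(T aValue) {
    std::lock_guard<std::mutex> lock(mMutex);
    mValue = aValue;
    mCond.notify_all();
  }

  void WaitUntil(T aValue) {
    std::unique_lock<std::mutex> lock(mMutex);
    mCond.wait(lock, [&] { return mValue == aValue; });
  }

private:
  std::mutex mMutex;
  std::condition_variable mCond;
  T mValue;
};

int main() {
  Condition<bool> flushComplete(false);

  // "Proxy thread": performs the flush and signals completion.
  std::thread proxy([&] {
    std::cout << "flushing...\n";
    flushComplete.Set(true);  // like MediaDataDecoderProxy::FlushComplete()
  });

  // Caller: mirrors Flush() blocking until the proxy thread is done.
  flushComplete.WaitUntil(true);
  std::cout << "flush complete\n";

  proxy.join();
}

The same object can be reused for repeated flushes by resetting it to false first, which is exactly what Flush() does with mFlushComplete.Set(false) before dispatching to the proxy thread.
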
diff --git a/dom/media/platforms/agnostic/gmp/moz.build b/dom/media/platforms/agnostic/gmp/moz.build
new file mode 100644
index 000000000..eb2738e26
--- /dev/null
+++ b/dom/media/platforms/agnostic/gmp/moz.build
@@ -0,0 +1,24 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+EXPORTS += [
+ 'GMPAudioDecoder.h',
+ 'GMPDecoderModule.h',
+ 'GMPVideoDecoder.h',
+ 'MediaDataDecoderProxy.h',
+]
+
+UNIFIED_SOURCES += [
+ 'GMPAudioDecoder.cpp',
+ 'GMPDecoderModule.cpp',
+ 'GMPVideoDecoder.cpp',
+ 'MediaDataDecoderProxy.cpp',
+]
+
+# GMPVideoEncodedFrameImpl.h needs IPC
+include('/ipc/chromium/chromium-config.mozbuild')
+
+FINAL_LIBRARY = 'xul'
diff --git a/dom/media/platforms/android/AndroidDecoderModule.cpp b/dom/media/platforms/android/AndroidDecoderModule.cpp
new file mode 100644
index 000000000..3bf5cbf09
--- /dev/null
+++ b/dom/media/platforms/android/AndroidDecoderModule.cpp
@@ -0,0 +1,227 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "AndroidDecoderModule.h"
+#include "AndroidBridge.h"
+
+#include "MediaCodecDataDecoder.h"
+#include "RemoteDataDecoder.h"
+
+#include "MediaInfo.h"
+#include "VPXDecoder.h"
+
+#include "MediaPrefs.h"
+#include "OpusDecoder.h"
+#include "VorbisDecoder.h"
+
+#include "nsPromiseFlatString.h"
+#include "nsIGfxInfo.h"
+
+#include "prlog.h"
+
+#include <jni.h>
+
+#undef LOG
+#define LOG(arg, ...) MOZ_LOG(sAndroidDecoderModuleLog, \
+ mozilla::LogLevel::Debug, ("AndroidDecoderModule(%p)::%s: " arg, \
+ this, __func__, ##__VA_ARGS__))
+
+using namespace mozilla;
+using namespace mozilla::gl;
+using namespace mozilla::java::sdk;
+using media::TimeUnit;
+
+namespace mozilla {
+
+mozilla::LazyLogModule sAndroidDecoderModuleLog("AndroidDecoderModule");
+
+static const char*
+TranslateMimeType(const nsACString& aMimeType)
+{
+ if (VPXDecoder::IsVPX(aMimeType, VPXDecoder::VP8)) {
+ return "video/x-vnd.on2.vp8";
+ } else if (VPXDecoder::IsVPX(aMimeType, VPXDecoder::VP9)) {
+ return "video/x-vnd.on2.vp9";
+ }
+ return PromiseFlatCString(aMimeType).get();
+}
+
+static bool
+GetFeatureStatus(int32_t aFeature)
+{
+ nsCOMPtr<nsIGfxInfo> gfxInfo = services::GetGfxInfo();
+ int32_t status = nsIGfxInfo::FEATURE_STATUS_UNKNOWN;
+ nsCString discardFailureId;
+ if (!gfxInfo || NS_FAILED(gfxInfo->GetFeatureStatus(aFeature, discardFailureId, &status))) {
+ return false;
+ }
+ return status == nsIGfxInfo::FEATURE_STATUS_OK;
+}
+
+CryptoInfo::LocalRef
+GetCryptoInfoFromSample(const MediaRawData* aSample)
+{
+ auto& cryptoObj = aSample->mCrypto;
+
+ if (!cryptoObj.mValid) {
+ return nullptr;
+ }
+
+ CryptoInfo::LocalRef cryptoInfo;
+ nsresult rv = CryptoInfo::New(&cryptoInfo);
+ NS_ENSURE_SUCCESS(rv, nullptr);
+
+ uint32_t numSubSamples =
+ std::min<uint32_t>(cryptoObj.mPlainSizes.Length(), cryptoObj.mEncryptedSizes.Length());
+
+ uint32_t totalSubSamplesSize = 0;
+ for (auto& size : cryptoObj.mEncryptedSizes) {
+ totalSubSamplesSize += size;
+ }
+
+ // mPlainSizes is uint16_t, need to transform to uint32_t first.
+ nsTArray<uint32_t> plainSizes;
+ for (auto& size : cryptoObj.mPlainSizes) {
+ totalSubSamplesSize += size;
+ plainSizes.AppendElement(size);
+ }
+
+ uint32_t codecSpecificDataSize = aSample->Size() - totalSubSamplesSize;
+ // The size of the codec specific data ("CSD") for Android MediaCodec usage
+ // should be included in the first plain size.
+ plainSizes[0] += codecSpecificDataSize;
+
+ static const int kExpectedIVLength = 16;
+ auto tempIV(cryptoObj.mIV);
+ auto tempIVLength = tempIV.Length();
+ MOZ_ASSERT(tempIVLength <= kExpectedIVLength);
+ for (size_t i = tempIVLength; i < kExpectedIVLength; i++) {
+ // Padding with 0
+ tempIV.AppendElement(0);
+ }
+
+ auto numBytesOfPlainData = mozilla::jni::IntArray::New(
+ reinterpret_cast<int32_t*>(&plainSizes[0]),
+ plainSizes.Length());
+
+ auto numBytesOfEncryptedData =
+ mozilla::jni::IntArray::New(reinterpret_cast<const int32_t*>(&cryptoObj.mEncryptedSizes[0]),
+ cryptoObj.mEncryptedSizes.Length());
+ auto iv = mozilla::jni::ByteArray::New(reinterpret_cast<int8_t*>(&tempIV[0]),
+ tempIV.Length());
+ auto keyId = mozilla::jni::ByteArray::New(reinterpret_cast<const int8_t*>(&cryptoObj.mKeyId[0]),
+ cryptoObj.mKeyId.Length());
+ cryptoInfo->Set(numSubSamples,
+ numBytesOfPlainData,
+ numBytesOfEncryptedData,
+ keyId,
+ iv,
+ MediaCodec::CRYPTO_MODE_AES_CTR);
+
+ return cryptoInfo;
+}
+
+bool
+AndroidDecoderModule::SupportsMimeType(const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const
+{
+ if (!AndroidBridge::Bridge() ||
+ AndroidBridge::Bridge()->GetAPIVersion() < 16) {
+ return false;
+ }
+
+ if (aMimeType.EqualsLiteral("video/mp4") ||
+ aMimeType.EqualsLiteral("video/avc")) {
+ return true;
+ }
+
+ // When checking "audio/x-wav", CreateDecoder can cause a JNI error by
+ // accessing a stale local reference, leading to a SIGSEGV crash.
+ // To avoid this we reject wav types here.
+ if (aMimeType.EqualsLiteral("audio/x-wav") ||
+ aMimeType.EqualsLiteral("audio/wave; codecs=1") ||
+ aMimeType.EqualsLiteral("audio/wave; codecs=6") ||
+ aMimeType.EqualsLiteral("audio/wave; codecs=7") ||
+ aMimeType.EqualsLiteral("audio/wave; codecs=65534")) {
+ return false;
+ }
+
+ if ((VPXDecoder::IsVPX(aMimeType, VPXDecoder::VP8) &&
+ !GetFeatureStatus(nsIGfxInfo::FEATURE_VP8_HW_DECODE)) ||
+ (VPXDecoder::IsVPX(aMimeType, VPXDecoder::VP9) &&
+ !GetFeatureStatus(nsIGfxInfo::FEATURE_VP9_HW_DECODE))) {
+ return false;
+ }
+
+ // Prefer the gecko decoder for opus and vorbis; stagefright crashes
+ // on content demuxed from mp4.
+ if (OpusDataDecoder::IsOpus(aMimeType) ||
+ VorbisDataDecoder::IsVorbis(aMimeType)) {
+ LOG("Rejecting audio of type %s", aMimeType.Data());
+ return false;
+ }
+
+ return java::HardwareCodecCapabilityUtils::FindDecoderCodecInfoForMimeType(
+ nsCString(TranslateMimeType(aMimeType)));
+}
+
+already_AddRefed<MediaDataDecoder>
+AndroidDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
+{
+ MediaFormat::LocalRef format;
+
+ const VideoInfo& config = aParams.VideoConfig();
+ NS_ENSURE_SUCCESS(MediaFormat::CreateVideoFormat(
+ TranslateMimeType(config.mMimeType),
+ config.mDisplay.width,
+ config.mDisplay.height,
+ &format), nullptr);
+
+ RefPtr<MediaDataDecoder> decoder = MediaPrefs::PDMAndroidRemoteCodecEnabled() ?
+ RemoteDataDecoder::CreateVideoDecoder(config,
+ format,
+ aParams.mCallback,
+ aParams.mImageContainer) :
+ MediaCodecDataDecoder::CreateVideoDecoder(config,
+ format,
+ aParams.mCallback,
+ aParams.mImageContainer);
+
+ return decoder.forget();
+}
+
+already_AddRefed<MediaDataDecoder>
+AndroidDecoderModule::CreateAudioDecoder(const CreateDecoderParams& aParams)
+{
+ const AudioInfo& config = aParams.AudioConfig();
+ MOZ_ASSERT(config.mBitDepth == 16, "We only handle 16-bit audio!");
+
+ MediaFormat::LocalRef format;
+
+ LOG("CreateAudioFormat with mimeType=%s, mRate=%d, channels=%d",
+ config.mMimeType.Data(), config.mRate, config.mChannels);
+
+ NS_ENSURE_SUCCESS(MediaFormat::CreateAudioFormat(
+ config.mMimeType,
+ config.mRate,
+ config.mChannels,
+ &format), nullptr);
+
+ RefPtr<MediaDataDecoder> decoder = MediaPrefs::PDMAndroidRemoteCodecEnabled() ?
+ RemoteDataDecoder::CreateAudioDecoder(config, format, aParams.mCallback) :
+ MediaCodecDataDecoder::CreateAudioDecoder(config, format, aParams.mCallback);
+
+ return decoder.forget();
+}
+
+PlatformDecoderModule::ConversionRequired
+AndroidDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
+{
+ if (aConfig.IsVideo()) {
+ return ConversionRequired::kNeedAnnexB;
+ }
+ return ConversionRequired::kNeedNone;
+}
+
+} // mozilla
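
GetCryptoInfoFromSample() above pads the sample IV with zeroes up to the 16 bytes AES-CTR expects and charges any codec specific data to the first clear subsample before handing the layout to MediaCodec. A small standalone sketch of that bookkeeping, using std::vector in place of the JNI array types (the struct and function names are illustrative assumptions):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct SubsampleLayout {
  std::vector<uint32_t> plainSizes;
  std::vector<uint32_t> encryptedSizes;
  std::vector<uint8_t> iv;  // padded to 16 bytes
};

SubsampleLayout BuildLayout(const std::vector<uint16_t>& aPlain,
                            const std::vector<uint32_t>& aEncrypted,
                            std::vector<uint8_t> aIv,
                            size_t aSampleSize) {
  SubsampleLayout out;
  uint32_t covered = 0;

  // Widen the 16-bit clear sizes and total up everything the subsamples cover.
  for (uint16_t size : aPlain) {
    out.plainSizes.push_back(size);
    covered += size;
  }
  for (uint32_t size : aEncrypted) {
    out.encryptedSizes.push_back(size);
    covered += size;
  }

  // Any leading bytes not covered by the subsamples (e.g. codec specific
  // data) are charged to the first clear subsample.
  if (!out.plainSizes.empty() && aSampleSize > covered) {
    out.plainSizes[0] += static_cast<uint32_t>(aSampleSize - covered);
  }

  // Pad the IV with zero bytes up to the 16 bytes AES-CTR requires.
  aIv.resize(16, 0);
  out.iv = std::move(aIv);
  return out;
}

int main() {
  SubsampleLayout layout =
      BuildLayout({10, 4}, {100, 200}, std::vector<uint8_t>(8, 0xab), 320);
  std::cout << "first clear subsample: " << layout.plainSizes[0]
            << " bytes, IV length: " << layout.iv.size() << "\n";
}

With an 8-byte IV and a 320-byte sample whose subsamples cover 314 bytes, the example reports a first clear subsample of 16 bytes and a 16-byte IV.
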
diff --git a/dom/media/platforms/android/AndroidDecoderModule.h b/dom/media/platforms/android/AndroidDecoderModule.h
new file mode 100644
index 000000000..339cbb311
--- /dev/null
+++ b/dom/media/platforms/android/AndroidDecoderModule.h
@@ -0,0 +1,34 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef AndroidDecoderModule_h_
+#define AndroidDecoderModule_h_
+
+#include "PlatformDecoderModule.h"
+
+namespace mozilla {
+
+class AndroidDecoderModule : public PlatformDecoderModule {
+public:
+ already_AddRefed<MediaDataDecoder>
+ CreateVideoDecoder(const CreateDecoderParams& aParams) override;
+
+ already_AddRefed<MediaDataDecoder>
+ CreateAudioDecoder(const CreateDecoderParams& aParams) override;
+
+ AndroidDecoderModule() {}
+ virtual ~AndroidDecoderModule() {}
+
+ bool SupportsMimeType(const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const override;
+
+ ConversionRequired
+ DecoderNeedsConversion(const TrackInfo& aConfig) const override;
+};
+
+extern LazyLogModule sAndroidDecoderModuleLog;
+
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/platforms/android/MediaCodecDataDecoder.cpp b/dom/media/platforms/android/MediaCodecDataDecoder.cpp
new file mode 100644
index 000000000..3c5df54ee
--- /dev/null
+++ b/dom/media/platforms/android/MediaCodecDataDecoder.cpp
@@ -0,0 +1,698 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MediaCodecDataDecoder.h"
+
+#include "AndroidBridge.h"
+#include "AndroidSurfaceTexture.h"
+#include "GeneratedJNINatives.h"
+#include "GLImages.h"
+
+#include "MediaData.h"
+#include "MediaInfo.h"
+#include "VPXDecoder.h"
+
+#include "nsThreadUtils.h"
+#include "nsPromiseFlatString.h"
+#include "nsIGfxInfo.h"
+
+#include "prlog.h"
+
+#include <jni.h>
+
+#undef LOG
+#define LOG(arg, ...) MOZ_LOG(sAndroidDecoderModuleLog, \
+ mozilla::LogLevel::Debug, ("MediaCodecDataDecoder(%p)::%s: " arg, \
+ this, __func__, ##__VA_ARGS__))
+
+using namespace mozilla;
+using namespace mozilla::gl;
+using namespace mozilla::java;
+using namespace mozilla::java::sdk;
+using media::TimeUnit;
+
+namespace mozilla {
+
+#define INVOKE_CALLBACK(Func, ...) \
+ if (mCallback) { \
+ mCallback->Func(__VA_ARGS__); \
+ } else { \
+ NS_WARNING("Callback not set"); \
+ }
+
+static MediaCodec::LocalRef
+CreateDecoder(const nsACString& aMimeType)
+{
+ MediaCodec::LocalRef codec;
+ NS_ENSURE_SUCCESS(MediaCodec::CreateDecoderByType(TranslateMimeType(aMimeType),
+ &codec), nullptr);
+ return codec;
+}
+
+class VideoDataDecoder : public MediaCodecDataDecoder
+{
+public:
+ VideoDataDecoder(const VideoInfo& aConfig,
+ MediaFormat::Param aFormat,
+ MediaDataDecoderCallback* aCallback,
+ layers::ImageContainer* aImageContainer)
+ : MediaCodecDataDecoder(MediaData::Type::VIDEO_DATA, aConfig.mMimeType,
+ aFormat, aCallback)
+ , mImageContainer(aImageContainer)
+ , mConfig(aConfig)
+ {
+
+ }
+
+ const char* GetDescriptionName() const override
+ {
+ return "Android MediaCodec video decoder";
+ }
+
+ RefPtr<InitPromise> Init() override
+ {
+ mSurfaceTexture = AndroidSurfaceTexture::Create();
+ if (!mSurfaceTexture) {
+ NS_WARNING("Failed to create SurfaceTexture for video decode\n");
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+ if (NS_FAILED(InitDecoder(mSurfaceTexture->JavaSurface()))) {
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+ return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__);
+ }
+
+ void Cleanup() override
+ {
+ }
+
+ nsresult PostOutput(BufferInfo::Param aInfo, MediaFormat::Param aFormat,
+ const TimeUnit& aDuration) override
+ {
+ RefPtr<layers::Image> img =
+ new SurfaceTextureImage(mSurfaceTexture.get(), mConfig.mDisplay,
+ gl::OriginPos::BottomLeft);
+
+ nsresult rv;
+ int32_t flags;
+ NS_ENSURE_SUCCESS(rv = aInfo->Flags(&flags), rv);
+
+ bool isSync = !!(flags & MediaCodec::BUFFER_FLAG_SYNC_FRAME);
+
+ int32_t offset;
+ NS_ENSURE_SUCCESS(rv = aInfo->Offset(&offset), rv);
+
+ int64_t presentationTimeUs;
+ NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);
+
+ RefPtr<VideoData> v =
+ VideoData::CreateFromImage(mConfig,
+ offset,
+ presentationTimeUs,
+ aDuration.ToMicroseconds(),
+ img,
+ isSync,
+ presentationTimeUs,
+ gfx::IntRect(0, 0,
+ mConfig.mDisplay.width,
+ mConfig.mDisplay.height));
+ INVOKE_CALLBACK(Output, v);
+ return NS_OK;
+ }
+
+protected:
+ layers::ImageContainer* mImageContainer;
+ const VideoInfo& mConfig;
+ RefPtr<AndroidSurfaceTexture> mSurfaceTexture;
+};
+
+class AudioDataDecoder : public MediaCodecDataDecoder
+{
+public:
+ AudioDataDecoder(const AudioInfo& aConfig, MediaFormat::Param aFormat,
+ MediaDataDecoderCallback* aCallback)
+ : MediaCodecDataDecoder(MediaData::Type::AUDIO_DATA, aConfig.mMimeType,
+ aFormat, aCallback)
+ {
+ JNIEnv* const env = jni::GetEnvForThread();
+
+ jni::ByteBuffer::LocalRef buffer(env);
+ NS_ENSURE_SUCCESS_VOID(aFormat->GetByteBuffer(NS_LITERAL_STRING("csd-0"),
+ &buffer));
+
+ if (!buffer && aConfig.mCodecSpecificConfig->Length() >= 2) {
+ buffer = jni::ByteBuffer::New(
+ aConfig.mCodecSpecificConfig->Elements(),
+ aConfig.mCodecSpecificConfig->Length());
+ NS_ENSURE_SUCCESS_VOID(aFormat->SetByteBuffer(NS_LITERAL_STRING("csd-0"),
+ buffer));
+ }
+ }
+
+ const char* GetDescriptionName() const override
+ {
+ return "android audio decoder";
+ }
+
+ nsresult Output(BufferInfo::Param aInfo, void* aBuffer,
+ MediaFormat::Param aFormat, const TimeUnit& aDuration)
+ {
+ // The output on Android is always 16-bit signed
+ nsresult rv;
+ int32_t numChannels;
+ NS_ENSURE_SUCCESS(rv =
+ aFormat->GetInteger(NS_LITERAL_STRING("channel-count"), &numChannels), rv);
+ AudioConfig::ChannelLayout layout(numChannels);
+ if (!layout.IsValid()) {
+ return NS_ERROR_FAILURE;
+ }
+
+ int32_t sampleRate;
+ NS_ENSURE_SUCCESS(rv =
+ aFormat->GetInteger(NS_LITERAL_STRING("sample-rate"), &sampleRate), rv);
+
+ int32_t size;
+ NS_ENSURE_SUCCESS(rv = aInfo->Size(&size), rv);
+
+ int32_t offset;
+ NS_ENSURE_SUCCESS(rv = aInfo->Offset(&offset), rv);
+
+#ifdef MOZ_SAMPLE_TYPE_S16
+ const int32_t numSamples = size / 2;
+#else
+#error We only support 16-bit integer PCM
+#endif
+
+ const int32_t numFrames = numSamples / numChannels;
+ AlignedAudioBuffer audio(numSamples);
+ if (!audio) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ const uint8_t* bufferStart = static_cast<uint8_t*>(aBuffer) + offset;
+ PodCopy(audio.get(), reinterpret_cast<const AudioDataValue*>(bufferStart),
+ numSamples);
+
+ int64_t presentationTimeUs;
+ NS_ENSURE_SUCCESS(rv = aInfo->PresentationTimeUs(&presentationTimeUs), rv);
+
+ RefPtr<AudioData> data = new AudioData(0, presentationTimeUs,
+ aDuration.ToMicroseconds(),
+ numFrames,
+ Move(audio),
+ numChannels,
+ sampleRate);
+ INVOKE_CALLBACK(Output, data);
+ return NS_OK;
+ }
+};
+
+MediaDataDecoder*
+MediaCodecDataDecoder::CreateAudioDecoder(const AudioInfo& aConfig,
+ MediaFormat::Param aFormat,
+ MediaDataDecoderCallback* aCallback)
+{
+ return new AudioDataDecoder(aConfig, aFormat, aCallback);
+}
+
+MediaDataDecoder*
+MediaCodecDataDecoder::CreateVideoDecoder(const VideoInfo& aConfig,
+ MediaFormat::Param aFormat,
+ MediaDataDecoderCallback* aCallback,
+ layers::ImageContainer* aImageContainer)
+{
+ return new VideoDataDecoder(aConfig, aFormat, aCallback, aImageContainer);
+}
+
+MediaCodecDataDecoder::MediaCodecDataDecoder(MediaData::Type aType,
+ const nsACString& aMimeType,
+ MediaFormat::Param aFormat,
+ MediaDataDecoderCallback* aCallback)
+ : mType(aType)
+ , mMimeType(aMimeType)
+ , mFormat(aFormat)
+ , mCallback(aCallback)
+ , mInputBuffers(nullptr)
+ , mOutputBuffers(nullptr)
+ , mMonitor("MediaCodecDataDecoder::mMonitor")
+ , mState(ModuleState::kDecoding)
+{
+
+}
+
+MediaCodecDataDecoder::~MediaCodecDataDecoder()
+{
+ Shutdown();
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+MediaCodecDataDecoder::Init()
+{
+ nsresult rv = InitDecoder(nullptr);
+
+ TrackInfo::TrackType type =
+ (mType == MediaData::AUDIO_DATA ? TrackInfo::TrackType::kAudioTrack
+ : TrackInfo::TrackType::kVideoTrack);
+
+ return NS_SUCCEEDED(rv) ?
+ InitPromise::CreateAndResolve(type, __func__) :
+ InitPromise::CreateAndReject(
+ NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+}
+
+nsresult
+MediaCodecDataDecoder::InitDecoder(Surface::Param aSurface)
+{
+ mDecoder = CreateDecoder(mMimeType);
+ if (!mDecoder) {
+ INVOKE_CALLBACK(Error,
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__));
+ return NS_ERROR_FAILURE;
+ }
+
+ nsresult rv;
+ NS_ENSURE_SUCCESS(rv = mDecoder->Configure(mFormat, aSurface, nullptr, 0), rv);
+ NS_ENSURE_SUCCESS(rv = mDecoder->Start(), rv);
+
+ NS_ENSURE_SUCCESS(rv = ResetInputBuffers(), rv);
+ NS_ENSURE_SUCCESS(rv = ResetOutputBuffers(), rv);
+
+ nsCOMPtr<nsIRunnable> r = NewRunnableMethod(this, &MediaCodecDataDecoder::DecoderLoop);
+ rv = NS_NewNamedThread("MC Decoder", getter_AddRefs(mThread), r);
+
+ return rv;
+}
+
+// This is in usec, so that's 10ms.
+static const int64_t kDecoderTimeout = 10000;
+
+#define BREAK_ON_DECODER_ERROR() \
+ if (NS_FAILED(res)) { \
+ NS_WARNING("Exiting decoder loop due to exception"); \
+ if (mState == ModuleState::kDrainDecoder) { \
+ INVOKE_CALLBACK(DrainComplete); \
+ SetState(ModuleState::kDecoding); \
+ } \
+ INVOKE_CALLBACK(Error, MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__)); \
+ break; \
+ }
+
+nsresult
+MediaCodecDataDecoder::GetInputBuffer(
+ JNIEnv* aEnv, int aIndex, jni::Object::LocalRef* aBuffer)
+{
+ MOZ_ASSERT(aEnv);
+ MOZ_ASSERT(!*aBuffer);
+
+ int numTries = 2;
+
+ while (numTries--) {
+ *aBuffer = jni::Object::LocalRef::Adopt(
+ aEnv->GetObjectArrayElement(mInputBuffers.Get(), aIndex));
+ if (*aBuffer) {
+ return NS_OK;
+ }
+ nsresult res = ResetInputBuffers();
+ if (NS_FAILED(res)) {
+ return res;
+ }
+ }
+ return NS_ERROR_FAILURE;
+}
+
+bool
+MediaCodecDataDecoder::WaitForInput()
+{
+ MonitorAutoLock lock(mMonitor);
+
+ while (mState == ModuleState::kDecoding && mQueue.empty()) {
+ // Signal that we require more input.
+ INVOKE_CALLBACK(InputExhausted);
+ lock.Wait();
+ }
+
+ return mState != ModuleState::kStopping;
+}
+
+
+already_AddRefed<MediaRawData>
+MediaCodecDataDecoder::PeekNextSample()
+{
+ MonitorAutoLock lock(mMonitor);
+
+ if (mState == ModuleState::kFlushing) {
+ mDecoder->Flush();
+ ClearQueue();
+ SetState(ModuleState::kDecoding);
+ lock.Notify();
+ return nullptr;
+ }
+
+ if (mQueue.empty()) {
+ if (mState == ModuleState::kDrainQueue) {
+ SetState(ModuleState::kDrainDecoder);
+ }
+ return nullptr;
+ }
+
+ // We're not stopping or flushing, so try to get a sample.
+ return RefPtr<MediaRawData>(mQueue.front()).forget();
+}
+
+nsresult
+MediaCodecDataDecoder::QueueSample(const MediaRawData* aSample)
+{
+ MOZ_ASSERT(aSample);
+ AutoLocalJNIFrame frame(jni::GetEnvForThread(), 1);
+
+ // We have a sample, try to feed it to the decoder.
+ int32_t inputIndex = -1;
+ nsresult res = mDecoder->DequeueInputBuffer(kDecoderTimeout, &inputIndex);
+ if (NS_FAILED(res)) {
+ return res;
+ }
+
+ if (inputIndex < 0) {
+ // There is no valid input buffer available.
+ return NS_ERROR_FAILURE;
+ }
+
+ jni::Object::LocalRef buffer(frame.GetEnv());
+ res = GetInputBuffer(frame.GetEnv(), inputIndex, &buffer);
+ if (NS_FAILED(res)) {
+ return res;
+ }
+
+ void* directBuffer = frame.GetEnv()->GetDirectBufferAddress(buffer.Get());
+
+ MOZ_ASSERT(frame.GetEnv()->GetDirectBufferCapacity(buffer.Get()) >=
+ aSample->Size(),
+ "Decoder buffer is not large enough for sample");
+
+ PodCopy(static_cast<uint8_t*>(directBuffer), aSample->Data(), aSample->Size());
+
+ CryptoInfo::LocalRef cryptoInfo = GetCryptoInfoFromSample(aSample);
+ if (cryptoInfo) {
+ res = mDecoder->QueueSecureInputBuffer(inputIndex, 0, cryptoInfo,
+ aSample->mTime, 0);
+ } else {
+ res = mDecoder->QueueInputBuffer(inputIndex, 0, aSample->Size(),
+ aSample->mTime, 0);
+ }
+
+ if (NS_FAILED(res)) {
+ return res;
+ }
+
+ mDurations.push_back(TimeUnit::FromMicroseconds(aSample->mDuration));
+ return NS_OK;
+}
+
+nsresult
+MediaCodecDataDecoder::QueueEOS()
+{
+ mMonitor.AssertCurrentThreadOwns();
+
+ nsresult res = NS_OK;
+ int32_t inputIndex = -1;
+ res = mDecoder->DequeueInputBuffer(kDecoderTimeout, &inputIndex);
+ if (NS_FAILED(res) || inputIndex < 0) {
+ return res;
+ }
+
+ res = mDecoder->QueueInputBuffer(inputIndex, 0, 0, 0,
+ MediaCodec::BUFFER_FLAG_END_OF_STREAM);
+ if (NS_SUCCEEDED(res)) {
+ SetState(ModuleState::kDrainWaitEOS);
+ mMonitor.Notify();
+ }
+ return res;
+}
+
+void
+MediaCodecDataDecoder::HandleEOS(int32_t aOutputStatus)
+{
+ MonitorAutoLock lock(mMonitor);
+
+ if (mState == ModuleState::kDrainWaitEOS) {
+ SetState(ModuleState::kDecoding);
+ mMonitor.Notify();
+
+ INVOKE_CALLBACK(DrainComplete);
+ }
+
+ mDecoder->ReleaseOutputBuffer(aOutputStatus, false);
+}
+
+Maybe<TimeUnit>
+MediaCodecDataDecoder::GetOutputDuration()
+{
+ if (mDurations.empty()) {
+ return Nothing();
+ }
+ const Maybe<TimeUnit> duration = Some(mDurations.front());
+ mDurations.pop_front();
+ return duration;
+}
+
+nsresult
+MediaCodecDataDecoder::ProcessOutput(
+ BufferInfo::Param aInfo, MediaFormat::Param aFormat, int32_t aStatus)
+{
+ AutoLocalJNIFrame frame(jni::GetEnvForThread(), 1);
+
+ const Maybe<TimeUnit> duration = GetOutputDuration();
+ if (!duration) {
+ // Some devices report failure in QueueSample while actually succeeding at
+ // it, in which case we get an output buffer without having a cached duration
+ // (bug 1273523).
+ return NS_OK;
+ }
+
+ const auto buffer = jni::Object::LocalRef::Adopt(
+ frame.GetEnv()->GetObjectArrayElement(mOutputBuffers.Get(), aStatus));
+
+ if (buffer) {
+ // The buffer will be null on Android L if we are decoding to a Surface.
+ void* directBuffer = frame.GetEnv()->GetDirectBufferAddress(buffer.Get());
+ Output(aInfo, directBuffer, aFormat, duration.value());
+ }
+
+ // The Surface will be updated at this point (for video).
+ mDecoder->ReleaseOutputBuffer(aStatus, true);
+ PostOutput(aInfo, aFormat, duration.value());
+
+ return NS_OK;
+}
+
+void
+MediaCodecDataDecoder::DecoderLoop()
+{
+ bool isOutputDone = false;
+ AutoLocalJNIFrame frame(jni::GetEnvForThread(), 1);
+ MediaFormat::LocalRef outputFormat(frame.GetEnv());
+ nsresult res = NS_OK;
+
+ while (WaitForInput()) {
+ RefPtr<MediaRawData> sample = PeekNextSample();
+
+ {
+ MonitorAutoLock lock(mMonitor);
+ if (mState == ModuleState::kDrainDecoder) {
+ MOZ_ASSERT(!sample, "Shouldn't have a sample when pushing EOF frame");
+ res = QueueEOS();
+ BREAK_ON_DECODER_ERROR();
+ }
+ }
+
+ if (sample) {
+ res = QueueSample(sample);
+ if (NS_SUCCEEDED(res)) {
+ // We've fed this into the decoder, so remove it from the queue.
+ MonitorAutoLock lock(mMonitor);
+ MOZ_RELEASE_ASSERT(mQueue.size(), "Queue may not be empty");
+ mQueue.pop_front();
+ isOutputDone = false;
+ }
+ }
+
+ if (isOutputDone) {
+ continue;
+ }
+
+ BufferInfo::LocalRef bufferInfo;
+ nsresult res = BufferInfo::New(&bufferInfo);
+ BREAK_ON_DECODER_ERROR();
+
+ int32_t outputStatus = -1;
+ res = mDecoder->DequeueOutputBuffer(bufferInfo, kDecoderTimeout,
+ &outputStatus);
+ BREAK_ON_DECODER_ERROR();
+
+ if (outputStatus == MediaCodec::INFO_TRY_AGAIN_LATER) {
+ // We might want to call mCallback->InputExhausted() here, but there seem to
+ // be some possibly bad interactions with the threading.
+ } else if (outputStatus == MediaCodec::INFO_OUTPUT_BUFFERS_CHANGED) {
+ res = ResetOutputBuffers();
+ BREAK_ON_DECODER_ERROR();
+ } else if (outputStatus == MediaCodec::INFO_OUTPUT_FORMAT_CHANGED) {
+ res = mDecoder->GetOutputFormat(ReturnTo(&outputFormat));
+ BREAK_ON_DECODER_ERROR();
+ } else if (outputStatus < 0) {
+ NS_WARNING("Unknown error from decoder!");
+ INVOKE_CALLBACK(Error,
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ __func__));
+ // Don't break here just in case it's recoverable. If it's not, other
+ // stuff will fail later and we'll bail out.
+ } else {
+ // We have a valid buffer index >= 0 here.
+ int32_t flags;
+ nsresult res = bufferInfo->Flags(&flags);
+ BREAK_ON_DECODER_ERROR();
+
+ if (flags & MediaCodec::BUFFER_FLAG_END_OF_STREAM) {
+ HandleEOS(outputStatus);
+ isOutputDone = true;
+ // We only queue empty EOF frames, so we're done for now.
+ continue;
+ }
+
+ res = ProcessOutput(bufferInfo, outputFormat, outputStatus);
+ BREAK_ON_DECODER_ERROR();
+ }
+ }
+
+ Cleanup();
+
+ // We're done.
+ MonitorAutoLock lock(mMonitor);
+ SetState(ModuleState::kShutdown);
+ mMonitor.Notify();
+}
+
+const char*
+MediaCodecDataDecoder::ModuleStateStr(ModuleState aState) {
+ switch (aState) {
+ case ModuleState::kDecoding: return "Decoding";
+ case ModuleState::kFlushing: return "Flushing";
+ case ModuleState::kDrainQueue: return "DrainQueue";
+ case ModuleState::kDrainDecoder: return "DrainDecoder";
+ case ModuleState::kDrainWaitEOS: return "DrainWaitEOS";
+ case ModuleState::kStopping: return "Stopping";
+ case ModuleState::kShutdown: return "Shutdown";
+ default: MOZ_ASSERT_UNREACHABLE("Invalid state.");
+ }
+ return "Unknown";
+}
+
+bool
+MediaCodecDataDecoder::SetState(ModuleState aState)
+{
+ bool ok = true;
+
+ if (mState == ModuleState::kShutdown) {
+ ok = false;
+ } else if (mState == ModuleState::kStopping) {
+ ok = aState == ModuleState::kShutdown;
+ } else if (aState == ModuleState::kDrainDecoder) {
+ ok = mState == ModuleState::kDrainQueue;
+ } else if (aState == ModuleState::kDrainWaitEOS) {
+ ok = mState == ModuleState::kDrainDecoder;
+ }
+
+ if (ok) {
+ LOG("%s -> %s", ModuleStateStr(mState), ModuleStateStr(aState));
+ mState = aState;
+ } else {
+ LOG("Failed to transition from %s to %s state", ModuleStateStr(mState), ModuleStateStr(aState));
+ }
+
+ return ok;
+}
+
+void
+MediaCodecDataDecoder::ClearQueue()
+{
+ mMonitor.AssertCurrentThreadOwns();
+
+ mQueue.clear();
+ mDurations.clear();
+}
+
+void
+MediaCodecDataDecoder::Input(MediaRawData* aSample)
+{
+ MonitorAutoLock lock(mMonitor);
+ mQueue.push_back(aSample);
+ lock.NotifyAll();
+}
+
+nsresult
+MediaCodecDataDecoder::ResetInputBuffers()
+{
+ return mDecoder->GetInputBuffers(ReturnTo(&mInputBuffers));
+}
+
+nsresult
+MediaCodecDataDecoder::ResetOutputBuffers()
+{
+ return mDecoder->GetOutputBuffers(ReturnTo(&mOutputBuffers));
+}
+
+void
+MediaCodecDataDecoder::Flush()
+{
+ MonitorAutoLock lock(mMonitor);
+ if (!SetState(ModuleState::kFlushing)) {
+ return;
+ }
+ lock.Notify();
+
+ while (mState == ModuleState::kFlushing) {
+ lock.Wait();
+ }
+}
+
+void
+MediaCodecDataDecoder::Drain()
+{
+ MonitorAutoLock lock(mMonitor);
+ if (mState == ModuleState::kDrainDecoder ||
+ mState == ModuleState::kDrainQueue) {
+ return;
+ }
+
+ SetState(ModuleState::kDrainQueue);
+ lock.Notify();
+}
+
+
+void
+MediaCodecDataDecoder::Shutdown()
+{
+ MonitorAutoLock lock(mMonitor);
+
+ SetState(ModuleState::kStopping);
+ lock.Notify();
+
+ while (mThread && mState != ModuleState::kShutdown) {
+ lock.Wait();
+ }
+
+ if (mThread) {
+ mThread->Shutdown();
+ mThread = nullptr;
+ }
+
+ if (mDecoder) {
+ mDecoder->Stop();
+ mDecoder->Release();
+ mDecoder = nullptr;
+ }
+}
+
+} // mozilla
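
SetState() above only permits a subset of transitions: nothing leaves kShutdown, kStopping can only move to kShutdown, kDrainDecoder must be entered from kDrainQueue, and kDrainWaitEOS only from kDrainDecoder. A standalone sketch of the same guard, compact enough to exercise outside the decoder (the enum mirrors ModuleState; the function name is illustrative):

#include <cassert>
#include <iostream>

enum class ModuleState {
  kDecoding, kFlushing, kDrainQueue, kDrainDecoder,
  kDrainWaitEOS, kStopping, kShutdown
};

// Mirrors the guard logic in MediaCodecDataDecoder::SetState(): returns
// whether moving from aFrom to aTo is a legal transition.
bool CanTransition(ModuleState aFrom, ModuleState aTo) {
  if (aFrom == ModuleState::kShutdown) {
    return false;                               // terminal state
  }
  if (aFrom == ModuleState::kStopping) {
    return aTo == ModuleState::kShutdown;       // stopping only ends in shutdown
  }
  if (aTo == ModuleState::kDrainDecoder) {
    return aFrom == ModuleState::kDrainQueue;   // drain the queue first
  }
  if (aTo == ModuleState::kDrainWaitEOS) {
    return aFrom == ModuleState::kDrainDecoder; // EOS wait follows the decoder drain
  }
  return true;                                  // everything else is allowed
}

int main() {
  assert(CanTransition(ModuleState::kDecoding, ModuleState::kFlushing));
  assert(CanTransition(ModuleState::kDrainQueue, ModuleState::kDrainDecoder));
  assert(!CanTransition(ModuleState::kDecoding, ModuleState::kDrainWaitEOS));
  assert(!CanTransition(ModuleState::kShutdown, ModuleState::kDecoding));
  std::cout << "all transition checks passed\n";
}
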
diff --git a/dom/media/platforms/android/MediaCodecDataDecoder.h b/dom/media/platforms/android/MediaCodecDataDecoder.h
new file mode 100644
index 000000000..0db6407bf
--- /dev/null
+++ b/dom/media/platforms/android/MediaCodecDataDecoder.h
@@ -0,0 +1,126 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef MediaCodecDataDecoder_h_
+#define MediaCodecDataDecoder_h_
+
+#include "AndroidDecoderModule.h"
+
+#include "MediaCodec.h"
+#include "SurfaceTexture.h"
+#include "TimeUnits.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/Maybe.h"
+
+#include <deque>
+
+namespace mozilla {
+
+typedef std::deque<RefPtr<MediaRawData>> SampleQueue;
+
+class MediaCodecDataDecoder : public MediaDataDecoder {
+public:
+ static MediaDataDecoder* CreateAudioDecoder(const AudioInfo& aConfig,
+ java::sdk::MediaFormat::Param aFormat,
+ MediaDataDecoderCallback* aCallback);
+
+ static MediaDataDecoder* CreateVideoDecoder(const VideoInfo& aConfig,
+ java::sdk::MediaFormat::Param aFormat,
+ MediaDataDecoderCallback* aCallback,
+ layers::ImageContainer* aImageContainer);
+
+ virtual ~MediaCodecDataDecoder();
+
+ RefPtr<MediaDataDecoder::InitPromise> Init() override;
+ void Flush() override;
+ void Drain() override;
+ void Shutdown() override;
+ void Input(MediaRawData* aSample) override;
+ const char* GetDescriptionName() const override
+ {
+ return "Android MediaCodec decoder";
+ }
+
+protected:
+ enum class ModuleState : uint8_t {
+ kDecoding = 0,
+ kFlushing,
+ kDrainQueue,
+ kDrainDecoder,
+ kDrainWaitEOS,
+ kStopping,
+ kShutdown
+ };
+
+ friend class AndroidDecoderModule;
+
+ MediaCodecDataDecoder(MediaData::Type aType,
+ const nsACString& aMimeType,
+ java::sdk::MediaFormat::Param aFormat,
+ MediaDataDecoderCallback* aCallback);
+
+ static const char* ModuleStateStr(ModuleState aState);
+
+ virtual nsresult InitDecoder(java::sdk::Surface::Param aSurface);
+
+ virtual nsresult Output(java::sdk::BufferInfo::Param aInfo, void* aBuffer,
+ java::sdk::MediaFormat::Param aFormat, const media::TimeUnit& aDuration)
+ {
+ return NS_OK;
+ }
+
+ virtual nsresult PostOutput(java::sdk::BufferInfo::Param aInfo,
+ java::sdk::MediaFormat::Param aFormat, const media::TimeUnit& aDuration)
+ {
+ return NS_OK;
+ }
+
+ virtual void Cleanup() {};
+
+ nsresult ResetInputBuffers();
+ nsresult ResetOutputBuffers();
+
+ nsresult GetInputBuffer(JNIEnv* env, int index, jni::Object::LocalRef* buffer);
+ bool WaitForInput();
+ already_AddRefed<MediaRawData> PeekNextSample();
+ nsresult QueueSample(const MediaRawData* aSample);
+ nsresult QueueEOS();
+ void HandleEOS(int32_t aOutputStatus);
+ Maybe<media::TimeUnit> GetOutputDuration();
+ nsresult ProcessOutput(java::sdk::BufferInfo::Param aInfo,
+ java::sdk::MediaFormat::Param aFormat,
+ int32_t aStatus);
+ // Sets decoder state and returns whether the new state has become effective.
+ bool SetState(ModuleState aState);
+ void DecoderLoop();
+
+ virtual void ClearQueue();
+
+ MediaData::Type mType;
+
+ nsAutoCString mMimeType;
+ java::sdk::MediaFormat::GlobalRef mFormat;
+
+ MediaDataDecoderCallback* mCallback;
+
+ java::sdk::MediaCodec::GlobalRef mDecoder;
+
+ jni::ObjectArray::GlobalRef mInputBuffers;
+ jni::ObjectArray::GlobalRef mOutputBuffers;
+
+ nsCOMPtr<nsIThread> mThread;
+
+ // Only these members are protected by mMonitor.
+ Monitor mMonitor;
+
+ ModuleState mState;
+
+ SampleQueue mQueue;
+ // Durations are stored in microseconds.
+ std::deque<media::TimeUnit> mDurations;
+};
+
+} // namespace mozilla
+
+#endif
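
Input() pushes a sample onto mQueue and notifies mMonitor, while the decoder thread sits in WaitForInput() until either a sample arrives or the state leaves kDecoding. A standalone sketch of that producer/consumer handshake using std::mutex and std::condition_variable (the stop flag and all names are illustrative):

#include <condition_variable>
#include <deque>
#include <iostream>
#include <mutex>
#include <thread>

// Minimal blocking sample queue: Push() wakes the decoder loop, WaitAndPop()
// blocks until a sample arrives or the queue is stopped.
class SampleQueue {
public:
  void Push(int aSample) {
    std::lock_guard<std::mutex> lock(mMutex);
    mQueue.push_back(aSample);
    mCond.notify_all();
  }

  void Stop() {
    std::lock_guard<std::mutex> lock(mMutex);
    mStopped = true;
    mCond.notify_all();
  }

  // Returns false when stopping, mirroring WaitForInput()'s return value.
  bool WaitAndPop(int& aOut) {
    std::unique_lock<std::mutex> lock(mMutex);
    mCond.wait(lock, [&] { return mStopped || !mQueue.empty(); });
    if (mStopped) {
      return false;
    }
    aOut = mQueue.front();
    mQueue.pop_front();
    return true;
  }

private:
  std::mutex mMutex;
  std::condition_variable mCond;
  std::deque<int> mQueue;
  bool mStopped = false;
};

int main() {
  SampleQueue queue;
  std::thread decoderLoop([&] {
    int sample;
    while (queue.WaitAndPop(sample)) {   // like DecoderLoop()/WaitForInput()
      std::cout << "decoding sample " << sample << "\n";
    }
  });

  queue.Push(1);
  queue.Push(2);
  queue.Stop();                          // like Shutdown() entering kStopping
  decoderLoop.join();
}

The real WaitForInput() additionally signals InputExhausted() each time it finds the queue empty, so the reader keeps the decoder fed.
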
diff --git a/dom/media/platforms/android/RemoteDataDecoder.cpp b/dom/media/platforms/android/RemoteDataDecoder.cpp
new file mode 100644
index 000000000..56af2601f
--- /dev/null
+++ b/dom/media/platforms/android/RemoteDataDecoder.cpp
@@ -0,0 +1,489 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "AndroidDecoderModule.h"
+#include "AndroidBridge.h"
+#include "AndroidSurfaceTexture.h"
+#include "FennecJNINatives.h"
+#include "GLImages.h"
+
+#include "MediaData.h"
+#include "MediaInfo.h"
+#include "VideoUtils.h"
+#include "VPXDecoder.h"
+
+#include "nsThreadUtils.h"
+#include "nsPromiseFlatString.h"
+#include "nsIGfxInfo.h"
+
+#include "prlog.h"
+
+#include <jni.h>
+
+#include <deque>
+
+#undef LOG
+#define LOG(arg, ...) MOZ_LOG(sAndroidDecoderModuleLog, \
+ mozilla::LogLevel::Debug, ("RemoteDataDecoder(%p)::%s: " arg, \
+ this, __func__, ##__VA_ARGS__))
+
+using namespace mozilla;
+using namespace mozilla::gl;
+using namespace mozilla::java;
+using namespace mozilla::java::sdk;
+using media::TimeUnit;
+
+namespace mozilla {
+
+class JavaCallbacksSupport
+ : public CodecProxy::NativeCallbacks::Natives<JavaCallbacksSupport>
+{
+public:
+ typedef CodecProxy::NativeCallbacks::Natives<JavaCallbacksSupport> Base;
+ using Base::AttachNative;
+
+ JavaCallbacksSupport(MediaDataDecoderCallback* aDecoderCallback)
+ : mDecoderCallback(aDecoderCallback)
+ {
+ MOZ_ASSERT(aDecoderCallback);
+ }
+
+ virtual ~JavaCallbacksSupport() {}
+
+ void OnInputExhausted()
+ {
+ if (mDecoderCallback) {
+ mDecoderCallback->InputExhausted();
+ }
+ }
+
+ virtual void HandleOutput(Sample::Param aSample) = 0;
+
+ void OnOutput(jni::Object::Param aSample)
+ {
+ if (mDecoderCallback) {
+ HandleOutput(Sample::Ref::From(aSample));
+ }
+ }
+
+ virtual void HandleOutputFormatChanged(MediaFormat::Param aFormat) {};
+
+ void OnOutputFormatChanged(jni::Object::Param aFormat)
+ {
+ if (mDecoderCallback) {
+ HandleOutputFormatChanged(MediaFormat::Ref::From(aFormat));
+ }
+ }
+
+ void OnError(bool aIsFatal)
+ {
+ if (mDecoderCallback) {
+ mDecoderCallback->Error(aIsFatal ?
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__) :
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__));
+ }
+ }
+
+ void DisposeNative()
+ {
+ // TODO
+ }
+
+ void Cancel()
+ {
+ mDecoderCallback = nullptr;
+ }
+
+protected:
+ MediaDataDecoderCallback* mDecoderCallback;
+};
+
+struct SampleTime final
+{
+ SampleTime(int64_t aStart, int64_t aDuration)
+ : mStart(aStart)
+ , mDuration(aDuration)
+ {}
+
+ int64_t mStart;
+ int64_t mDuration;
+};
+
+
+class RemoteVideoDecoder final : public RemoteDataDecoder
+{
+public:
+ class CallbacksSupport final : public JavaCallbacksSupport
+ {
+ public:
+ CallbacksSupport(RemoteVideoDecoder* aDecoder, MediaDataDecoderCallback* aCallback)
+ : JavaCallbacksSupport(aCallback)
+ , mDecoder(aDecoder)
+ {}
+
+ virtual ~CallbacksSupport() {}
+
+ void HandleOutput(Sample::Param aSample) override
+ {
+ Maybe<int64_t> durationUs = mDecoder->mInputDurations.Get();
+ if (!durationUs) {
+ return;
+ }
+
+ BufferInfo::LocalRef info = aSample->Info();
+
+ int32_t flags;
+ bool ok = NS_SUCCEEDED(info->Flags(&flags));
+ MOZ_ASSERT(ok);
+
+ int32_t offset;
+ ok &= NS_SUCCEEDED(info->Offset(&offset));
+ MOZ_ASSERT(ok);
+
+ int64_t presentationTimeUs;
+ ok &= NS_SUCCEEDED(info->PresentationTimeUs(&presentationTimeUs));
+ MOZ_ASSERT(ok);
+
+ int32_t size;
+ ok &= NS_SUCCEEDED(info->Size(&size));
+ MOZ_ASSERT(ok);
+
+ NS_ENSURE_TRUE_VOID(ok);
+
+ if (size > 0) {
+ RefPtr<layers::Image> img =
+ new SurfaceTextureImage(mDecoder->mSurfaceTexture.get(), mDecoder->mConfig.mDisplay,
+ gl::OriginPos::BottomLeft);
+
+ RefPtr<VideoData> v =
+ VideoData::CreateFromImage(mDecoder->mConfig,
+ offset,
+ presentationTimeUs,
+ durationUs.value(),
+ img,
+ !!(flags & MediaCodec::BUFFER_FLAG_SYNC_FRAME),
+ presentationTimeUs,
+ gfx::IntRect(0, 0,
+ mDecoder->mConfig.mDisplay.width,
+ mDecoder->mConfig.mDisplay.height));
+
+ mDecoderCallback->Output(v);
+ }
+
+ if ((flags & MediaCodec::BUFFER_FLAG_END_OF_STREAM) != 0) {
+ mDecoderCallback->DrainComplete();
+ }
+ }
+
+ friend class RemoteDataDecoder;
+
+ private:
+ RemoteVideoDecoder* mDecoder;
+ };
+
+ RemoteVideoDecoder(const VideoInfo& aConfig,
+ MediaFormat::Param aFormat,
+ MediaDataDecoderCallback* aCallback,
+ layers::ImageContainer* aImageContainer)
+ : RemoteDataDecoder(MediaData::Type::VIDEO_DATA, aConfig.mMimeType,
+ aFormat, aCallback)
+ , mImageContainer(aImageContainer)
+ , mConfig(aConfig)
+ {
+ }
+
+ RefPtr<InitPromise> Init() override
+ {
+ mSurfaceTexture = AndroidSurfaceTexture::Create();
+ if (!mSurfaceTexture) {
+ NS_WARNING("Failed to create SurfaceTexture for video decode\n");
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+ if (!jni::IsFennec()) {
+ NS_WARNING("Remote decoding not supported in non-Fennec environment\n");
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+ // Register native methods.
+ JavaCallbacksSupport::Init();
+
+ mJavaCallbacks = CodecProxy::NativeCallbacks::New();
+ JavaCallbacksSupport::AttachNative(mJavaCallbacks,
+ mozilla::MakeUnique<CallbacksSupport>(this, mCallback));
+
+ mJavaDecoder = CodecProxy::Create(mFormat, mSurfaceTexture->JavaSurface(), mJavaCallbacks);
+ if (mJavaDecoder == nullptr) {
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+ mInputDurations.Clear();
+
+ return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__);
+ }
+
+ void Flush() override
+ {
+ mInputDurations.Clear();
+ RemoteDataDecoder::Flush();
+ }
+
+ void Drain() override
+ {
+ RemoteDataDecoder::Drain();
+ mInputDurations.Put(0);
+ }
+
+ void Input(MediaRawData* aSample) override
+ {
+ RemoteDataDecoder::Input(aSample);
+ mInputDurations.Put(aSample->mDuration);
+ }
+
+private:
+ class DurationQueue {
+ public:
+
+ void Clear()
+ {
+ mValues.clear();
+ }
+
+ void Put(int64_t aDurationUs)
+ {
+ mValues.emplace_back(aDurationUs);
+ }
+
+ Maybe<int64_t> Get()
+ {
+ if (mValues.empty()) {
+ return Nothing();
+ }
+
+ auto value = Some(mValues.front());
+ mValues.pop_front();
+
+ return value;
+ }
+
+ private:
+ std::deque<int64_t> mValues;
+ };
+
+ layers::ImageContainer* mImageContainer;
+ const VideoInfo& mConfig;
+ RefPtr<AndroidSurfaceTexture> mSurfaceTexture;
+ DurationQueue mInputDurations;
+};
+
+class RemoteAudioDecoder final : public RemoteDataDecoder
+{
+public:
+ RemoteAudioDecoder(const AudioInfo& aConfig,
+ MediaFormat::Param aFormat,
+ MediaDataDecoderCallback* aCallback)
+ : RemoteDataDecoder(MediaData::Type::AUDIO_DATA, aConfig.mMimeType,
+ aFormat, aCallback)
+ , mConfig(aConfig)
+ {
+ JNIEnv* const env = jni::GetEnvForThread();
+
+ bool formatHasCSD = false;
+ NS_ENSURE_SUCCESS_VOID(aFormat->ContainsKey(NS_LITERAL_STRING("csd-0"), &formatHasCSD));
+
+ if (!formatHasCSD && aConfig.mCodecSpecificConfig->Length() >= 2) {
+ jni::ByteBuffer::LocalRef buffer(env);
+ buffer = jni::ByteBuffer::New(
+ aConfig.mCodecSpecificConfig->Elements(),
+ aConfig.mCodecSpecificConfig->Length());
+ NS_ENSURE_SUCCESS_VOID(aFormat->SetByteBuffer(NS_LITERAL_STRING("csd-0"),
+ buffer));
+ }
+ }
+
+ RefPtr<InitPromise> Init() override
+ {
+ // Register native methods.
+ JavaCallbacksSupport::Init();
+
+ mJavaCallbacks = CodecProxy::NativeCallbacks::New();
+ JavaCallbacksSupport::AttachNative(mJavaCallbacks,
+ mozilla::MakeUnique<CallbacksSupport>(this, mCallback));
+
+ mJavaDecoder = CodecProxy::Create(mFormat, nullptr, mJavaCallbacks);
+ if (mJavaDecoder == nullptr) {
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+ return InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__);
+ }
+
+private:
+ class CallbacksSupport final : public JavaCallbacksSupport
+ {
+ public:
+ CallbacksSupport(RemoteAudioDecoder* aDecoder, MediaDataDecoderCallback* aCallback)
+ : JavaCallbacksSupport(aCallback)
+ , mDecoder(aDecoder)
+ {}
+
+ virtual ~CallbacksSupport() {}
+
+ void HandleOutput(Sample::Param aSample) override
+ {
+ BufferInfo::LocalRef info = aSample->Info();
+
+ int32_t flags;
+ bool ok = NS_SUCCEEDED(info->Flags(&flags));
+ MOZ_ASSERT(ok);
+
+ int32_t offset;
+ ok &= NS_SUCCEEDED(info->Offset(&offset));
+ MOZ_ASSERT(ok);
+
+ int64_t presentationTimeUs;
+ ok &= NS_SUCCEEDED(info->PresentationTimeUs(&presentationTimeUs));
+ MOZ_ASSERT(ok);
+
+ int32_t size;
+ ok &= NS_SUCCEEDED(info->Size(&size));
+ MOZ_ASSERT(ok);
+
+ NS_ENSURE_TRUE_VOID(ok);
+
+ if (size > 0) {
+#ifdef MOZ_SAMPLE_TYPE_S16
+ const int32_t numSamples = size / 2;
+#else
+#error We only support 16-bit integer PCM
+#endif
+
+ const int32_t numFrames = numSamples / mOutputChannels;
+ AlignedAudioBuffer audio(numSamples);
+ if (!audio) {
+ return;
+ }
+
+ jni::ByteBuffer::LocalRef dest = jni::ByteBuffer::New(audio.get(), size);
+ aSample->WriteToByteBuffer(dest);
+
+ RefPtr<AudioData> data = new AudioData(0, presentationTimeUs,
+ FramesToUsecs(numFrames, mOutputSampleRate).value(),
+ numFrames,
+ Move(audio),
+ mOutputChannels,
+ mOutputSampleRate);
+
+ mDecoderCallback->Output(data);
+ }
+
+ if ((flags & MediaCodec::BUFFER_FLAG_END_OF_STREAM) != 0) {
+ mDecoderCallback->DrainComplete();
+ return;
+ }
+ }
+
+ void HandleOutputFormatChanged(MediaFormat::Param aFormat) override
+ {
+ aFormat->GetInteger(NS_LITERAL_STRING("channel-count"), &mOutputChannels);
+ AudioConfig::ChannelLayout layout(mOutputChannels);
+ if (!layout.IsValid()) {
+ mDecoderCallback->Error(MediaResult(
+ NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Invalid channel layout:%d", mOutputChannels)));
+ return;
+ }
+ aFormat->GetInteger(NS_LITERAL_STRING("sample-rate"), &mOutputSampleRate);
+ LOG("Audio output format changed: channels:%d sample rate:%d", mOutputChannels, mOutputSampleRate);
+ }
+
+ private:
+ RemoteAudioDecoder* mDecoder;
+ int32_t mOutputChannels;
+ int32_t mOutputSampleRate;
+ };
+
+ const AudioInfo& mConfig;
+};
+
+MediaDataDecoder*
+RemoteDataDecoder::CreateAudioDecoder(const AudioInfo& aConfig,
+ MediaFormat::Param aFormat,
+ MediaDataDecoderCallback* aCallback)
+{
+ return new RemoteAudioDecoder(aConfig, aFormat, aCallback);
+}
+
+MediaDataDecoder*
+RemoteDataDecoder::CreateVideoDecoder(const VideoInfo& aConfig,
+ MediaFormat::Param aFormat,
+ MediaDataDecoderCallback* aCallback,
+ layers::ImageContainer* aImageContainer)
+{
+ return new RemoteVideoDecoder(aConfig, aFormat, aCallback, aImageContainer);
+}
+
+RemoteDataDecoder::RemoteDataDecoder(MediaData::Type aType,
+ const nsACString& aMimeType,
+ MediaFormat::Param aFormat,
+ MediaDataDecoderCallback* aCallback)
+ : mType(aType)
+ , mMimeType(aMimeType)
+ , mFormat(aFormat)
+ , mCallback(aCallback)
+{
+}
+
+void
+RemoteDataDecoder::Flush()
+{
+ mJavaDecoder->Flush();
+}
+
+void
+RemoteDataDecoder::Drain()
+{
+ BufferInfo::LocalRef bufferInfo;
+ nsresult rv = BufferInfo::New(&bufferInfo);
+ NS_ENSURE_SUCCESS_VOID(rv);
+ bufferInfo->Set(0, 0, -1, MediaCodec::BUFFER_FLAG_END_OF_STREAM);
+
+ mJavaDecoder->Input(nullptr, bufferInfo, nullptr);
+}
+
+void
+RemoteDataDecoder::Shutdown()
+{
+ LOG("");
+ MOZ_ASSERT(mJavaDecoder && mJavaCallbacks);
+
+ mJavaDecoder->Release();
+ mJavaDecoder = nullptr;
+
+ JavaCallbacksSupport::GetNative(mJavaCallbacks)->Cancel();
+ mJavaCallbacks = nullptr;
+
+ mFormat = nullptr;
+}
+
+void
+RemoteDataDecoder::Input(MediaRawData* aSample)
+{
+ MOZ_ASSERT(aSample != nullptr);
+
+ jni::ByteBuffer::LocalRef bytes = jni::ByteBuffer::New(const_cast<uint8_t*>(aSample->Data()),
+ aSample->Size());
+
+ BufferInfo::LocalRef bufferInfo;
+ nsresult rv = BufferInfo::New(&bufferInfo);
+ if (NS_FAILED(rv)) {
+ mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
+ return;
+ }
+ bufferInfo->Set(0, aSample->Size(), aSample->mTime, 0);
+
+ mJavaDecoder->Input(bytes, bufferInfo, GetCryptoInfoFromSample(aSample));
+}
+
+} // mozilla
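
RemoteVideoDecoder keeps a DurationQueue because durations are not carried through the Java MediaCodec pipeline: Input() records each sample's duration, Drain() records a zero-length entry for the EOS buffer, and HandleOutput() pops one entry per decoded frame. A standalone sketch of that FIFO with a small usage example (std::optional stands in for mozilla::Maybe; names are illustrative):

#include <cstdint>
#include <deque>
#include <iostream>
#include <optional>

// FIFO of input durations, popped as outputs arrive; mirrors the
// DurationQueue nested in RemoteVideoDecoder.
class DurationQueue {
public:
  void Clear() { mValues.clear(); }

  void Put(int64_t aDurationUs) { mValues.push_back(aDurationUs); }

  std::optional<int64_t> Get() {
    if (mValues.empty()) {
      return std::nullopt;  // output with no queued input; the caller skips it
    }
    int64_t value = mValues.front();
    mValues.pop_front();
    return value;
  }

private:
  std::deque<int64_t> mValues;
};

int main() {
  DurationQueue durations;
  durations.Put(33333);  // Input(): remember each sample's duration
  durations.Put(33333);
  durations.Put(0);      // Drain(): a zero-length entry for the EOS buffer

  // HandleOutput(): pair every decoded frame with the oldest queued duration.
  while (auto d = durations.Get()) {
    std::cout << "output frame duration " << *d << " us\n";
  }
}
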
diff --git a/dom/media/platforms/android/RemoteDataDecoder.h b/dom/media/platforms/android/RemoteDataDecoder.h
new file mode 100644
index 000000000..219539a0a
--- /dev/null
+++ b/dom/media/platforms/android/RemoteDataDecoder.h
@@ -0,0 +1,62 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef RemoteDataDecoder_h_
+#define RemoteDataDecoder_h_
+
+#include "AndroidDecoderModule.h"
+
+#include "FennecJNIWrappers.h"
+
+#include "SurfaceTexture.h"
+#include "TimeUnits.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/Maybe.h"
+
+#include <deque>
+
+namespace mozilla {
+
+class RemoteDataDecoder : public MediaDataDecoder {
+public:
+ static MediaDataDecoder* CreateAudioDecoder(const AudioInfo& aConfig,
+ java::sdk::MediaFormat::Param aFormat,
+ MediaDataDecoderCallback* aCallback);
+
+ static MediaDataDecoder* CreateVideoDecoder(const VideoInfo& aConfig,
+ java::sdk::MediaFormat::Param aFormat,
+ MediaDataDecoderCallback* aCallback,
+ layers::ImageContainer* aImageContainer);
+
+ virtual ~RemoteDataDecoder() {}
+
+ void Flush() override;
+ void Drain() override;
+ void Shutdown() override;
+ void Input(MediaRawData* aSample) override;
+ const char* GetDescriptionName() const override
+ {
+ return "android remote decoder";
+ }
+
+protected:
+ RemoteDataDecoder(MediaData::Type aType,
+ const nsACString& aMimeType,
+ java::sdk::MediaFormat::Param aFormat,
+ MediaDataDecoderCallback* aCallback);
+
+ MediaData::Type mType;
+
+ nsAutoCString mMimeType;
+ java::sdk::MediaFormat::GlobalRef mFormat;
+
+ MediaDataDecoderCallback* mCallback;
+
+ java::CodecProxy::GlobalRef mJavaDecoder;
+ java::CodecProxy::NativeCallbacks::GlobalRef mJavaCallbacks;
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/platforms/apple/AppleATDecoder.cpp b/dom/media/platforms/apple/AppleATDecoder.cpp
new file mode 100644
index 000000000..65794d82c
--- /dev/null
+++ b/dom/media/platforms/apple/AppleATDecoder.cpp
@@ -0,0 +1,722 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "AppleUtils.h"
+#include "MP4Decoder.h"
+#include "mp4_demuxer/Adts.h"
+#include "MediaInfo.h"
+#include "AppleATDecoder.h"
+#include "mozilla/Logging.h"
+#include "mozilla/SyncRunnable.h"
+#include "mozilla/UniquePtr.h"
+
+#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
+#define FourCC2Str(n) ((char[5]){(char)(n >> 24), (char)(n >> 16), (char)(n >> 8), (char)(n), 0})
+
+namespace mozilla {
+
+AppleATDecoder::AppleATDecoder(const AudioInfo& aConfig,
+ TaskQueue* aTaskQueue,
+ MediaDataDecoderCallback* aCallback)
+ : mConfig(aConfig)
+ , mFileStreamError(false)
+ , mTaskQueue(aTaskQueue)
+ , mCallback(aCallback)
+ , mConverter(nullptr)
+ , mStream(nullptr)
+ , mIsFlushing(false)
+ , mParsedFramesForAACMagicCookie(0)
+ , mErrored(false)
+{
+ MOZ_COUNT_CTOR(AppleATDecoder);
+ LOG("Creating Apple AudioToolbox decoder");
+ LOG("Audio Decoder configuration: %s %d Hz %d channels %d bits per channel",
+ mConfig.mMimeType.get(),
+ mConfig.mRate,
+ mConfig.mChannels,
+ mConfig.mBitDepth);
+
+ if (mConfig.mMimeType.EqualsLiteral("audio/mpeg")) {
+ mFormatID = kAudioFormatMPEGLayer3;
+ } else if (mConfig.mMimeType.EqualsLiteral("audio/mp4a-latm")) {
+ mFormatID = kAudioFormatMPEG4AAC;
+ } else {
+ mFormatID = 0;
+ }
+}
+
+AppleATDecoder::~AppleATDecoder()
+{
+ MOZ_COUNT_DTOR(AppleATDecoder);
+ MOZ_ASSERT(!mConverter);
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+AppleATDecoder::Init()
+{
+ if (!mFormatID) {
+ NS_ERROR("Non recognised format");
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+ return InitPromise::CreateAndResolve(TrackType::kAudioTrack, __func__);
+}
+
+void
+AppleATDecoder::Input(MediaRawData* aSample)
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ LOG("mp4 input sample %p %lld us %lld pts%s %llu bytes audio",
+ aSample,
+ aSample->mDuration,
+ aSample->mTime,
+ aSample->mKeyframe ? " keyframe" : "",
+ (unsigned long long)aSample->Size());
+
+ // Queue a task to perform the actual decoding on a separate thread.
+ nsCOMPtr<nsIRunnable> runnable =
+ NewRunnableMethod<RefPtr<MediaRawData>>(
+ this,
+ &AppleATDecoder::SubmitSample,
+ RefPtr<MediaRawData>(aSample));
+ mTaskQueue->Dispatch(runnable.forget());
+}
+
+void
+AppleATDecoder::ProcessFlush()
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ mQueuedSamples.Clear();
+ if (mConverter) {
+ OSStatus rv = AudioConverterReset(mConverter);
+ if (rv) {
+ LOG("Error %d resetting AudioConverter", rv);
+ }
+ }
+ if (mErrored) {
+ mParsedFramesForAACMagicCookie = 0;
+ mMagicCookie.Clear();
+ ProcessShutdown();
+ mErrored = false;
+ }
+}
+
+void
+AppleATDecoder::Flush()
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ LOG("Flushing AudioToolbox AAC decoder");
+ mIsFlushing = true;
+ nsCOMPtr<nsIRunnable> runnable =
+ NewRunnableMethod(this, &AppleATDecoder::ProcessFlush);
+ SyncRunnable::DispatchToThread(mTaskQueue, runnable);
+ mIsFlushing = false;
+}
+
+void
+AppleATDecoder::Drain()
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ LOG("Draining AudioToolbox AAC decoder");
+ mTaskQueue->AwaitIdle();
+ mCallback->DrainComplete();
+ Flush();
+}
+
+void
+AppleATDecoder::Shutdown()
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ nsCOMPtr<nsIRunnable> runnable =
+ NewRunnableMethod(this, &AppleATDecoder::ProcessShutdown);
+ SyncRunnable::DispatchToThread(mTaskQueue, runnable);
+}
+
+void
+AppleATDecoder::ProcessShutdown()
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+ if (mStream) {
+ OSStatus rv = AudioFileStreamClose(mStream);
+ if (rv) {
+ LOG("error %d disposing of AudioFileStream", rv);
+ return;
+ }
+ mStream = nullptr;
+ }
+
+ if (mConverter) {
+ LOG("Shutdown: Apple AudioToolbox AAC decoder");
+ OSStatus rv = AudioConverterDispose(mConverter);
+ if (rv) {
+ LOG("error %d disposing of AudioConverter", rv);
+ }
+ mConverter = nullptr;
+ }
+}
+
+struct PassthroughUserData {
+ UInt32 mChannels;
+ UInt32 mDataSize;
+ const void* mData;
+ AudioStreamPacketDescription mPacket;
+};
+
+// Error value we pass through the decoder to signal that nothing
+// has gone wrong during decoding and we're done processing the packet.
+const uint32_t kNoMoreDataErr = 'MOAR';
+
+static OSStatus
+_PassthroughInputDataCallback(AudioConverterRef aAudioConverter,
+ UInt32* aNumDataPackets /* in/out */,
+ AudioBufferList* aData /* in/out */,
+ AudioStreamPacketDescription** aPacketDesc,
+ void* aUserData)
+{
+ PassthroughUserData* userData = (PassthroughUserData*)aUserData;
+ if (!userData->mDataSize) {
+ *aNumDataPackets = 0;
+ return kNoMoreDataErr;
+ }
+
+ if (aPacketDesc) {
+ userData->mPacket.mStartOffset = 0;
+ userData->mPacket.mVariableFramesInPacket = 0;
+ userData->mPacket.mDataByteSize = userData->mDataSize;
+ *aPacketDesc = &userData->mPacket;
+ }
+
+ aData->mBuffers[0].mNumberChannels = userData->mChannels;
+ aData->mBuffers[0].mDataByteSize = userData->mDataSize;
+ aData->mBuffers[0].mData = const_cast<void*>(userData->mData);
+
+ // No more data to provide following this run.
+ userData->mDataSize = 0;
+
+ return noErr;
+}
+
+void
+AppleATDecoder::SubmitSample(MediaRawData* aSample)
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+ if (mIsFlushing) {
+ return;
+ }
+
+ MediaResult rv = NS_OK;
+ if (!mConverter) {
+ rv = SetupDecoder(aSample);
+ if (rv != NS_OK && rv != NS_ERROR_NOT_INITIALIZED) {
+ mCallback->Error(rv);
+ return;
+ }
+ }
+
+ mQueuedSamples.AppendElement(aSample);
+
+ if (rv == NS_OK) {
+ for (size_t i = 0; i < mQueuedSamples.Length(); i++) {
+ rv = DecodeSample(mQueuedSamples[i]);
+ if (NS_FAILED(rv)) {
+ mErrored = true;
+ mCallback->Error(rv);
+ return;
+ }
+ }
+ mQueuedSamples.Clear();
+ }
+ mCallback->InputExhausted();
+}
+
+MediaResult
+AppleATDecoder::DecodeSample(MediaRawData* aSample)
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+ // Array containing the queued decoded audio frames, about to be output.
+ nsTArray<AudioDataValue> outputData;
+ UInt32 channels = mOutputFormat.mChannelsPerFrame;
+ // Pick a multiple of the frame size close to a power of two
+ // for efficient allocation.
+ const uint32_t MAX_AUDIO_FRAMES = 128;
+ const uint32_t maxDecodedSamples = MAX_AUDIO_FRAMES * channels;
+
+  // Descriptions for _decompressed_ audio packets. Ignored.
+ auto packets = MakeUnique<AudioStreamPacketDescription[]>(MAX_AUDIO_FRAMES);
+
+ // This API insists on having packets spoon-fed to it from a callback.
+ // This structure exists only to pass our state.
+ PassthroughUserData userData =
+ { channels, (UInt32)aSample->Size(), aSample->Data() };
+
+ // Decompressed audio buffer
+ AlignedAudioBuffer decoded(maxDecodedSamples);
+ if (!decoded) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ do {
+ AudioBufferList decBuffer;
+ decBuffer.mNumberBuffers = 1;
+ decBuffer.mBuffers[0].mNumberChannels = channels;
+ decBuffer.mBuffers[0].mDataByteSize =
+ maxDecodedSamples * sizeof(AudioDataValue);
+ decBuffer.mBuffers[0].mData = decoded.get();
+
+ // in: the max number of packets we can handle from the decoder.
+ // out: the number of packets the decoder is actually returning.
+ UInt32 numFrames = MAX_AUDIO_FRAMES;
+
+ OSStatus rv = AudioConverterFillComplexBuffer(mConverter,
+ _PassthroughInputDataCallback,
+ &userData,
+ &numFrames /* in/out */,
+ &decBuffer,
+ packets.get());
+
+ if (rv && rv != kNoMoreDataErr) {
+ LOG("Error decoding audio sample: %d\n", rv);
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("Error decoding audio sample: %d @ %lld",
+ rv, aSample->mTime));
+ }
+
+ if (numFrames) {
+ outputData.AppendElements(decoded.get(), numFrames * channels);
+ }
+
+ if (rv == kNoMoreDataErr) {
+ break;
+ }
+ } while (true);
+
+ if (outputData.IsEmpty()) {
+ return NS_OK;
+ }
+
+ size_t numFrames = outputData.Length() / channels;
+ int rate = mOutputFormat.mSampleRate;
+ media::TimeUnit duration = FramesToTimeUnit(numFrames, rate);
+ if (!duration.IsValid()) {
+ NS_WARNING("Invalid count of accumulated audio samples");
+ return MediaResult(
+ NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+ RESULT_DETAIL(
+ "Invalid count of accumulated audio samples: num:%llu rate:%d",
+ uint64_t(numFrames), rate));
+ }
+
+#ifdef LOG_SAMPLE_DECODE
+ LOG("pushed audio at time %lfs; duration %lfs\n",
+ (double)aSample->mTime / USECS_PER_S,
+ duration.ToSeconds());
+#endif
+
+ AudioSampleBuffer data(outputData.Elements(), outputData.Length());
+ if (!data.Data()) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+ if (mChannelLayout && !mAudioConverter) {
+ AudioConfig in(*mChannelLayout.get(), rate);
+ AudioConfig out(channels, rate);
+ if (!in.IsValid() || !out.IsValid()) {
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("Invalid audio config"));
+ }
+ mAudioConverter = MakeUnique<AudioConverter>(in, out);
+ }
+ if (mAudioConverter) {
+ MOZ_ASSERT(mAudioConverter->CanWorkInPlace());
+ data = mAudioConverter->Process(Move(data));
+ }
+
+ RefPtr<AudioData> audio = new AudioData(aSample->mOffset,
+ aSample->mTime,
+ duration.ToMicroseconds(),
+ numFrames,
+ data.Forget(),
+ channels,
+ rate);
+ mCallback->Output(audio);
+ return NS_OK;
+}
+
+MediaResult
+AppleATDecoder::GetInputAudioDescription(AudioStreamBasicDescription& aDesc,
+ const nsTArray<uint8_t>& aExtraData)
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+ // Request the properties from CoreAudio using the codec magic cookie
+ AudioFormatInfo formatInfo;
+ PodZero(&formatInfo.mASBD);
+ formatInfo.mASBD.mFormatID = mFormatID;
+ if (mFormatID == kAudioFormatMPEG4AAC) {
+ formatInfo.mASBD.mFormatFlags = mConfig.mExtendedProfile;
+ }
+ formatInfo.mMagicCookieSize = aExtraData.Length();
+ formatInfo.mMagicCookie = aExtraData.Elements();
+
+ UInt32 formatListSize;
+ // Attempt to retrieve the default format using
+ // kAudioFormatProperty_FormatInfo method.
+ // This method only retrieves the FramesPerPacket information required
+ // by the decoder, which depends on the codec type and profile.
+ aDesc.mFormatID = mFormatID;
+ aDesc.mChannelsPerFrame = mConfig.mChannels;
+ aDesc.mSampleRate = mConfig.mRate;
+ UInt32 inputFormatSize = sizeof(aDesc);
+ OSStatus rv = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
+ 0,
+ NULL,
+ &inputFormatSize,
+ &aDesc);
+ if (NS_WARN_IF(rv)) {
+ return MediaResult(
+ NS_ERROR_FAILURE,
+ RESULT_DETAIL("Unable to get format info:%lld", int64_t(rv)));
+ }
+
+ // If any of the methods below fail, we will return the default format as
+ // created using kAudioFormatProperty_FormatInfo above.
+ rv = AudioFormatGetPropertyInfo(kAudioFormatProperty_FormatList,
+ sizeof(formatInfo),
+ &formatInfo,
+ &formatListSize);
+ if (rv || (formatListSize % sizeof(AudioFormatListItem))) {
+ return NS_OK;
+ }
+ size_t listCount = formatListSize / sizeof(AudioFormatListItem);
+ auto formatList = MakeUnique<AudioFormatListItem[]>(listCount);
+
+ rv = AudioFormatGetProperty(kAudioFormatProperty_FormatList,
+ sizeof(formatInfo),
+ &formatInfo,
+ &formatListSize,
+ formatList.get());
+ if (rv) {
+ return NS_OK;
+ }
+ LOG("found %u available audio stream(s)",
+ formatListSize / sizeof(AudioFormatListItem));
+ // Get the index number of the first playable format.
+ // This index number will be for the highest quality layer the platform
+ // is capable of playing.
+ UInt32 itemIndex;
+ UInt32 indexSize = sizeof(itemIndex);
+ rv = AudioFormatGetProperty(kAudioFormatProperty_FirstPlayableFormatFromList,
+ formatListSize,
+ formatList.get(),
+ &indexSize,
+ &itemIndex);
+ if (rv) {
+ return NS_OK;
+ }
+
+ aDesc = formatList[itemIndex].mASBD;
+
+ return NS_OK;
+}
+
+AudioConfig::Channel
+ConvertChannelLabel(AudioChannelLabel id)
+{
+ switch (id) {
+ case kAudioChannelLabel_Mono:
+ return AudioConfig::CHANNEL_MONO;
+ case kAudioChannelLabel_Left:
+ return AudioConfig::CHANNEL_LEFT;
+ case kAudioChannelLabel_Right:
+ return AudioConfig::CHANNEL_RIGHT;
+ case kAudioChannelLabel_Center:
+ return AudioConfig::CHANNEL_CENTER;
+ case kAudioChannelLabel_LFEScreen:
+ return AudioConfig::CHANNEL_LFE;
+ case kAudioChannelLabel_LeftSurround:
+ return AudioConfig::CHANNEL_LS;
+ case kAudioChannelLabel_RightSurround:
+ return AudioConfig::CHANNEL_RS;
+ case kAudioChannelLabel_CenterSurround:
+ return AudioConfig::CHANNEL_RCENTER;
+ case kAudioChannelLabel_RearSurroundLeft:
+ return AudioConfig::CHANNEL_RLS;
+ case kAudioChannelLabel_RearSurroundRight:
+ return AudioConfig::CHANNEL_RRS;
+ default:
+ return AudioConfig::CHANNEL_INVALID;
+ }
+}
+
+// Will set mChannelLayout if a channel layout could properly be identified
+// and is supported.
+nsresult
+AppleATDecoder::SetupChannelLayout()
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+ // Determine the channel layout.
+ UInt32 propertySize;
+ UInt32 size;
+ OSStatus status =
+ AudioConverterGetPropertyInfo(mConverter,
+ kAudioConverterOutputChannelLayout,
+ &propertySize, NULL);
+ if (status || !propertySize) {
+ LOG("Couldn't get channel layout property (%s)", FourCC2Str(status));
+ return NS_ERROR_FAILURE;
+ }
+
+ auto data = MakeUnique<uint8_t[]>(propertySize);
+ size = propertySize;
+ status =
+ AudioConverterGetProperty(mConverter, kAudioConverterInputChannelLayout,
+ &size, data.get());
+ if (status || size != propertySize) {
+ LOG("Couldn't get channel layout property (%s)",
+ FourCC2Str(status));
+ return NS_ERROR_FAILURE;
+ }
+
+ AudioChannelLayout* layout =
+ reinterpret_cast<AudioChannelLayout*>(data.get());
+ AudioChannelLayoutTag tag = layout->mChannelLayoutTag;
+
+  // If the tag is kAudioChannelLayoutTag_UseChannelDescriptions, the structure
+  // directly contains the channel layout mapping.
+ // If tag is kAudioChannelLayoutTag_UseChannelBitmap then the layout will
+ // be defined via the bitmap and can be retrieved using
+ // kAudioFormatProperty_ChannelLayoutForBitmap property.
+ // Otherwise the tag itself describes the layout.
+ if (tag != kAudioChannelLayoutTag_UseChannelDescriptions) {
+ AudioFormatPropertyID property =
+ tag == kAudioChannelLayoutTag_UseChannelBitmap
+ ? kAudioFormatProperty_ChannelLayoutForBitmap
+ : kAudioFormatProperty_ChannelLayoutForTag;
+
+ if (property == kAudioFormatProperty_ChannelLayoutForBitmap) {
+ status =
+ AudioFormatGetPropertyInfo(property,
+ sizeof(UInt32), &layout->mChannelBitmap,
+ &propertySize);
+ } else {
+ status =
+ AudioFormatGetPropertyInfo(property,
+ sizeof(AudioChannelLayoutTag), &tag,
+ &propertySize);
+ }
+ if (status || !propertySize) {
+ LOG("Couldn't get channel layout property info (%s:%s)",
+ FourCC2Str(property), FourCC2Str(status));
+ return NS_ERROR_FAILURE;
+ }
+ data = MakeUnique<uint8_t[]>(propertySize);
+ layout = reinterpret_cast<AudioChannelLayout*>(data.get());
+ size = propertySize;
+
+ if (property == kAudioFormatProperty_ChannelLayoutForBitmap) {
+ status = AudioFormatGetProperty(property,
+ sizeof(UInt32), &layout->mChannelBitmap,
+ &size, layout);
+ } else {
+ status = AudioFormatGetProperty(property,
+ sizeof(AudioChannelLayoutTag), &tag,
+ &size, layout);
+ }
+ if (status || size != propertySize) {
+ LOG("Couldn't get channel layout property (%s:%s)",
+ FourCC2Str(property), FourCC2Str(status));
+ return NS_ERROR_FAILURE;
+ }
+ // We have retrieved the channel layout from the tag or bitmap.
+ // We can now directly use the channel descriptions.
+ layout->mChannelLayoutTag = kAudioChannelLayoutTag_UseChannelDescriptions;
+ }
+
+ if (layout->mNumberChannelDescriptions > MAX_AUDIO_CHANNELS ||
+ layout->mNumberChannelDescriptions != mOutputFormat.mChannelsPerFrame) {
+    LOG("Channel layout is nonsensical or does not match the original channel count");
+ return NS_ERROR_FAILURE;
+ }
+
+ AudioConfig::Channel channels[MAX_AUDIO_CHANNELS];
+ for (uint32_t i = 0; i < layout->mNumberChannelDescriptions; i++) {
+ AudioChannelLabel id = layout->mChannelDescriptions[i].mChannelLabel;
+ AudioConfig::Channel channel = ConvertChannelLabel(id);
+ channels[i] = channel;
+ }
+ mChannelLayout =
+ MakeUnique<AudioConfig::ChannelLayout>(mOutputFormat.mChannelsPerFrame,
+ channels);
+ return NS_OK;
+}
+
+MediaResult
+AppleATDecoder::SetupDecoder(MediaRawData* aSample)
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ static const uint32_t MAX_FRAMES = 2;
+
+ if (mFormatID == kAudioFormatMPEG4AAC &&
+ mConfig.mExtendedProfile == 2 &&
+ mParsedFramesForAACMagicCookie < MAX_FRAMES) {
+ // Check for implicit SBR signalling if stream is AAC-LC
+ // This will provide us with an updated magic cookie for use with
+ // GetInputAudioDescription.
+ if (NS_SUCCEEDED(GetImplicitAACMagicCookie(aSample)) &&
+ !mMagicCookie.Length()) {
+ // nothing found yet, will try again later
+ mParsedFramesForAACMagicCookie++;
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+    // An error occurred; fall back to using the default stream description.
+ }
+
+ LOG("Initializing Apple AudioToolbox decoder");
+
+ AudioStreamBasicDescription inputFormat;
+ PodZero(&inputFormat);
+ MediaResult rv =
+ GetInputAudioDescription(inputFormat,
+ mMagicCookie.Length() ?
+ mMagicCookie : *mConfig.mExtraData);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ // Fill in the output format manually.
+ PodZero(&mOutputFormat);
+ mOutputFormat.mFormatID = kAudioFormatLinearPCM;
+ mOutputFormat.mSampleRate = inputFormat.mSampleRate;
+ mOutputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
+#if defined(MOZ_SAMPLE_TYPE_FLOAT32)
+ mOutputFormat.mBitsPerChannel = 32;
+ mOutputFormat.mFormatFlags =
+ kLinearPCMFormatFlagIsFloat |
+ 0;
+#elif defined(MOZ_SAMPLE_TYPE_S16)
+ mOutputFormat.mBitsPerChannel = 16;
+ mOutputFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | 0;
+#else
+# error Unknown audio sample type
+#endif
+ // Set up the decoder so it gives us one sample per frame
+ mOutputFormat.mFramesPerPacket = 1;
+ mOutputFormat.mBytesPerPacket = mOutputFormat.mBytesPerFrame
+ = mOutputFormat.mChannelsPerFrame * mOutputFormat.mBitsPerChannel / 8;
+
+ OSStatus status = AudioConverterNew(&inputFormat, &mOutputFormat, &mConverter);
+ if (status) {
+ LOG("Error %d constructing AudioConverter", status);
+ mConverter = nullptr;
+ return MediaResult(
+ NS_ERROR_FAILURE,
+ RESULT_DETAIL("Error constructing AudioConverter:%lld", int64_t(status)));
+ }
+
+ if (NS_FAILED(SetupChannelLayout())) {
+ NS_WARNING("Couldn't retrieve channel layout, will use default layout");
+ }
+
+ return NS_OK;
+}
+
+static void
+_MetadataCallback(void* aAppleATDecoder,
+ AudioFileStreamID aStream,
+ AudioFileStreamPropertyID aProperty,
+ UInt32* aFlags)
+{
+ AppleATDecoder* decoder = static_cast<AppleATDecoder*>(aAppleATDecoder);
+ LOG("MetadataCallback receiving: '%s'", FourCC2Str(aProperty));
+ if (aProperty == kAudioFileStreamProperty_MagicCookieData) {
+ UInt32 size;
+ Boolean writeable;
+ OSStatus rv = AudioFileStreamGetPropertyInfo(aStream,
+ aProperty,
+ &size,
+ &writeable);
+ if (rv) {
+ LOG("Couldn't get property info for '%s' (%s)",
+ FourCC2Str(aProperty), FourCC2Str(rv));
+ decoder->mFileStreamError = true;
+ return;
+ }
+ auto data = MakeUnique<uint8_t[]>(size);
+ rv = AudioFileStreamGetProperty(aStream, aProperty,
+ &size, data.get());
+ if (rv) {
+ LOG("Couldn't get property '%s' (%s)",
+ FourCC2Str(aProperty), FourCC2Str(rv));
+ decoder->mFileStreamError = true;
+ return;
+ }
+ decoder->mMagicCookie.AppendElements(data.get(), size);
+ }
+}
+
+static void
+_SampleCallback(void* aSBR,
+ UInt32 aNumBytes,
+ UInt32 aNumPackets,
+ const void* aData,
+ AudioStreamPacketDescription* aPackets)
+{
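+  // Intentionally empty: the file stream is parsed only so that
+  // _MetadataCallback can capture the magic cookie; the repackaged audio
+  // packets themselves are discarded.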
+}
+
+nsresult
+AppleATDecoder::GetImplicitAACMagicCookie(const MediaRawData* aSample)
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+ // Prepend ADTS header to AAC audio.
+ RefPtr<MediaRawData> adtssample(aSample->Clone());
+ if (!adtssample) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+ int8_t frequency_index =
+ mp4_demuxer::Adts::GetFrequencyIndex(mConfig.mRate);
+
+ bool rv = mp4_demuxer::Adts::ConvertSample(mConfig.mChannels,
+ frequency_index,
+ mConfig.mProfile,
+ adtssample);
+ if (!rv) {
+ NS_WARNING("Failed to apply ADTS header");
+ return NS_ERROR_FAILURE;
+ }
+ if (!mStream) {
+ OSStatus rv = AudioFileStreamOpen(this,
+ _MetadataCallback,
+ _SampleCallback,
+ kAudioFileAAC_ADTSType,
+ &mStream);
+ if (rv) {
+ NS_WARNING("Couldn't open AudioFileStream");
+ return NS_ERROR_FAILURE;
+ }
+ }
+
+ OSStatus status = AudioFileStreamParseBytes(mStream,
+ adtssample->Size(),
+ adtssample->Data(),
+ 0 /* discontinuity */);
+ if (status) {
+ NS_WARNING("Couldn't parse sample");
+ }
+
+ if (status || mFileStreamError || mMagicCookie.Length()) {
+    // We have either decoded the magic cookie or encountered an error;
+    // either way we won't need the stream any longer.
+ AudioFileStreamClose(mStream);
+ mStream = nullptr;
+ }
+
+ return (mFileStreamError || status) ? NS_ERROR_FAILURE : NS_OK;
+}
+
+} // namespace mozilla
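
For reference, the decode loop above can be reduced to plain AudioToolbox plus the
standard library. The sketch below is macOS-only, assumes a converter that was
already created with AudioConverterNew for interleaved 32-bit float output (the
MOZ_SAMPLE_TYPE_FLOAT32 path above), and all helper names (PacketFeed,
FeedOnePacket, DecodeOnePacket) are invented for the example; it is a reading aid,
not part of the patch.

#include <AudioToolbox/AudioToolbox.h>
#include <vector>

namespace {

const OSStatus kPacketExhausted = 'MOAR'; // Private status, as in the patch.

struct PacketFeed
{
  const void* mData;
  UInt32 mSize;
  UInt32 mChannels;
  AudioStreamPacketDescription mDesc;
};

OSStatus
FeedOnePacket(AudioConverterRef /* aConverter */,
              UInt32* aNumPackets,
              AudioBufferList* aBuffers,
              AudioStreamPacketDescription** aDesc,
              void* aUserData)
{
  PacketFeed* feed = static_cast<PacketFeed*>(aUserData);
  if (!feed->mSize) {
    *aNumPackets = 0;
    return kPacketExhausted; // Surfaced by AudioConverterFillComplexBuffer.
  }
  if (aDesc) {
    feed->mDesc = { 0, 0, feed->mSize };
    *aDesc = &feed->mDesc;
  }
  aBuffers->mBuffers[0].mNumberChannels = feed->mChannels;
  aBuffers->mBuffers[0].mDataByteSize = feed->mSize;
  aBuffers->mBuffers[0].mData = const_cast<void*>(feed->mData);
  feed->mSize = 0; // Hand the packet over exactly once.
  return noErr;
}

// Decode one compressed packet into interleaved float PCM.
std::vector<float>
DecodeOnePacket(AudioConverterRef aConverter,
                const void* aData, UInt32 aSize, UInt32 aChannels)
{
  PacketFeed feed = { aData, aSize, aChannels, AudioStreamPacketDescription() };
  const UInt32 kMaxFrames = 128;
  std::vector<float> scratch(kMaxFrames * aChannels);
  std::vector<float> pcm;

  while (true) {
    AudioBufferList buffers;
    buffers.mNumberBuffers = 1;
    buffers.mBuffers[0].mNumberChannels = aChannels;
    buffers.mBuffers[0].mDataByteSize =
      static_cast<UInt32>(scratch.size() * sizeof(float));
    buffers.mBuffers[0].mData = scratch.data();

    UInt32 frames = kMaxFrames; // In: output capacity. Out: frames produced.
    OSStatus rv = AudioConverterFillComplexBuffer(aConverter, FeedOnePacket,
                                                  &feed, &frames, &buffers,
                                                  nullptr);
    if (rv && rv != kPacketExhausted) {
      pcm.clear(); // Genuine decode error; drop partial output.
      break;
    }
    pcm.insert(pcm.end(), scratch.data(), scratch.data() + frames * aChannels);
    if (rv == kPacketExhausted) {
      break;
    }
  }
  return pcm;
}

} // namespace
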
diff --git a/dom/media/platforms/apple/AppleATDecoder.h b/dom/media/platforms/apple/AppleATDecoder.h
new file mode 100644
index 000000000..be232e07b
--- /dev/null
+++ b/dom/media/platforms/apple/AppleATDecoder.h
@@ -0,0 +1,78 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_AppleATDecoder_h
+#define mozilla_AppleATDecoder_h
+
+#include <AudioToolbox/AudioToolbox.h>
+#include "PlatformDecoderModule.h"
+#include "mozilla/ReentrantMonitor.h"
+#include "mozilla/Vector.h"
+#include "nsIThread.h"
+#include "AudioConverter.h"
+
+namespace mozilla {
+
+class TaskQueue;
+class MediaDataDecoderCallback;
+
+class AppleATDecoder : public MediaDataDecoder {
+public:
+ AppleATDecoder(const AudioInfo& aConfig,
+ TaskQueue* aTaskQueue,
+ MediaDataDecoderCallback* aCallback);
+ virtual ~AppleATDecoder();
+
+ RefPtr<InitPromise> Init() override;
+ void Input(MediaRawData* aSample) override;
+ void Flush() override;
+ void Drain() override;
+ void Shutdown() override;
+
+ const char* GetDescriptionName() const override
+ {
+ return "apple CoreMedia decoder";
+ }
+
+ // Callbacks also need access to the config.
+ const AudioInfo& mConfig;
+
+  // Used to extract the magic cookie for HE-AAC detection.
+ nsTArray<uint8_t> mMagicCookie;
+  // Will be set to true should an error occur while attempting to retrieve
+ // the magic cookie property.
+ bool mFileStreamError;
+
+private:
+ const RefPtr<TaskQueue> mTaskQueue;
+ MediaDataDecoderCallback* mCallback;
+ AudioConverterRef mConverter;
+ AudioStreamBasicDescription mOutputFormat;
+ UInt32 mFormatID;
+ AudioFileStreamID mStream;
+ nsTArray<RefPtr<MediaRawData>> mQueuedSamples;
+ UniquePtr<AudioConfig::ChannelLayout> mChannelLayout;
+ UniquePtr<AudioConverter> mAudioConverter;
+ Atomic<bool> mIsFlushing;
+
+ void ProcessFlush();
+ void ProcessShutdown();
+ void SubmitSample(MediaRawData* aSample);
+ MediaResult DecodeSample(MediaRawData* aSample);
+ MediaResult GetInputAudioDescription(AudioStreamBasicDescription& aDesc,
+ const nsTArray<uint8_t>& aExtraData);
+  // Set up the AudioConverter once all required information has been gathered.
+ // Will return NS_ERROR_NOT_INITIALIZED if more data is required.
+ MediaResult SetupDecoder(MediaRawData* aSample);
+ nsresult GetImplicitAACMagicCookie(const MediaRawData* aSample);
+ nsresult SetupChannelLayout();
+ uint32_t mParsedFramesForAACMagicCookie;
+ bool mErrored;
+};
+
+} // namespace mozilla
+
+#endif // mozilla_AppleATDecoder_h
diff --git a/dom/media/platforms/apple/AppleCMFunctions.h b/dom/media/platforms/apple/AppleCMFunctions.h
new file mode 100644
index 000000000..2c0408490
--- /dev/null
+++ b/dom/media/platforms/apple/AppleCMFunctions.h
@@ -0,0 +1,12 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Construct references to each of the CoreMedia symbols we use.
+
+LINK_FUNC(VideoFormatDescriptionCreate)
+LINK_FUNC(BlockBufferCreateWithMemoryBlock)
+LINK_FUNC(SampleBufferCreate)
+LINK_FUNC(TimeMake)
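
The LINK_FUNC lines above are an X-macro list: AppleCMLinker re-includes this
header several times with different definitions of LINK_FUNC, first to declare
and then to dlsym-resolve every symbol. Below is a toy, self-contained
illustration of the idea; the FOR_EACH_FUNC list macro, the math symbols and the
already-dlopen'ed handle are placeholders, and the real code re-includes
AppleCMFunctions.h rather than using a list macro.

#include <dlfcn.h>

// The function list, written once.
#define FOR_EACH_FUNC(APPLY) \
  APPLY(cos)                 \
  APPLY(sin)

// Pass 1: declare one function pointer per symbol.
#define DECLARE_FUNC(func) static double (*p_##func)(double) = nullptr;
FOR_EACH_FUNC(DECLARE_FUNC)
#undef DECLARE_FUNC

// Pass 2: resolve every pointer from an already-dlopen'ed library handle,
// failing the whole link step if any symbol is missing.
static bool
LinkAll(void* aHandle)
{
#define RESOLVE_FUNC(func)                                 \
  p_##func = (double (*)(double))dlsym(aHandle, #func);    \
  if (!p_##func) {                                         \
    return false;                                          \
  }
  FOR_EACH_FUNC(RESOLVE_FUNC)
#undef RESOLVE_FUNC
  return true;
}
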
diff --git a/dom/media/platforms/apple/AppleCMLinker.cpp b/dom/media/platforms/apple/AppleCMLinker.cpp
new file mode 100644
index 000000000..5227bf9e5
--- /dev/null
+++ b/dom/media/platforms/apple/AppleCMLinker.cpp
@@ -0,0 +1,104 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <dlfcn.h>
+
+#include "AppleCMLinker.h"
+#include "mozilla/ArrayUtils.h"
+#include "nsDebug.h"
+
+#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
+
+namespace mozilla {
+
+AppleCMLinker::LinkStatus
+AppleCMLinker::sLinkStatus = LinkStatus_INIT;
+
+void* AppleCMLinker::sLink = nullptr;
+CFStringRef AppleCMLinker::skPropExtensionAtoms = nullptr;
+CFStringRef AppleCMLinker::skPropFullRangeVideo = nullptr;
+
+#define LINK_FUNC(func) typeof(CM ## func) CM ## func;
+#include "AppleCMFunctions.h"
+#undef LINK_FUNC
+
+/* static */ bool
+AppleCMLinker::Link()
+{
+ if (sLinkStatus) {
+ return sLinkStatus == LinkStatus_SUCCEEDED;
+ }
+
+ const char* dlnames[] =
+ { "/System/Library/Frameworks/CoreMedia.framework/CoreMedia",
+ "/System/Library/PrivateFrameworks/CoreMedia.framework/CoreMedia" };
+ bool dlfound = false;
+ for (size_t i = 0; i < ArrayLength(dlnames); i++) {
+ if ((sLink = dlopen(dlnames[i], RTLD_NOW | RTLD_LOCAL))) {
+ dlfound = true;
+ break;
+ }
+ }
+ if (!dlfound) {
+ NS_WARNING("Couldn't load CoreMedia framework");
+ goto fail;
+ }
+
+#define LINK_FUNC2(func) \
+ func = (typeof(func))dlsym(sLink, #func); \
+ if (!func) { \
+ NS_WARNING("Couldn't load CoreMedia function " #func ); \
+ goto fail; \
+ }
+#define LINK_FUNC(func) LINK_FUNC2(CM ## func)
+#include "AppleCMFunctions.h"
+#undef LINK_FUNC
+#undef LINK_FUNC2
+
+ skPropExtensionAtoms =
+ GetIOConst("kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms");
+
+ skPropFullRangeVideo =
+ GetIOConst("kCMFormatDescriptionExtension_FullRangeVideo");
+
+ if (!skPropExtensionAtoms) {
+ goto fail;
+ }
+
+ LOG("Loaded CoreMedia framework.");
+ sLinkStatus = LinkStatus_SUCCEEDED;
+ return true;
+
+fail:
+ Unlink();
+
+ sLinkStatus = LinkStatus_FAILED;
+ return false;
+}
+
+/* static */ void
+AppleCMLinker::Unlink()
+{
+ if (sLink) {
+ LOG("Unlinking CoreMedia framework.");
+ dlclose(sLink);
+ sLink = nullptr;
+ sLinkStatus = LinkStatus_INIT;
+ }
+}
+
+/* static */ CFStringRef
+AppleCMLinker::GetIOConst(const char* symbol)
+{
+ CFStringRef* address = (CFStringRef*)dlsym(sLink, symbol);
+ if (!address) {
+ return nullptr;
+ }
+
+ return *address;
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/apple/AppleCMLinker.h b/dom/media/platforms/apple/AppleCMLinker.h
new file mode 100644
index 000000000..74372b6af
--- /dev/null
+++ b/dom/media/platforms/apple/AppleCMLinker.h
@@ -0,0 +1,46 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef AppleCMLinker_h
+#define AppleCMLinker_h
+
+extern "C" {
+#pragma GCC visibility push(default)
+#include <CoreMedia/CoreMedia.h>
+#pragma GCC visibility pop
+}
+
+#include "nscore.h"
+
+namespace mozilla {
+
+class AppleCMLinker
+{
+public:
+ static bool Link();
+ static void Unlink();
+ static CFStringRef skPropExtensionAtoms;
+ static CFStringRef skPropFullRangeVideo;
+
+private:
+ static void* sLink;
+
+ static enum LinkStatus {
+ LinkStatus_INIT = 0,
+ LinkStatus_FAILED,
+ LinkStatus_SUCCEEDED
+ } sLinkStatus;
+
+ static CFStringRef GetIOConst(const char* symbol);
+};
+
+#define LINK_FUNC(func) extern typeof(CM ## func)* CM ## func;
+#include "AppleCMFunctions.h"
+#undef LINK_FUNC
+
+} // namespace mozilla
+
+#endif // AppleCMLinker_h
diff --git a/dom/media/platforms/apple/AppleDecoderModule.cpp b/dom/media/platforms/apple/AppleDecoderModule.cpp
new file mode 100644
index 000000000..9976c86ab
--- /dev/null
+++ b/dom/media/platforms/apple/AppleDecoderModule.cpp
@@ -0,0 +1,113 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "AppleATDecoder.h"
+#include "AppleCMLinker.h"
+#include "AppleDecoderModule.h"
+#include "AppleVTDecoder.h"
+#include "AppleVTLinker.h"
+#include "MacIOSurfaceImage.h"
+#include "MediaPrefs.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Logging.h"
+#include "mozilla/gfx/gfxVars.h"
+
+namespace mozilla {
+
+bool AppleDecoderModule::sInitialized = false;
+bool AppleDecoderModule::sIsCoreMediaAvailable = false;
+bool AppleDecoderModule::sIsVTAvailable = false;
+bool AppleDecoderModule::sIsVTHWAvailable = false;
+bool AppleDecoderModule::sCanUseHardwareVideoDecoder = true;
+
+AppleDecoderModule::AppleDecoderModule()
+{
+}
+
+AppleDecoderModule::~AppleDecoderModule()
+{
+}
+
+/* static */
+void
+AppleDecoderModule::Init()
+{
+ if (sInitialized) {
+ return;
+ }
+
+ // Ensure IOSurface framework is loaded.
+ MacIOSurfaceLib::LoadLibrary();
+ const bool loaded = MacIOSurfaceLib::isInit();
+
+ // dlopen CoreMedia.framework if it's available.
+ sIsCoreMediaAvailable = AppleCMLinker::Link();
+ // dlopen VideoToolbox.framework if it's available.
+  // We must link both the CoreMedia and VideoToolbox frameworks to allow for
+  // properly paired Link/Unlink calls.
+ bool haveVideoToolbox = loaded && AppleVTLinker::Link();
+ sIsVTAvailable = sIsCoreMediaAvailable && haveVideoToolbox;
+
+ sIsVTHWAvailable = AppleVTLinker::skPropEnableHWAccel != nullptr;
+
+ sCanUseHardwareVideoDecoder = loaded &&
+ gfx::gfxVars::CanUseHardwareVideoDecoding();
+
+ sInitialized = true;
+}
+
+nsresult
+AppleDecoderModule::Startup()
+{
+ if (!sInitialized || !sIsVTAvailable) {
+ return NS_ERROR_FAILURE;
+ }
+ return NS_OK;
+}
+
+already_AddRefed<MediaDataDecoder>
+AppleDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
+{
+ RefPtr<MediaDataDecoder> decoder =
+ new AppleVTDecoder(aParams.VideoConfig(),
+ aParams.mTaskQueue,
+ aParams.mCallback,
+ aParams.mImageContainer);
+ return decoder.forget();
+}
+
+already_AddRefed<MediaDataDecoder>
+AppleDecoderModule::CreateAudioDecoder(const CreateDecoderParams& aParams)
+{
+ RefPtr<MediaDataDecoder> decoder =
+ new AppleATDecoder(aParams.AudioConfig(),
+ aParams.mTaskQueue,
+ aParams.mCallback);
+ return decoder.forget();
+}
+
+bool
+AppleDecoderModule::SupportsMimeType(const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const
+{
+ return (sIsCoreMediaAvailable &&
+ (aMimeType.EqualsLiteral("audio/mpeg") ||
+ aMimeType.EqualsLiteral("audio/mp4a-latm"))) ||
+ (sIsVTAvailable && (aMimeType.EqualsLiteral("video/mp4") ||
+ aMimeType.EqualsLiteral("video/avc")));
+}
+
+PlatformDecoderModule::ConversionRequired
+AppleDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
+{
+ if (aConfig.IsVideo()) {
+ return ConversionRequired::kNeedAVCC;
+ } else {
+ return ConversionRequired::kNeedNone;
+ }
+}
+
+} // namespace mozilla
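
A hypothetical call-order sketch using only the entry points declared in this
patch: in the real tree PDMFactory drives this, so the wrapper function name,
the nsString include, and the assumption that the module is reference-counted
through PlatformDecoderModule are mine, not the patch's.

#include "AppleDecoderModule.h"
#include "nsString.h"

// Returns true if this machine can decode AAC through the Apple frameworks.
static bool
CanUseAppleMediaOnThisMachine()
{
  // One-time setup: dlopen CoreMedia/VideoToolbox and probe hardware decoding.
  mozilla::AppleDecoderModule::Init();

  RefPtr<mozilla::AppleDecoderModule> module =
    new mozilla::AppleDecoderModule();
  if (NS_FAILED(module->Startup())) { // Requires VideoToolbox to be usable.
    return false;
  }
  return module->SupportsMimeType(NS_LITERAL_CSTRING("audio/mp4a-latm"),
                                  /* aDiagnostics = */ nullptr);
}
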
diff --git a/dom/media/platforms/apple/AppleDecoderModule.h b/dom/media/platforms/apple/AppleDecoderModule.h
new file mode 100644
index 000000000..43a828e63
--- /dev/null
+++ b/dom/media/platforms/apple/AppleDecoderModule.h
@@ -0,0 +1,48 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_AppleDecoderModule_h
+#define mozilla_AppleDecoderModule_h
+
+#include "PlatformDecoderModule.h"
+
+namespace mozilla {
+
+class AppleDecoderModule : public PlatformDecoderModule {
+public:
+ AppleDecoderModule();
+ virtual ~AppleDecoderModule();
+
+ nsresult Startup() override;
+
+ // Decode thread.
+ already_AddRefed<MediaDataDecoder>
+ CreateVideoDecoder(const CreateDecoderParams& aParams) override;
+
+ // Decode thread.
+ already_AddRefed<MediaDataDecoder>
+ CreateAudioDecoder(const CreateDecoderParams& aParams) override;
+
+ bool SupportsMimeType(const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const override;
+
+ ConversionRequired
+ DecoderNeedsConversion(const TrackInfo& aConfig) const override;
+
+ static void Init();
+
+ static bool sCanUseHardwareVideoDecoder;
+
+private:
+ static bool sInitialized;
+ static bool sIsCoreMediaAvailable;
+ static bool sIsVTAvailable;
+ static bool sIsVTHWAvailable;
+};
+
+} // namespace mozilla
+
+#endif // mozilla_AppleDecoderModule_h
diff --git a/dom/media/platforms/apple/AppleUtils.h b/dom/media/platforms/apple/AppleUtils.h
new file mode 100644
index 000000000..9e30aff86
--- /dev/null
+++ b/dom/media/platforms/apple/AppleUtils.h
@@ -0,0 +1,98 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Utility functions to help with Apple API calls.
+
+#ifndef mozilla_AppleUtils_h
+#define mozilla_AppleUtils_h
+
+#include "mozilla/Attributes.h"
+
+namespace mozilla {
+
+// Wrapper class to call CFRelease on reference types
+// when they go out of scope.
+template <class T>
+class AutoCFRelease {
+public:
+ MOZ_IMPLICIT AutoCFRelease(T aRef)
+ : mRef(aRef)
+ {
+ }
+ ~AutoCFRelease()
+ {
+ if (mRef) {
+ CFRelease(mRef);
+ }
+ }
+ // Return the wrapped ref so it can be used as an in parameter.
+ operator T()
+ {
+ return mRef;
+ }
+ // Return a pointer to the wrapped ref for use as an out parameter.
+ T* receive()
+ {
+ return &mRef;
+ }
+
+private:
+ // Copy operator isn't supported and is not implemented.
+ AutoCFRelease<T>& operator=(const AutoCFRelease<T>&);
+ T mRef;
+};
+
+// CFRefPtr: A CoreFoundation smart pointer.
+template <class T>
+class CFRefPtr {
+public:
+ explicit CFRefPtr(T aRef)
+ : mRef(aRef)
+ {
+ if (mRef) {
+ CFRetain(mRef);
+ }
+ }
+ // Copy constructor.
+ CFRefPtr(const CFRefPtr<T>& aCFRefPtr)
+ : mRef(aCFRefPtr.mRef)
+ {
+ if (mRef) {
+ CFRetain(mRef);
+ }
+ }
+ // Copy operator
+ CFRefPtr<T>& operator=(const CFRefPtr<T>& aCFRefPtr)
+ {
+ if (mRef == aCFRefPtr.mRef) {
+      return *this;
+ }
+ if (mRef) {
+ CFRelease(mRef);
+ }
+ mRef = aCFRefPtr.mRef;
+ if (mRef) {
+ CFRetain(mRef);
+ }
+ return *this;
+ }
+ ~CFRefPtr()
+ {
+ if (mRef) {
+ CFRelease(mRef);
+ }
+ }
+ // Return the wrapped ref so it can be used as an in parameter.
+ operator T()
+ {
+ return mRef;
+ }
+
+private:
+ T mRef;
+};
+
+} // namespace mozilla
+
+#endif // mozilla_AppleUtils_h
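
A small usage sketch for AutoCFRelease (the string content and function name are
arbitrary): the implicit conversion lets the guarded reference be passed straight
to CF calls, and receive() captures Create-style out-parameters, as DoDecode()
does later with CMBlockBufferCreateWithMemoryBlock(..., block.receive()).

#include <CoreFoundation/CoreFoundation.h>
#include "AppleUtils.h"

static CFIndex
LabelLength()
{
  // The Create'd string is released automatically when `label` goes out of
  // scope, including on early returns.
  mozilla::AutoCFRelease<CFStringRef> label =
    CFStringCreateWithCString(kCFAllocatorDefault, "audio/mp4a-latm",
                              kCFStringEncodingUTF8);
  if (!label) {
    return 0;
  }
  return CFStringGetLength(label); // operator T() hands out the raw ref.
}
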
diff --git a/dom/media/platforms/apple/AppleVTDecoder.cpp b/dom/media/platforms/apple/AppleVTDecoder.cpp
new file mode 100644
index 000000000..81638870a
--- /dev/null
+++ b/dom/media/platforms/apple/AppleVTDecoder.cpp
@@ -0,0 +1,674 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <CoreFoundation/CFString.h>
+
+#include "AppleCMLinker.h"
+#include "AppleDecoderModule.h"
+#include "AppleUtils.h"
+#include "AppleVTDecoder.h"
+#include "AppleVTLinker.h"
+#include "MediaData.h"
+#include "mozilla/ArrayUtils.h"
+#include "mp4_demuxer/H264.h"
+#include "nsAutoPtr.h"
+#include "nsThreadUtils.h"
+#include "mozilla/Logging.h"
+#include "VideoUtils.h"
+#include "gfxPlatform.h"
+
+#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
+
+namespace mozilla {
+
+AppleVTDecoder::AppleVTDecoder(const VideoInfo& aConfig,
+ TaskQueue* aTaskQueue,
+ MediaDataDecoderCallback* aCallback,
+ layers::ImageContainer* aImageContainer)
+ : mExtraData(aConfig.mExtraData)
+ , mCallback(aCallback)
+ , mPictureWidth(aConfig.mImage.width)
+ , mPictureHeight(aConfig.mImage.height)
+ , mDisplayWidth(aConfig.mDisplay.width)
+ , mDisplayHeight(aConfig.mDisplay.height)
+ , mTaskQueue(aTaskQueue)
+ , mMaxRefFrames(mp4_demuxer::H264::ComputeMaxRefFrames(aConfig.mExtraData))
+ , mImageContainer(aImageContainer)
+ , mIsShutDown(false)
+#ifdef MOZ_WIDGET_UIKIT
+ , mUseSoftwareImages(true)
+#else
+ , mUseSoftwareImages(false)
+#endif
+ , mIsFlushing(false)
+ , mMonitor("AppleVideoDecoder")
+ , mFormat(nullptr)
+ , mSession(nullptr)
+ , mIsHardwareAccelerated(false)
+{
+ MOZ_COUNT_CTOR(AppleVTDecoder);
+ // TODO: Verify aConfig.mime_type.
+ LOG("Creating AppleVTDecoder for %dx%d h.264 video",
+ mDisplayWidth,
+ mDisplayHeight
+ );
+}
+
+AppleVTDecoder::~AppleVTDecoder()
+{
+ MOZ_COUNT_DTOR(AppleVTDecoder);
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+AppleVTDecoder::Init()
+{
+ nsresult rv = InitializeSession();
+
+ if (NS_SUCCEEDED(rv)) {
+ return InitPromise::CreateAndResolve(TrackType::kVideoTrack, __func__);
+ }
+
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+}
+
+void
+AppleVTDecoder::Input(MediaRawData* aSample)
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+
+ LOG("mp4 input sample %p pts %lld duration %lld us%s %d bytes",
+ aSample,
+ aSample->mTime,
+ aSample->mDuration,
+ aSample->mKeyframe ? " keyframe" : "",
+ aSample->Size());
+
+ mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
+ this, &AppleVTDecoder::ProcessDecode, aSample));
+}
+
+void
+AppleVTDecoder::Flush()
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ mIsFlushing = true;
+ nsCOMPtr<nsIRunnable> runnable =
+ NewRunnableMethod(this, &AppleVTDecoder::ProcessFlush);
+ SyncRunnable::DispatchToThread(mTaskQueue, runnable);
+ mIsFlushing = false;
+
+ mSeekTargetThreshold.reset();
+}
+
+void
+AppleVTDecoder::Drain()
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ nsCOMPtr<nsIRunnable> runnable =
+ NewRunnableMethod(this, &AppleVTDecoder::ProcessDrain);
+ mTaskQueue->Dispatch(runnable.forget());
+}
+
+void
+AppleVTDecoder::Shutdown()
+{
+ MOZ_DIAGNOSTIC_ASSERT(!mIsShutDown);
+ mIsShutDown = true;
+ if (mTaskQueue) {
+ nsCOMPtr<nsIRunnable> runnable =
+ NewRunnableMethod(this, &AppleVTDecoder::ProcessShutdown);
+ mTaskQueue->Dispatch(runnable.forget());
+ } else {
+ ProcessShutdown();
+ }
+}
+
+nsresult
+AppleVTDecoder::ProcessDecode(MediaRawData* aSample)
+{
+ AssertOnTaskQueueThread();
+
+ if (mIsFlushing) {
+ return NS_OK;
+ }
+
+ auto rv = DoDecode(aSample);
+
+ return rv;
+}
+
+void
+AppleVTDecoder::ProcessShutdown()
+{
+ if (mSession) {
+ LOG("%s: cleaning up session %p", __func__, mSession);
+ VTDecompressionSessionInvalidate(mSession);
+ CFRelease(mSession);
+ mSession = nullptr;
+ }
+ if (mFormat) {
+ LOG("%s: releasing format %p", __func__, mFormat);
+ CFRelease(mFormat);
+ mFormat = nullptr;
+ }
+}
+
+void
+AppleVTDecoder::ProcessFlush()
+{
+ AssertOnTaskQueueThread();
+ nsresult rv = WaitForAsynchronousFrames();
+ if (NS_FAILED(rv)) {
+ LOG("AppleVTDecoder::Flush failed waiting for platform decoder "
+ "with error:%d.", rv);
+ }
+ ClearReorderedFrames();
+}
+
+void
+AppleVTDecoder::ProcessDrain()
+{
+ AssertOnTaskQueueThread();
+ nsresult rv = WaitForAsynchronousFrames();
+ if (NS_FAILED(rv)) {
+ LOG("AppleVTDecoder::Drain failed waiting for platform decoder "
+ "with error:%d.", rv);
+ }
+ DrainReorderedFrames();
+ mCallback->DrainComplete();
+}
+
+AppleVTDecoder::AppleFrameRef*
+AppleVTDecoder::CreateAppleFrameRef(const MediaRawData* aSample)
+{
+ MOZ_ASSERT(aSample);
+ return new AppleFrameRef(*aSample);
+}
+
+void
+AppleVTDecoder::DrainReorderedFrames()
+{
+ MonitorAutoLock mon(mMonitor);
+ while (!mReorderQueue.IsEmpty()) {
+ mCallback->Output(mReorderQueue.Pop().get());
+ }
+}
+
+void
+AppleVTDecoder::ClearReorderedFrames()
+{
+ MonitorAutoLock mon(mMonitor);
+ while (!mReorderQueue.IsEmpty()) {
+ mReorderQueue.Pop();
+ }
+}
+
+void
+AppleVTDecoder::SetSeekThreshold(const media::TimeUnit& aTime)
+{
+ LOG("SetSeekThreshold %lld", aTime.ToMicroseconds());
+ mSeekTargetThreshold = Some(aTime);
+}
+
+//
+// Implementation details.
+//
+
+// Callback passed to the VideoToolbox decoder for returning data.
+// This needs to be static because the API takes a C-style pair of
+// function and userdata pointers. This validates parameters and
+// forwards the decoded image back to an object method.
+static void
+PlatformCallback(void* decompressionOutputRefCon,
+ void* sourceFrameRefCon,
+ OSStatus status,
+ VTDecodeInfoFlags flags,
+ CVImageBufferRef image,
+ CMTime presentationTimeStamp,
+ CMTime presentationDuration)
+{
+ LOG("AppleVideoDecoder %s status %d flags %d", __func__, status, flags);
+
+ AppleVTDecoder* decoder =
+ static_cast<AppleVTDecoder*>(decompressionOutputRefCon);
+ nsAutoPtr<AppleVTDecoder::AppleFrameRef> frameRef(
+ static_cast<AppleVTDecoder::AppleFrameRef*>(sourceFrameRefCon));
+
+ // Validate our arguments.
+ if (status != noErr || !image) {
+ NS_WARNING("VideoToolbox decoder returned no data");
+ image = nullptr;
+ } else if (flags & kVTDecodeInfo_FrameDropped) {
+ NS_WARNING(" ...frame tagged as dropped...");
+ } else {
+ MOZ_ASSERT(CFGetTypeID(image) == CVPixelBufferGetTypeID(),
+ "VideoToolbox returned an unexpected image type");
+ }
+ decoder->OutputFrame(image, *frameRef);
+}
+
+// Copy and return a decoded frame.
+nsresult
+AppleVTDecoder::OutputFrame(CVPixelBufferRef aImage,
+ AppleVTDecoder::AppleFrameRef aFrameRef)
+{
+ if (mIsShutDown || mIsFlushing) {
+ // We are in the process of flushing or shutting down; ignore frame.
+ return NS_OK;
+ }
+
+ LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
+ aFrameRef.byte_offset,
+ aFrameRef.decode_timestamp.ToMicroseconds(),
+ aFrameRef.composition_timestamp.ToMicroseconds(),
+ aFrameRef.duration.ToMicroseconds(),
+ aFrameRef.is_sync_point ? " keyframe" : ""
+ );
+
+ if (!aImage) {
+    // The image was dropped by the decoder, or none has been returned yet.
+ // We need more input to continue.
+ mCallback->InputExhausted();
+ return NS_OK;
+ }
+
+ bool useNullSample = false;
+ if (mSeekTargetThreshold.isSome()) {
+ if ((aFrameRef.composition_timestamp + aFrameRef.duration) < mSeekTargetThreshold.ref()) {
+ useNullSample = true;
+ } else {
+ mSeekTargetThreshold.reset();
+ }
+ }
+
+ // Where our resulting image will end up.
+ RefPtr<MediaData> data;
+ // Bounds.
+ VideoInfo info;
+ info.mDisplay = nsIntSize(mDisplayWidth, mDisplayHeight);
+ gfx::IntRect visible = gfx::IntRect(0,
+ 0,
+ mPictureWidth,
+ mPictureHeight);
+
+ if (useNullSample) {
+ data = new NullData(aFrameRef.byte_offset,
+ aFrameRef.composition_timestamp.ToMicroseconds(),
+ aFrameRef.duration.ToMicroseconds());
+ } else if (mUseSoftwareImages) {
+ size_t width = CVPixelBufferGetWidth(aImage);
+ size_t height = CVPixelBufferGetHeight(aImage);
+ DebugOnly<size_t> planes = CVPixelBufferGetPlaneCount(aImage);
+    MOZ_ASSERT(planes == 2, "Expected a biplanar (NV12) pixel buffer.");
+
+ VideoData::YCbCrBuffer buffer;
+
+ // Lock the returned image data.
+ CVReturn rv = CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
+ if (rv != kCVReturnSuccess) {
+ NS_ERROR("error locking pixel data");
+ mCallback->Error(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("CVPixelBufferLockBaseAddress:%x", rv)));
+ return NS_ERROR_DOM_MEDIA_DECODE_ERR;
+ }
+ // Y plane.
+ buffer.mPlanes[0].mData =
+ static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 0));
+ buffer.mPlanes[0].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 0);
+ buffer.mPlanes[0].mWidth = width;
+ buffer.mPlanes[0].mHeight = height;
+ buffer.mPlanes[0].mOffset = 0;
+ buffer.mPlanes[0].mSkip = 0;
+ // Cb plane.
+ buffer.mPlanes[1].mData =
+ static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
+ buffer.mPlanes[1].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
+ buffer.mPlanes[1].mWidth = (width+1) / 2;
+ buffer.mPlanes[1].mHeight = (height+1) / 2;
+ buffer.mPlanes[1].mOffset = 0;
+ buffer.mPlanes[1].mSkip = 1;
+ // Cr plane.
+ buffer.mPlanes[2].mData =
+ static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(aImage, 1));
+ buffer.mPlanes[2].mStride = CVPixelBufferGetBytesPerRowOfPlane(aImage, 1);
+ buffer.mPlanes[2].mWidth = (width+1) / 2;
+ buffer.mPlanes[2].mHeight = (height+1) / 2;
+ buffer.mPlanes[2].mOffset = 1;
+ buffer.mPlanes[2].mSkip = 1;
+
+ // Copy the image data into our own format.
+ data =
+ VideoData::CreateAndCopyData(info,
+ mImageContainer,
+ aFrameRef.byte_offset,
+ aFrameRef.composition_timestamp.ToMicroseconds(),
+ aFrameRef.duration.ToMicroseconds(),
+ buffer,
+ aFrameRef.is_sync_point,
+ aFrameRef.decode_timestamp.ToMicroseconds(),
+ visible);
+ // Unlock the returned image data.
+ CVPixelBufferUnlockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
+ } else {
+#ifndef MOZ_WIDGET_UIKIT
+ IOSurfacePtr surface = MacIOSurfaceLib::CVPixelBufferGetIOSurface(aImage);
+ MOZ_ASSERT(surface, "Decoder didn't return an IOSurface backed buffer");
+
+ RefPtr<MacIOSurface> macSurface = new MacIOSurface(surface);
+
+ RefPtr<layers::Image> image = new MacIOSurfaceImage(macSurface);
+
+ data =
+ VideoData::CreateFromImage(info,
+ aFrameRef.byte_offset,
+ aFrameRef.composition_timestamp.ToMicroseconds(),
+ aFrameRef.duration.ToMicroseconds(),
+ image.forget(),
+ aFrameRef.is_sync_point,
+ aFrameRef.decode_timestamp.ToMicroseconds(),
+ visible);
+#else
+ MOZ_ASSERT_UNREACHABLE("No MacIOSurface on iOS");
+#endif
+ }
+
+ if (!data) {
+ NS_ERROR("Couldn't create VideoData for frame");
+ mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ // Frames come out in DTS order but we need to output them
+ // in composition order.
+ MonitorAutoLock mon(mMonitor);
+ mReorderQueue.Push(data);
+ if (mReorderQueue.Length() > mMaxRefFrames) {
+ mCallback->Output(mReorderQueue.Pop().get());
+ }
+ mCallback->InputExhausted();
+ LOG("%llu decoded frames queued",
+ static_cast<unsigned long long>(mReorderQueue.Length()));
+
+ return NS_OK;
+}
+
+nsresult
+AppleVTDecoder::WaitForAsynchronousFrames()
+{
+ OSStatus rv = VTDecompressionSessionWaitForAsynchronousFrames(mSession);
+ if (rv != noErr) {
+ LOG("AppleVTDecoder: Error %d waiting for asynchronous frames", rv);
+ return NS_ERROR_FAILURE;
+ }
+ return NS_OK;
+}
+
+// Helper to fill in a timestamp structure.
+static CMSampleTimingInfo
+TimingInfoFromSample(MediaRawData* aSample)
+{
+ CMSampleTimingInfo timestamp;
+
+ timestamp.duration = CMTimeMake(aSample->mDuration, USECS_PER_S);
+ timestamp.presentationTimeStamp =
+ CMTimeMake(aSample->mTime, USECS_PER_S);
+ timestamp.decodeTimeStamp =
+ CMTimeMake(aSample->mTimecode, USECS_PER_S);
+
+ return timestamp;
+}
+
+MediaResult
+AppleVTDecoder::DoDecode(MediaRawData* aSample)
+{
+ AssertOnTaskQueueThread();
+
+ // For some reason this gives me a double-free error with stagefright.
+ AutoCFRelease<CMBlockBufferRef> block = nullptr;
+ AutoCFRelease<CMSampleBufferRef> sample = nullptr;
+ VTDecodeInfoFlags infoFlags;
+ OSStatus rv;
+
+ // FIXME: This copies the sample data. I think we can provide
+ // a custom block source which reuses the aSample buffer.
+ // But note that there may be a problem keeping the samples
+ // alive over multiple frames.
+ rv = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault, // Struct allocator.
+ const_cast<uint8_t*>(aSample->Data()),
+ aSample->Size(),
+ kCFAllocatorNull, // Block allocator.
+ NULL, // Block source.
+ 0, // Data offset.
+ aSample->Size(),
+ false,
+ block.receive());
+ if (rv != noErr) {
+ NS_ERROR("Couldn't create CMBlockBuffer");
+ mCallback->Error(
+ MediaResult(NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("CMBlockBufferCreateWithMemoryBlock:%x", rv)));
+ return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+ }
+ CMSampleTimingInfo timestamp = TimingInfoFromSample(aSample);
+ rv = CMSampleBufferCreate(kCFAllocatorDefault, block, true, 0, 0, mFormat, 1, 1, &timestamp, 0, NULL, sample.receive());
+ if (rv != noErr) {
+ NS_ERROR("Couldn't create CMSampleBuffer");
+ mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("CMSampleBufferCreate:%x", rv)));
+ return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+ }
+
+ VTDecodeFrameFlags decodeFlags =
+ kVTDecodeFrame_EnableAsynchronousDecompression;
+ rv = VTDecompressionSessionDecodeFrame(mSession,
+ sample,
+ decodeFlags,
+ CreateAppleFrameRef(aSample),
+ &infoFlags);
+ if (rv != noErr && !(infoFlags & kVTDecodeInfo_FrameDropped)) {
+ LOG("AppleVTDecoder: Error %d VTDecompressionSessionDecodeFrame", rv);
+ NS_WARNING("Couldn't pass frame to decoder");
+ mCallback->Error(
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("VTDecompressionSessionDecodeFrame:%x", rv)));
+ return NS_ERROR_DOM_MEDIA_DECODE_ERR;
+ }
+
+ return NS_OK;
+}
+
+nsresult
+AppleVTDecoder::InitializeSession()
+{
+ OSStatus rv;
+
+ AutoCFRelease<CFDictionaryRef> extensions = CreateDecoderExtensions();
+
+ rv = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
+ kCMVideoCodecType_H264,
+ mPictureWidth,
+ mPictureHeight,
+ extensions,
+ &mFormat);
+ if (rv != noErr) {
+ NS_ERROR("Couldn't create format description!");
+ return NS_ERROR_FAILURE;
+ }
+
+  // Construct the video decoder selection spec.
+ AutoCFRelease<CFDictionaryRef> spec = CreateDecoderSpecification();
+
+  // Construct the output configuration.
+ AutoCFRelease<CFDictionaryRef> outputConfiguration =
+ CreateOutputConfiguration();
+
+ VTDecompressionOutputCallbackRecord cb = { PlatformCallback, this };
+ rv = VTDecompressionSessionCreate(kCFAllocatorDefault,
+ mFormat,
+ spec, // Video decoder selection.
+ outputConfiguration, // Output video format.
+ &cb,
+ &mSession);
+
+ if (rv != noErr) {
+ NS_ERROR("Couldn't create decompression session!");
+ return NS_ERROR_FAILURE;
+ }
+
+ if (AppleVTLinker::skPropUsingHWAccel) {
+ CFBooleanRef isUsingHW = nullptr;
+ rv = VTSessionCopyProperty(mSession,
+ AppleVTLinker::skPropUsingHWAccel,
+ kCFAllocatorDefault,
+ &isUsingHW);
+ if (rv != noErr) {
+ LOG("AppleVTDecoder: system doesn't support hardware acceleration");
+ }
+ mIsHardwareAccelerated = rv == noErr && isUsingHW == kCFBooleanTrue;
+ LOG("AppleVTDecoder: %s hardware accelerated decoding",
+ mIsHardwareAccelerated ? "using" : "not using");
+ } else {
+ LOG("AppleVTDecoder: couldn't determine hardware acceleration status.");
+ }
+ return NS_OK;
+}
+
+CFDictionaryRef
+AppleVTDecoder::CreateDecoderExtensions()
+{
+ AutoCFRelease<CFDataRef> avc_data =
+ CFDataCreate(kCFAllocatorDefault,
+ mExtraData->Elements(),
+ mExtraData->Length());
+
+ const void* atomsKey[] = { CFSTR("avcC") };
+ const void* atomsValue[] = { avc_data };
+ static_assert(ArrayLength(atomsKey) == ArrayLength(atomsValue),
+ "Non matching keys/values array size");
+
+ AutoCFRelease<CFDictionaryRef> atoms =
+ CFDictionaryCreate(kCFAllocatorDefault,
+ atomsKey,
+ atomsValue,
+ ArrayLength(atomsKey),
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks);
+
+ const void* extensionKeys[] =
+ { kCVImageBufferChromaLocationBottomFieldKey,
+ kCVImageBufferChromaLocationTopFieldKey,
+ AppleCMLinker::skPropExtensionAtoms };
+
+ const void* extensionValues[] =
+ { kCVImageBufferChromaLocation_Left,
+ kCVImageBufferChromaLocation_Left,
+ atoms };
+ static_assert(ArrayLength(extensionKeys) == ArrayLength(extensionValues),
+ "Non matching keys/values array size");
+
+ return CFDictionaryCreate(kCFAllocatorDefault,
+ extensionKeys,
+ extensionValues,
+ ArrayLength(extensionKeys),
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks);
+}
+
+CFDictionaryRef
+AppleVTDecoder::CreateDecoderSpecification()
+{
+ if (!AppleVTLinker::skPropEnableHWAccel) {
+ return nullptr;
+ }
+
+ const void* specKeys[] = { AppleVTLinker::skPropEnableHWAccel };
+ const void* specValues[1];
+ if (AppleDecoderModule::sCanUseHardwareVideoDecoder) {
+ specValues[0] = kCFBooleanTrue;
+ } else {
+ // This GPU is blacklisted for hardware decoding.
+ specValues[0] = kCFBooleanFalse;
+ }
+ static_assert(ArrayLength(specKeys) == ArrayLength(specValues),
+ "Non matching keys/values array size");
+
+ return CFDictionaryCreate(kCFAllocatorDefault,
+ specKeys,
+ specValues,
+ ArrayLength(specKeys),
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks);
+}
+
+CFDictionaryRef
+AppleVTDecoder::CreateOutputConfiguration()
+{
+ if (mUseSoftwareImages) {
+ // Output format type:
+ SInt32 PixelFormatTypeValue =
+ kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
+ AutoCFRelease<CFNumberRef> PixelFormatTypeNumber =
+ CFNumberCreate(kCFAllocatorDefault,
+ kCFNumberSInt32Type,
+ &PixelFormatTypeValue);
+ const void* outputKeys[] = { kCVPixelBufferPixelFormatTypeKey };
+ const void* outputValues[] = { PixelFormatTypeNumber };
+ static_assert(ArrayLength(outputKeys) == ArrayLength(outputValues),
+ "Non matching keys/values array size");
+
+ return CFDictionaryCreate(kCFAllocatorDefault,
+ outputKeys,
+ outputValues,
+ ArrayLength(outputKeys),
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks);
+ }
+
+#ifndef MOZ_WIDGET_UIKIT
+ // Output format type:
+ SInt32 PixelFormatTypeValue = kCVPixelFormatType_422YpCbCr8;
+ AutoCFRelease<CFNumberRef> PixelFormatTypeNumber =
+ CFNumberCreate(kCFAllocatorDefault,
+ kCFNumberSInt32Type,
+ &PixelFormatTypeValue);
+ // Construct IOSurface Properties
+ const void* IOSurfaceKeys[] = { MacIOSurfaceLib::kPropIsGlobal };
+ const void* IOSurfaceValues[] = { kCFBooleanTrue };
+ static_assert(ArrayLength(IOSurfaceKeys) == ArrayLength(IOSurfaceValues),
+ "Non matching keys/values array size");
+
+  // Construct the output configuration.
+ AutoCFRelease<CFDictionaryRef> IOSurfaceProperties =
+ CFDictionaryCreate(kCFAllocatorDefault,
+ IOSurfaceKeys,
+ IOSurfaceValues,
+ ArrayLength(IOSurfaceKeys),
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks);
+
+ const void* outputKeys[] = { kCVPixelBufferIOSurfacePropertiesKey,
+ kCVPixelBufferPixelFormatTypeKey,
+ kCVPixelBufferOpenGLCompatibilityKey };
+ const void* outputValues[] = { IOSurfaceProperties,
+ PixelFormatTypeNumber,
+ kCFBooleanTrue };
+ static_assert(ArrayLength(outputKeys) == ArrayLength(outputValues),
+ "Non matching keys/values array size");
+
+ return CFDictionaryCreate(kCFAllocatorDefault,
+ outputKeys,
+ outputValues,
+ ArrayLength(outputKeys),
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks);
+#else
+ MOZ_ASSERT_UNREACHABLE("No MacIOSurface on iOS");
+#endif
+}
+
+
+} // namespace mozilla
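
OutputFrame() above relies on the fact that at most mMaxRefFrames frames can be
held back for reordering, so frames can be released in presentation order as soon
as the queue grows past that bound. Below is a self-contained sketch of that
policy built on std::priority_queue; the ReorderQueue used by the patch is a
Gecko container and pops at most one frame per push, and PtsReorderer/SketchFrame
are invented names for the example.

#include <cstdint>
#include <queue>
#include <vector>

struct SketchFrame
{
  int64_t mPts; // Presentation timestamp in microseconds.
};

struct LaterPts
{
  bool operator()(const SketchFrame& a, const SketchFrame& b) const
  {
    return a.mPts > b.mPts; // Makes the queue a min-heap on pts.
  }
};

class PtsReorderer
{
public:
  explicit PtsReorderer(size_t aMaxRefFrames) : mMaxRefFrames(aMaxRefFrames) {}

  // Push one frame in decode order; returns the frames that are now safe to
  // output in presentation order (what OutputFrame does one frame at a time).
  std::vector<SketchFrame> Push(const SketchFrame& aFrame)
  {
    mQueue.push(aFrame);
    std::vector<SketchFrame> ready;
    while (mQueue.size() > mMaxRefFrames) {
      ready.push_back(mQueue.top());
      mQueue.pop();
    }
    return ready;
  }

  // Flush everything in presentation order (what DrainReorderedFrames does).
  std::vector<SketchFrame> Drain()
  {
    std::vector<SketchFrame> ready;
    while (!mQueue.empty()) {
      ready.push_back(mQueue.top());
      mQueue.pop();
    }
    return ready;
  }

private:
  const size_t mMaxRefFrames;
  std::priority_queue<SketchFrame, std::vector<SketchFrame>, LaterPts> mQueue;
};
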
diff --git a/dom/media/platforms/apple/AppleVTDecoder.h b/dom/media/platforms/apple/AppleVTDecoder.h
new file mode 100644
index 000000000..05d08c7c7
--- /dev/null
+++ b/dom/media/platforms/apple/AppleVTDecoder.h
@@ -0,0 +1,126 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_AppleVTDecoder_h
+#define mozilla_AppleVTDecoder_h
+
+#include "PlatformDecoderModule.h"
+#include "mozilla/Atomics.h"
+#include "nsIThread.h"
+#include "ReorderQueue.h"
+#include "TimeUnits.h"
+
+#include "VideoToolbox/VideoToolbox.h"
+
+namespace mozilla {
+
+class AppleVTDecoder : public MediaDataDecoder {
+public:
+ AppleVTDecoder(const VideoInfo& aConfig,
+ TaskQueue* aTaskQueue,
+ MediaDataDecoderCallback* aCallback,
+ layers::ImageContainer* aImageContainer);
+
+ class AppleFrameRef {
+ public:
+ media::TimeUnit decode_timestamp;
+ media::TimeUnit composition_timestamp;
+ media::TimeUnit duration;
+ int64_t byte_offset;
+ bool is_sync_point;
+
+ explicit AppleFrameRef(const MediaRawData& aSample)
+ : decode_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTimecode))
+ , composition_timestamp(media::TimeUnit::FromMicroseconds(aSample.mTime))
+ , duration(media::TimeUnit::FromMicroseconds(aSample.mDuration))
+ , byte_offset(aSample.mOffset)
+ , is_sync_point(aSample.mKeyframe)
+ {
+ }
+ };
+
+ RefPtr<InitPromise> Init() override;
+ void Input(MediaRawData* aSample) override;
+ void Flush() override;
+ void Drain() override;
+ void Shutdown() override;
+ void SetSeekThreshold(const media::TimeUnit& aTime) override;
+
+ bool IsHardwareAccelerated(nsACString& aFailureReason) const override
+ {
+ return mIsHardwareAccelerated;
+ }
+
+ const char* GetDescriptionName() const override
+ {
+ return mIsHardwareAccelerated
+ ? "apple hardware VT decoder"
+ : "apple software VT decoder";
+ }
+
+ // Access from the taskqueue and the decoder's thread.
+ // OutputFrame is thread-safe.
+ nsresult OutputFrame(CVPixelBufferRef aImage,
+ AppleFrameRef aFrameRef);
+
+private:
+ virtual ~AppleVTDecoder();
+ void ProcessFlush();
+ void ProcessDrain();
+ void ProcessShutdown();
+ nsresult ProcessDecode(MediaRawData* aSample);
+
+ void AssertOnTaskQueueThread()
+ {
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ }
+
+ AppleFrameRef* CreateAppleFrameRef(const MediaRawData* aSample);
+ void DrainReorderedFrames();
+ void ClearReorderedFrames();
+ CFDictionaryRef CreateOutputConfiguration();
+
+ const RefPtr<MediaByteBuffer> mExtraData;
+ MediaDataDecoderCallback* mCallback;
+ const uint32_t mPictureWidth;
+ const uint32_t mPictureHeight;
+ const uint32_t mDisplayWidth;
+ const uint32_t mDisplayHeight;
+
+ // Method to set up the decompression session.
+ nsresult InitializeSession();
+ nsresult WaitForAsynchronousFrames();
+ CFDictionaryRef CreateDecoderSpecification();
+ CFDictionaryRef CreateDecoderExtensions();
+ // Method to pass a frame to VideoToolbox for decoding.
+ MediaResult DoDecode(MediaRawData* aSample);
+
+ const RefPtr<TaskQueue> mTaskQueue;
+ const uint32_t mMaxRefFrames;
+ const RefPtr<layers::ImageContainer> mImageContainer;
+ Atomic<bool> mIsShutDown;
+ const bool mUseSoftwareImages;
+
+  // Set on the reader/decode thread calling Flush() to indicate that output
+  // is not required and so input samples on mTaskQueue need not be processed.
+  // Cleared again on the reader/decode thread once ProcessFlush() has run.
+ Atomic<bool> mIsFlushing;
+ // Protects mReorderQueue.
+ Monitor mMonitor;
+ ReorderQueue mReorderQueue;
+  // A decoded frame will be dropped if its pts is smaller than this value.
+  // It should be initialized before Input() or after Flush(), so it is safe
+  // to access it in OutputFrame() without locking.
+ Maybe<media::TimeUnit> mSeekTargetThreshold;
+
+ CMVideoFormatDescriptionRef mFormat;
+ VTDecompressionSessionRef mSession;
+ Atomic<bool> mIsHardwareAccelerated;
+};
+
+} // namespace mozilla
+
+#endif // mozilla_AppleVTDecoder_h
diff --git a/dom/media/platforms/apple/AppleVTFunctions.h b/dom/media/platforms/apple/AppleVTFunctions.h
new file mode 100644
index 000000000..62765afa4
--- /dev/null
+++ b/dom/media/platforms/apple/AppleVTFunctions.h
@@ -0,0 +1,14 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Construct references to each of the VideoToolbox symbols we use.
+
+LINK_FUNC(VTDecompressionSessionCreate)
+LINK_FUNC(VTDecompressionSessionDecodeFrame)
+LINK_FUNC(VTDecompressionSessionInvalidate)
+LINK_FUNC(VTDecompressionSessionWaitForAsynchronousFrames)
+LINK_FUNC(VTSessionCopyProperty)
+LINK_FUNC(VTSessionCopySupportedPropertyDictionary)
diff --git a/dom/media/platforms/apple/AppleVTLinker.cpp b/dom/media/platforms/apple/AppleVTLinker.cpp
new file mode 100644
index 000000000..51a8a0122
--- /dev/null
+++ b/dom/media/platforms/apple/AppleVTLinker.cpp
@@ -0,0 +1,104 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <dlfcn.h>
+
+#include "AppleVTLinker.h"
+#include "mozilla/ArrayUtils.h"
+#include "nsDebug.h"
+
+#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
+
+namespace mozilla {
+
+AppleVTLinker::LinkStatus
+AppleVTLinker::sLinkStatus = LinkStatus_INIT;
+
+void* AppleVTLinker::sLink = nullptr;
+CFStringRef AppleVTLinker::skPropEnableHWAccel = nullptr;
+CFStringRef AppleVTLinker::skPropUsingHWAccel = nullptr;
+
+#define LINK_FUNC(func) typeof(func) func;
+#include "AppleVTFunctions.h"
+#undef LINK_FUNC
+
+/* static */ bool
+AppleVTLinker::Link()
+{
+ if (sLinkStatus) {
+ return sLinkStatus == LinkStatus_SUCCEEDED;
+ }
+
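+  // VideoToolbox may live under the public Frameworks directory or, on older
+  // systems, under PrivateFrameworks; try both locations.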
+ const char* dlnames[] =
+ { "/System/Library/Frameworks/VideoToolbox.framework/VideoToolbox",
+ "/System/Library/PrivateFrameworks/VideoToolbox.framework/VideoToolbox" };
+ bool dlfound = false;
+ for (size_t i = 0; i < ArrayLength(dlnames); i++) {
+ if ((sLink = dlopen(dlnames[i], RTLD_NOW | RTLD_LOCAL))) {
+ dlfound = true;
+ break;
+ }
+ }
+ if (!dlfound) {
+ NS_WARNING("Couldn't load VideoToolbox framework");
+ goto fail;
+ }
+
+#define LINK_FUNC(func) \
+ func = (typeof(func))dlsym(sLink, #func); \
+ if (!func) { \
+ NS_WARNING("Couldn't load VideoToolbox function " #func ); \
+ goto fail; \
+ }
+#include "AppleVTFunctions.h"
+#undef LINK_FUNC
+
+ // Will only resolve in 10.9 and later.
+ skPropEnableHWAccel =
+ GetIOConst("kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder");
+ skPropUsingHWAccel =
+ GetIOConst("kVTDecompressionPropertyKey_UsingHardwareAcceleratedVideoDecoder");
+
+ LOG("Loaded VideoToolbox framework.");
+ sLinkStatus = LinkStatus_SUCCEEDED;
+ return true;
+
+fail:
+ Unlink();
+
+ sLinkStatus = LinkStatus_FAILED;
+ return false;
+}
+
+/* static */ void
+AppleVTLinker::Unlink()
+{
+ if (sLink) {
+ LOG("Unlinking VideoToolbox framework.");
+#define LINK_FUNC(func) \
+ func = nullptr;
+#include "AppleVTFunctions.h"
+#undef LINK_FUNC
+ dlclose(sLink);
+ sLink = nullptr;
+ skPropEnableHWAccel = nullptr;
+ skPropUsingHWAccel = nullptr;
+ sLinkStatus = LinkStatus_INIT;
+ }
+}
+
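+// Resolve a CFStringRef constant exported by the framework. dlsym() returns
+// the address of the constant (a data symbol), so it is dereferenced before
+// being returned; nullptr means the constant is not available in this OS
+// version.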
+/* static */ CFStringRef
+AppleVTLinker::GetIOConst(const char* symbol)
+{
+ CFStringRef* address = (CFStringRef*)dlsym(sLink, symbol);
+ if (!address) {
+ return nullptr;
+ }
+
+ return *address;
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/apple/AppleVTLinker.h b/dom/media/platforms/apple/AppleVTLinker.h
new file mode 100644
index 000000000..49783432d
--- /dev/null
+++ b/dom/media/platforms/apple/AppleVTLinker.h
@@ -0,0 +1,46 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef AppleVTLinker_h
+#define AppleVTLinker_h
+
+extern "C" {
+#pragma GCC visibility push(default)
+#include "VideoToolbox/VideoToolbox.h"
+#pragma GCC visibility pop
+}
+
+#include "nscore.h"
+
+namespace mozilla {
+
+class AppleVTLinker
+{
+public:
+ static bool Link();
+ static void Unlink();
+ static CFStringRef skPropEnableHWAccel;
+ static CFStringRef skPropUsingHWAccel;
+
+private:
+ static void* sLink;
+
+ static enum LinkStatus {
+ LinkStatus_INIT = 0,
+ LinkStatus_FAILED,
+ LinkStatus_SUCCEEDED
+ } sLinkStatus;
+
+ static CFStringRef GetIOConst(const char* symbol);
+};
+
+#define LINK_FUNC(func) extern typeof(func)* func;
+#include "AppleVTFunctions.h"
+#undef LINK_FUNC
+
+} // namespace mozilla
+
+#endif // AppleVTLinker_h
diff --git a/dom/media/platforms/apple/VideoToolbox/VideoToolbox.h b/dom/media/platforms/apple/VideoToolbox/VideoToolbox.h
new file mode 100644
index 000000000..edd3f9c1b
--- /dev/null
+++ b/dom/media/platforms/apple/VideoToolbox/VideoToolbox.h
@@ -0,0 +1,94 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Stub header for VideoToolbox framework API.
+// We include our own copy so we can build on MacOS versions
+// where it's not available.
+
+#ifndef mozilla_VideoToolbox_VideoToolbox_h
+#define mozilla_VideoToolbox_VideoToolbox_h
+
+// CoreMedia is available starting in OS X 10.7,
+// so we need to dlopen it as well to run on 10.6,
+// but we can depend on the real framework headers at build time.
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <CoreMedia/CoreMedia.h>
+#include <CoreVideo/CVPixelBuffer.h>
+
+typedef uint32_t VTDecodeFrameFlags;
+typedef uint32_t VTDecodeInfoFlags;
+enum {
+ kVTDecodeInfo_Asynchronous = 1UL << 0,
+ kVTDecodeInfo_FrameDropped = 1UL << 1,
+};
+enum {
+ kVTDecodeFrame_EnableAsynchronousDecompression = 1<<0,
+ kVTDecodeFrame_DoNotOutputFrame = 1<<1,
+ kVTDecodeFrame_1xRealTimePlayback = 1<<2,
+ kVTDecodeFrame_EnableTemporalProcessing = 1<<3,
+};
+
+typedef CFTypeRef VTSessionRef;
+typedef struct OpaqueVTDecompressionSession* VTDecompressionSessionRef;
+typedef void (*VTDecompressionOutputCallback)(
+ void*,
+ void*,
+ OSStatus,
+ VTDecodeInfoFlags,
+ CVImageBufferRef,
+ CMTime,
+ CMTime
+);
+typedef struct VTDecompressionOutputCallbackRecord {
+ VTDecompressionOutputCallback decompressionOutputCallback;
+ void* decompressionOutputRefCon;
+} VTDecompressionOutputCallbackRecord;
+
+OSStatus
+VTDecompressionSessionCreate(
+ CFAllocatorRef,
+ CMVideoFormatDescriptionRef,
+ CFDictionaryRef,
+ CFDictionaryRef,
+ const VTDecompressionOutputCallbackRecord*,
+ VTDecompressionSessionRef*
+);
+
+OSStatus
+VTDecompressionSessionDecodeFrame(
+ VTDecompressionSessionRef,
+ CMSampleBufferRef,
+ VTDecodeFrameFlags,
+ void*,
+ VTDecodeInfoFlags*
+);
+
+OSStatus
+VTDecompressionSessionWaitForAsynchronousFrames(
+ VTDecompressionSessionRef
+);
+
+void
+VTDecompressionSessionInvalidate(
+ VTDecompressionSessionRef
+);
+
+OSStatus
+VTSessionCopyProperty(
+ VTSessionRef,
+ CFStringRef,
+ CFAllocatorRef,
+ void*
+);
+
+OSStatus
+VTSessionCopySupportedPropertyDictionary(
+ VTSessionRef,
+ CFDictionaryRef*
+);
+
+#endif // mozilla_VideoToolbox_VideoToolbox_h
diff --git a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
new file mode 100644
index 000000000..f867ec494
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
@@ -0,0 +1,233 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/TaskQueue.h"
+
+#include "FFmpegAudioDecoder.h"
+#include "TimeUnits.h"
+
+#define MAX_CHANNELS 16
+
+namespace mozilla
+{
+
+FFmpegAudioDecoder<LIBAV_VER>::FFmpegAudioDecoder(FFmpegLibWrapper* aLib,
+ TaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
+ const AudioInfo& aConfig)
+ : FFmpegDataDecoder(aLib, aTaskQueue, aCallback, GetCodecId(aConfig.mMimeType))
+{
+ MOZ_COUNT_CTOR(FFmpegAudioDecoder);
+ // Use a new MediaByteBuffer as the object will be modified during initialization.
+ if (aConfig.mCodecSpecificConfig && aConfig.mCodecSpecificConfig->Length()) {
+ mExtraData = new MediaByteBuffer;
+ mExtraData->AppendElements(*aConfig.mCodecSpecificConfig);
+ }
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+FFmpegAudioDecoder<LIBAV_VER>::Init()
+{
+ nsresult rv = InitDecoder();
+
+ return rv == NS_OK ? InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__)
+ : InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+}
+
+void
+FFmpegAudioDecoder<LIBAV_VER>::InitCodecContext()
+{
+ MOZ_ASSERT(mCodecContext);
+  // Do not set this to 0: FFmpeg would then default to the number of cores,
+  // but our mozlibavutil build does not implement get_cpu_count.
+  mCodecContext->thread_count = 1;
+  // FFmpeg takes this as a suggestion for the output sample format.
+  // LibAV 0.8 produces broken interleaved float samples, so request 16-bit
+  // audio for that version instead.
+ mCodecContext->request_sample_fmt =
+ (mLib->mVersion == 53) ? AV_SAMPLE_FMT_S16 : AV_SAMPLE_FMT_FLT;
+}
+
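+// Copy the decoded audio out of the AVFrame into a single interleaved buffer,
+// converting planar layouts to interleaved and integer samples (S16/S32) to
+// floating point. Sample i of channel c ends up at index i * aNumChannels + c.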
+static AlignedAudioBuffer
+CopyAndPackAudio(AVFrame* aFrame, uint32_t aNumChannels, uint32_t aNumAFrames)
+{
+ MOZ_ASSERT(aNumChannels <= MAX_CHANNELS);
+
+ AlignedAudioBuffer audio(aNumChannels * aNumAFrames);
+ if (!audio) {
+ return audio;
+ }
+
+ if (aFrame->format == AV_SAMPLE_FMT_FLT) {
+ // Audio data already packed. No need to do anything other than copy it
+ // into a buffer we own.
+ memcpy(audio.get(), aFrame->data[0],
+ aNumChannels * aNumAFrames * sizeof(AudioDataValue));
+ } else if (aFrame->format == AV_SAMPLE_FMT_FLTP) {
+ // Planar audio data. Pack it into something we can understand.
+ AudioDataValue* tmp = audio.get();
+ AudioDataValue** data = reinterpret_cast<AudioDataValue**>(aFrame->data);
+ for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
+ for (uint32_t channel = 0; channel < aNumChannels; channel++) {
+ *tmp++ = data[channel][frame];
+ }
+ }
+ } else if (aFrame->format == AV_SAMPLE_FMT_S16) {
+    // Packed audio data; convert from S16 samples to 32-bit float.
+ AudioDataValue* tmp = audio.get();
+ int16_t* data = reinterpret_cast<int16_t**>(aFrame->data)[0];
+ for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
+ for (uint32_t channel = 0; channel < aNumChannels; channel++) {
+ *tmp++ = AudioSampleToFloat(*data++);
+ }
+ }
+ } else if (aFrame->format == AV_SAMPLE_FMT_S16P) {
+    // Planar audio data; convert from S16 to 32-bit float and interleave.
+ AudioDataValue* tmp = audio.get();
+ int16_t** data = reinterpret_cast<int16_t**>(aFrame->data);
+ for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
+ for (uint32_t channel = 0; channel < aNumChannels; channel++) {
+ *tmp++ = AudioSampleToFloat(data[channel][frame]);
+ }
+ }
+ } else if (aFrame->format == AV_SAMPLE_FMT_S32) {
+    // Packed audio data; convert from S32 samples to 32-bit float.
+ AudioDataValue* tmp = audio.get();
+ int32_t* data = reinterpret_cast<int32_t**>(aFrame->data)[0];
+ for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
+ for (uint32_t channel = 0; channel < aNumChannels; channel++) {
+ *tmp++ = AudioSampleToFloat(*data++);
+ }
+ }
+ } else if (aFrame->format == AV_SAMPLE_FMT_S32P) {
+    // Planar audio data; convert from S32 to 32-bit float and interleave.
+ AudioDataValue* tmp = audio.get();
+ int32_t** data = reinterpret_cast<int32_t**>(aFrame->data);
+ for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
+ for (uint32_t channel = 0; channel < aNumChannels; channel++) {
+ *tmp++ = AudioSampleToFloat(data[channel][frame]);
+ }
+ }
+ }
+
+ return audio;
+}
+
+MediaResult
+FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample)
+{
+ AVPacket packet;
+ mLib->av_init_packet(&packet);
+
+ packet.data = const_cast<uint8_t*>(aSample->Data());
+ packet.size = aSample->Size();
+
+ if (!PrepareFrame()) {
+ return MediaResult(
+ NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("FFmpeg audio decoder failed to allocate frame"));
+ }
+
+ int64_t samplePosition = aSample->mOffset;
+ media::TimeUnit pts = media::TimeUnit::FromMicroseconds(aSample->mTime);
+
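+  // A single MediaRawData may contain several compressed audio frames; keep
+  // calling avcodec_decode_audio4() until the packet is fully consumed and
+  // emit one AudioData per decoded frame.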
+ while (packet.size > 0) {
+ int decoded;
+ int bytesConsumed =
+ mLib->avcodec_decode_audio4(mCodecContext, mFrame, &decoded, &packet);
+
+ if (bytesConsumed < 0) {
+ NS_WARNING("FFmpeg audio decoder error.");
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("FFmpeg audio error:%d", bytesConsumed));
+ }
+
+ if (decoded) {
+ if (mFrame->format != AV_SAMPLE_FMT_FLT &&
+ mFrame->format != AV_SAMPLE_FMT_FLTP &&
+ mFrame->format != AV_SAMPLE_FMT_S16 &&
+ mFrame->format != AV_SAMPLE_FMT_S16P &&
+ mFrame->format != AV_SAMPLE_FMT_S32 &&
+ mFrame->format != AV_SAMPLE_FMT_S32P) {
+ return MediaResult(
+ NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL(
+ "FFmpeg audio decoder outputs unsupported audio format"));
+ }
+ uint32_t numChannels = mCodecContext->channels;
+ AudioConfig::ChannelLayout layout(numChannels);
+ if (!layout.IsValid()) {
+ return MediaResult(
+ NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Unsupported channel layout:%u", numChannels));
+ }
+
+ uint32_t samplingRate = mCodecContext->sample_rate;
+
+ AlignedAudioBuffer audio =
+ CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples);
+ if (!audio) {
+ return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+ }
+
+ media::TimeUnit duration =
+ FramesToTimeUnit(mFrame->nb_samples, samplingRate);
+ if (!duration.IsValid()) {
+ return MediaResult(
+ NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+ RESULT_DETAIL("Invalid sample duration"));
+ }
+
+ RefPtr<AudioData> data = new AudioData(samplePosition,
+ pts.ToMicroseconds(),
+ duration.ToMicroseconds(),
+ mFrame->nb_samples,
+ Move(audio),
+ numChannels,
+ samplingRate);
+ mCallback->Output(data);
+ pts += duration;
+ if (!pts.IsValid()) {
+ return MediaResult(
+ NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
+ RESULT_DETAIL("Invalid count of accumulated audio samples"));
+ }
+ }
+ packet.data += bytesConsumed;
+ packet.size -= bytesConsumed;
+ samplePosition += bytesConsumed;
+ }
+ return NS_OK;
+}
+
+void
+FFmpegAudioDecoder<LIBAV_VER>::ProcessDrain()
+{
+ ProcessFlush();
+ mCallback->DrainComplete();
+}
+
+AVCodecID
+FFmpegAudioDecoder<LIBAV_VER>::GetCodecId(const nsACString& aMimeType)
+{
+ if (aMimeType.EqualsLiteral("audio/mpeg")) {
+ return AV_CODEC_ID_MP3;
+ } else if (aMimeType.EqualsLiteral("audio/flac")) {
+ return AV_CODEC_ID_FLAC;
+ } else if (aMimeType.EqualsLiteral("audio/mp4a-latm")) {
+ return AV_CODEC_ID_AAC;
+ }
+
+ return AV_CODEC_ID_NONE;
+}
+
+FFmpegAudioDecoder<LIBAV_VER>::~FFmpegAudioDecoder()
+{
+ MOZ_COUNT_DTOR(FFmpegAudioDecoder);
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h
new file mode 100644
index 000000000..6adaeee14
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h
@@ -0,0 +1,44 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef __FFmpegAACDecoder_h__
+#define __FFmpegAACDecoder_h__
+
+#include "FFmpegLibWrapper.h"
+#include "FFmpegDataDecoder.h"
+
+namespace mozilla
+{
+
+template <int V> class FFmpegAudioDecoder
+{
+};
+
+template <>
+class FFmpegAudioDecoder<LIBAV_VER> : public FFmpegDataDecoder<LIBAV_VER>
+{
+public:
+ FFmpegAudioDecoder(FFmpegLibWrapper* aLib, TaskQueue* aTaskQueue,
+ MediaDataDecoderCallback* aCallback,
+ const AudioInfo& aConfig);
+ virtual ~FFmpegAudioDecoder();
+
+ RefPtr<InitPromise> Init() override;
+ void InitCodecContext() override;
+ static AVCodecID GetCodecId(const nsACString& aMimeType);
+ const char* GetDescriptionName() const override
+ {
+ return "ffmpeg audio decoder";
+ }
+
+private:
+ MediaResult DoDecode(MediaRawData* aSample) override;
+ void ProcessDrain() override;
+};
+
+} // namespace mozilla
+
+#endif // __FFmpegAACDecoder_h__
diff --git a/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp b/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
new file mode 100644
index 000000000..0b31fb0f9
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
@@ -0,0 +1,204 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/SyncRunnable.h"
+#include "mozilla/TaskQueue.h"
+
+#include <string.h>
+#ifdef __GNUC__
+#include <unistd.h>
+#endif
+
+#include "FFmpegLog.h"
+#include "FFmpegDataDecoder.h"
+#include "prsystem.h"
+
+namespace mozilla
+{
+
+StaticMutex FFmpegDataDecoder<LIBAV_VER>::sMonitor;
+
+FFmpegDataDecoder<LIBAV_VER>::FFmpegDataDecoder(FFmpegLibWrapper* aLib,
+                                                TaskQueue* aTaskQueue,
+                                                MediaDataDecoderCallback* aCallback,
+                                                AVCodecID aCodecID)
+ : mLib(aLib)
+ , mCallback(aCallback)
+ , mCodecContext(nullptr)
+ , mFrame(NULL)
+ , mExtraData(nullptr)
+ , mCodecID(aCodecID)
+ , mTaskQueue(aTaskQueue)
+ , mIsFlushing(false)
+{
+ MOZ_ASSERT(aLib);
+ MOZ_COUNT_CTOR(FFmpegDataDecoder);
+}
+
+FFmpegDataDecoder<LIBAV_VER>::~FFmpegDataDecoder()
+{
+ MOZ_COUNT_DTOR(FFmpegDataDecoder);
+}
+
+nsresult
+FFmpegDataDecoder<LIBAV_VER>::InitDecoder()
+{
+ FFMPEG_LOG("Initialising FFmpeg decoder.");
+
+ AVCodec* codec = FindAVCodec(mLib, mCodecID);
+ if (!codec) {
+ NS_WARNING("Couldn't find ffmpeg decoder");
+ return NS_ERROR_FAILURE;
+ }
+
+ StaticMutexAutoLock mon(sMonitor);
+
+ if (!(mCodecContext = mLib->avcodec_alloc_context3(codec))) {
+ NS_WARNING("Couldn't init ffmpeg context");
+ return NS_ERROR_FAILURE;
+ }
+
+ mCodecContext->opaque = this;
+
+ InitCodecContext();
+
+ if (mExtraData) {
+ mCodecContext->extradata_size = mExtraData->Length();
+    // FFmpeg may use SIMD instructions that read the data in 32-byte blocks,
+    // so make sure there is enough padding after the extradata to read safely.
+ mExtraData->AppendElements(FF_INPUT_BUFFER_PADDING_SIZE);
+ mCodecContext->extradata = mExtraData->Elements();
+ } else {
+ mCodecContext->extradata_size = 0;
+ }
+
+ if (codec->capabilities & CODEC_CAP_DR1) {
+ mCodecContext->flags |= CODEC_FLAG_EMU_EDGE;
+ }
+
+ if (mLib->avcodec_open2(mCodecContext, codec, nullptr) < 0) {
+ NS_WARNING("Couldn't initialise ffmpeg decoder");
+ mLib->avcodec_close(mCodecContext);
+ mLib->av_freep(&mCodecContext);
+ return NS_ERROR_FAILURE;
+ }
+
+ FFMPEG_LOG("FFmpeg init successful.");
+ return NS_OK;
+}
+
+void
+FFmpegDataDecoder<LIBAV_VER>::Shutdown()
+{
+ if (mTaskQueue) {
+ nsCOMPtr<nsIRunnable> runnable =
+ NewRunnableMethod(this, &FFmpegDataDecoder<LIBAV_VER>::ProcessShutdown);
+ mTaskQueue->Dispatch(runnable.forget());
+ } else {
+ ProcessShutdown();
+ }
+}
+
+void
+FFmpegDataDecoder<LIBAV_VER>::ProcessDecode(MediaRawData* aSample)
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ if (mIsFlushing) {
+ return;
+ }
+ MediaResult rv = DoDecode(aSample);
+ if (NS_FAILED(rv)) {
+ mCallback->Error(rv);
+ } else {
+ mCallback->InputExhausted();
+ }
+}
+
+void
+FFmpegDataDecoder<LIBAV_VER>::Input(MediaRawData* aSample)
+{
+ mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
+ this, &FFmpegDataDecoder::ProcessDecode, aSample));
+}
+
+void
+FFmpegDataDecoder<LIBAV_VER>::Flush()
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
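+  // Setting mIsFlushing makes ProcessDecode() drop any samples still queued on
+  // mTaskQueue; the flush itself is dispatched synchronously so the decoder is
+  // fully flushed by the time we return to the reader.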
+ mIsFlushing = true;
+ nsCOMPtr<nsIRunnable> runnable =
+ NewRunnableMethod(this, &FFmpegDataDecoder<LIBAV_VER>::ProcessFlush);
+ SyncRunnable::DispatchToThread(mTaskQueue, runnable);
+ mIsFlushing = false;
+}
+
+void
+FFmpegDataDecoder<LIBAV_VER>::Drain()
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ nsCOMPtr<nsIRunnable> runnable =
+ NewRunnableMethod(this, &FFmpegDataDecoder<LIBAV_VER>::ProcessDrain);
+ mTaskQueue->Dispatch(runnable.forget());
+}
+
+void
+FFmpegDataDecoder<LIBAV_VER>::ProcessFlush()
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ if (mCodecContext) {
+ mLib->avcodec_flush_buffers(mCodecContext);
+ }
+}
+
+void
+FFmpegDataDecoder<LIBAV_VER>::ProcessShutdown()
+{
+ StaticMutexAutoLock mon(sMonitor);
+
+ if (mCodecContext) {
+ mLib->avcodec_close(mCodecContext);
+ mLib->av_freep(&mCodecContext);
+#if LIBAVCODEC_VERSION_MAJOR >= 55
+ mLib->av_frame_free(&mFrame);
+#elif LIBAVCODEC_VERSION_MAJOR == 54
+ mLib->avcodec_free_frame(&mFrame);
+#else
+ mLib->av_freep(&mFrame);
+#endif
+ }
+}
+
+AVFrame*
+FFmpegDataDecoder<LIBAV_VER>::PrepareFrame()
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+#if LIBAVCODEC_VERSION_MAJOR >= 55
+ if (mFrame) {
+ mLib->av_frame_unref(mFrame);
+ } else {
+ mFrame = mLib->av_frame_alloc();
+ }
+#elif LIBAVCODEC_VERSION_MAJOR == 54
+ if (mFrame) {
+ mLib->avcodec_get_frame_defaults(mFrame);
+ } else {
+ mFrame = mLib->avcodec_alloc_frame();
+ }
+#else
+ mLib->av_freep(&mFrame);
+ mFrame = mLib->avcodec_alloc_frame();
+#endif
+ return mFrame;
+}
+
+/* static */ AVCodec*
+FFmpegDataDecoder<LIBAV_VER>::FindAVCodec(FFmpegLibWrapper* aLib,
+ AVCodecID aCodec)
+{
+ return aLib->avcodec_find_decoder(aCodec);
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/ffmpeg/FFmpegDataDecoder.h b/dom/media/platforms/ffmpeg/FFmpegDataDecoder.h
new file mode 100644
index 000000000..f9ff9d3c4
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegDataDecoder.h
@@ -0,0 +1,72 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef __FFmpegDataDecoder_h__
+#define __FFmpegDataDecoder_h__
+
+#include "PlatformDecoderModule.h"
+#include "FFmpegLibWrapper.h"
+#include "mozilla/StaticMutex.h"
+#include "FFmpegLibs.h"
+
+namespace mozilla
+{
+
+template <int V>
+class FFmpegDataDecoder : public MediaDataDecoder
+{
+};
+
+template <>
+class FFmpegDataDecoder<LIBAV_VER> : public MediaDataDecoder
+{
+public:
+ FFmpegDataDecoder(FFmpegLibWrapper* aLib, TaskQueue* aTaskQueue,
+ MediaDataDecoderCallback* aCallback,
+ AVCodecID aCodecID);
+ virtual ~FFmpegDataDecoder();
+
+ static bool Link();
+
+ RefPtr<InitPromise> Init() override = 0;
+ void Input(MediaRawData* aSample) override;
+ void Flush() override;
+ void Drain() override;
+ void Shutdown() override;
+
+ static AVCodec* FindAVCodec(FFmpegLibWrapper* aLib, AVCodecID aCodec);
+
+protected:
+  // Flush and Drain operations; always run on mTaskQueue.
+ virtual void ProcessFlush();
+ virtual void ProcessShutdown();
+ virtual void InitCodecContext() {}
+ AVFrame* PrepareFrame();
+ nsresult InitDecoder();
+
+ FFmpegLibWrapper* mLib;
+ MediaDataDecoderCallback* mCallback;
+
+ AVCodecContext* mCodecContext;
+ AVFrame* mFrame;
+ RefPtr<MediaByteBuffer> mExtraData;
+ AVCodecID mCodecID;
+
+private:
+ void ProcessDecode(MediaRawData* aSample);
+ virtual MediaResult DoDecode(MediaRawData* aSample) = 0;
+ virtual void ProcessDrain() = 0;
+
+ static StaticMutex sMonitor;
+ const RefPtr<TaskQueue> mTaskQueue;
+ // Set/cleared on reader thread calling Flush() to indicate that output is
+ // not required and so input samples on mTaskQueue need not be processed.
+ Atomic<bool> mIsFlushing;
+};
+
+} // namespace mozilla
+
+#endif // __FFmpegDataDecoder_h__
diff --git a/dom/media/platforms/ffmpeg/FFmpegDecoderModule.cpp b/dom/media/platforms/ffmpeg/FFmpegDecoderModule.cpp
new file mode 100644
index 000000000..7366bd899
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegDecoderModule.cpp
@@ -0,0 +1,13 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "FFmpegDecoderModule.h"
+
+namespace mozilla {
+
+template class FFmpegDecoderModule<LIBAV_VER>;
+
+} // namespace mozilla
diff --git a/dom/media/platforms/ffmpeg/FFmpegDecoderModule.h b/dom/media/platforms/ffmpeg/FFmpegDecoderModule.h
new file mode 100644
index 000000000..969ac7e0b
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegDecoderModule.h
@@ -0,0 +1,86 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef __FFmpegDecoderModule_h__
+#define __FFmpegDecoderModule_h__
+
+#include "PlatformDecoderModule.h"
+#include "FFmpegLibWrapper.h"
+#include "FFmpegAudioDecoder.h"
+#include "FFmpegVideoDecoder.h"
+
+namespace mozilla
+{
+
+template <int V>
+class FFmpegDecoderModule : public PlatformDecoderModule
+{
+public:
+ static already_AddRefed<PlatformDecoderModule>
+ Create(FFmpegLibWrapper* aLib)
+ {
+ RefPtr<PlatformDecoderModule> pdm = new FFmpegDecoderModule(aLib);
+
+ return pdm.forget();
+ }
+
+ explicit FFmpegDecoderModule(FFmpegLibWrapper* aLib) : mLib(aLib) {}
+ virtual ~FFmpegDecoderModule() {}
+
+ already_AddRefed<MediaDataDecoder>
+ CreateVideoDecoder(const CreateDecoderParams& aParams) override
+ {
+ RefPtr<MediaDataDecoder> decoder =
+ new FFmpegVideoDecoder<V>(mLib,
+ aParams.mTaskQueue,
+ aParams.mCallback,
+ aParams.VideoConfig(),
+ aParams.mImageContainer);
+ return decoder.forget();
+ }
+
+ already_AddRefed<MediaDataDecoder>
+ CreateAudioDecoder(const CreateDecoderParams& aParams) override
+ {
+ RefPtr<MediaDataDecoder> decoder =
+ new FFmpegAudioDecoder<V>(mLib,
+ aParams.mTaskQueue,
+ aParams.mCallback,
+ aParams.AudioConfig());
+ return decoder.forget();
+ }
+
+ bool SupportsMimeType(const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const override
+ {
+ AVCodecID videoCodec = FFmpegVideoDecoder<V>::GetCodecId(aMimeType);
+ AVCodecID audioCodec = FFmpegAudioDecoder<V>::GetCodecId(aMimeType);
+ if (audioCodec == AV_CODEC_ID_NONE && videoCodec == AV_CODEC_ID_NONE) {
+ return false;
+ }
+ AVCodecID codec = audioCodec != AV_CODEC_ID_NONE ? audioCodec : videoCodec;
+ return !!FFmpegDataDecoder<V>::FindAVCodec(mLib, codec);
+ }
+
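+  // For H.264 the decoder consumes AVCC-formatted samples (length-prefixed
+  // NAL units described by the avcC extradata), so request that conversion.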
+ ConversionRequired
+ DecoderNeedsConversion(const TrackInfo& aConfig) const override
+ {
+ if (aConfig.IsVideo() &&
+ (aConfig.mMimeType.EqualsLiteral("video/avc") ||
+ aConfig.mMimeType.EqualsLiteral("video/mp4"))) {
+ return ConversionRequired::kNeedAVCC;
+ } else {
+ return ConversionRequired::kNeedNone;
+ }
+ }
+
+private:
+ FFmpegLibWrapper* mLib;
+};
+
+} // namespace mozilla
+
+#endif // __FFmpegDecoderModule_h__
diff --git a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
new file mode 100644
index 000000000..f3dc00ad7
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
@@ -0,0 +1,178 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "FFmpegLibWrapper.h"
+#include "FFmpegLog.h"
+#include "MediaPrefs.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/Types.h"
+#include "prlink.h"
+
+#define AV_LOG_DEBUG 48
+
+namespace mozilla
+{
+
+FFmpegLibWrapper::FFmpegLibWrapper()
+{
+ PodZero(this);
+}
+
+FFmpegLibWrapper::~FFmpegLibWrapper()
+{
+ Unlink();
+}
+
+FFmpegLibWrapper::LinkResult
+FFmpegLibWrapper::Link()
+{
+ if (!mAVCodecLib || !mAVUtilLib) {
+ Unlink();
+ return LinkResult::NoProvidedLib;
+ }
+
+ avcodec_version =
+ (decltype(avcodec_version))PR_FindSymbol(mAVCodecLib, "avcodec_version");
+ if (!avcodec_version) {
+ Unlink();
+ return LinkResult::NoAVCodecVersion;
+ }
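+  // avcodec_version() packs the version as (major << 16) | (minor << 8) | micro.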
+ uint32_t version = avcodec_version();
+ uint32_t macro = (version >> 16) & 0xFFu;
+ mVersion = static_cast<int>(macro);
+ uint32_t micro = version & 0xFFu;
+ // A micro version >= 100 indicates that it's FFmpeg (as opposed to LibAV).
+ bool isFFMpeg = micro >= 100;
+ if (!isFFMpeg) {
+ if (macro == 57) {
+ // Due to current AVCodecContext binary incompatibility we can only
+ // support FFmpeg 57 at this stage.
+ Unlink();
+ return LinkResult::CannotUseLibAV57;
+ }
+#ifdef MOZ_FFMPEG
+ if (version < (54u << 16 | 35u << 8 | 1u)
+ && !MediaPrefs::LibavcodecAllowObsolete()) {
+ // Refuse any libavcodec version prior to 54.35.1.
+ // (Unless media.libavcodec.allow-obsolete==true)
+ Unlink();
+ return LinkResult::BlockedOldLibAVVersion;
+ }
+#endif
+ }
+
+ enum {
+ AV_FUNC_AVUTIL_MASK = 1 << 8,
+ AV_FUNC_53 = 1 << 0,
+ AV_FUNC_54 = 1 << 1,
+ AV_FUNC_55 = 1 << 2,
+ AV_FUNC_56 = 1 << 3,
+ AV_FUNC_57 = 1 << 4,
+ AV_FUNC_AVUTIL_53 = AV_FUNC_53 | AV_FUNC_AVUTIL_MASK,
+ AV_FUNC_AVUTIL_54 = AV_FUNC_54 | AV_FUNC_AVUTIL_MASK,
+ AV_FUNC_AVUTIL_55 = AV_FUNC_55 | AV_FUNC_AVUTIL_MASK,
+ AV_FUNC_AVUTIL_56 = AV_FUNC_56 | AV_FUNC_AVUTIL_MASK,
+ AV_FUNC_AVUTIL_57 = AV_FUNC_57 | AV_FUNC_AVUTIL_MASK,
+ AV_FUNC_AVCODEC_ALL = AV_FUNC_53 | AV_FUNC_54 | AV_FUNC_55 | AV_FUNC_56 | AV_FUNC_57,
+ AV_FUNC_AVUTIL_ALL = AV_FUNC_AVCODEC_ALL | AV_FUNC_AVUTIL_MASK
+ };
+
+ switch (macro) {
+ case 53:
+ version = AV_FUNC_53;
+ break;
+ case 54:
+ version = AV_FUNC_54;
+ break;
+ case 55:
+ version = AV_FUNC_55;
+ break;
+ case 56:
+ version = AV_FUNC_56;
+ break;
+ case 57:
+ version = AV_FUNC_57;
+ break;
+ default:
+ FFMPEG_LOG("Unknown avcodec version");
+ Unlink();
+ return isFFMpeg
+ ? ((macro > 57)
+ ? LinkResult::UnknownFutureFFMpegVersion
+ : LinkResult::UnknownOlderFFMpegVersion)
+ // All LibAV versions<54.35.1 are blocked, therefore we must be
+ // dealing with a later one.
+ : LinkResult::UnknownFutureLibAVVersion;
+ }
+
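+// AV_FUNC_OPTION resolves |func| only when the detected library version is in
+// |ver| and tolerates a missing symbol; AV_FUNC additionally treats a missing
+// required symbol as a fatal link error.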
+#define AV_FUNC_OPTION(func, ver) \
+ if ((ver) & version) { \
+ if (!(func = (decltype(func))PR_FindSymbol(((ver) & AV_FUNC_AVUTIL_MASK) ? mAVUtilLib : mAVCodecLib, #func))) { \
+ FFMPEG_LOG("Couldn't load function " # func); \
+ } \
+ } else { \
+ func = (decltype(func))nullptr; \
+ }
+
+#define AV_FUNC(func, ver) \
+ AV_FUNC_OPTION(func, ver) \
+ if ((ver) & version && !func) { \
+ Unlink(); \
+ return isFFMpeg ? LinkResult::MissingFFMpegFunction \
+ : LinkResult::MissingLibAVFunction; \
+ }
+
+ AV_FUNC(av_lockmgr_register, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(avcodec_alloc_context3, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(avcodec_close, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(avcodec_decode_audio4, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(avcodec_decode_video2, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(avcodec_find_decoder, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(avcodec_flush_buffers, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(avcodec_open2, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(avcodec_register_all, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(av_init_packet, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(av_parser_init, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(av_parser_close, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(av_parser_parse2, AV_FUNC_AVCODEC_ALL)
+ AV_FUNC(avcodec_alloc_frame, (AV_FUNC_53 | AV_FUNC_54))
+ AV_FUNC(avcodec_get_frame_defaults, (AV_FUNC_53 | AV_FUNC_54))
+ AV_FUNC(avcodec_free_frame, AV_FUNC_54)
+ AV_FUNC(av_log_set_level, AV_FUNC_AVUTIL_ALL)
+ AV_FUNC(av_malloc, AV_FUNC_AVUTIL_ALL)
+ AV_FUNC(av_freep, AV_FUNC_AVUTIL_ALL)
+ AV_FUNC(av_frame_alloc, (AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57))
+ AV_FUNC(av_frame_free, (AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57))
+ AV_FUNC(av_frame_unref, (AV_FUNC_AVUTIL_55 | AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57))
+ AV_FUNC_OPTION(av_frame_get_colorspace, AV_FUNC_AVUTIL_ALL)
+#undef AV_FUNC
+#undef AV_FUNC_OPTION
+
+ avcodec_register_all();
+#ifdef DEBUG
+ av_log_set_level(AV_LOG_DEBUG);
+#endif
+
+ return LinkResult::Success;
+}
+
+void
+FFmpegLibWrapper::Unlink()
+{
+ if (av_lockmgr_register) {
+    // Registering a null lockmgr causes the destruction of the libav* global
+    // mutexes, as the default lockmgr that allocated them gets deregistered.
+    // This prevents ASAN and Valgrind from reporting sizeof(pthread_mutex_t)
+    // leaks.
+ av_lockmgr_register(nullptr);
+ }
+ if (mAVUtilLib && mAVUtilLib != mAVCodecLib) {
+ PR_UnloadLibrary(mAVUtilLib);
+ }
+ if (mAVCodecLib) {
+ PR_UnloadLibrary(mAVCodecLib);
+ }
+ PodZero(this);
+}
+
+} // namespace mozilla
\ No newline at end of file
diff --git a/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h
new file mode 100644
index 000000000..d6944a1d8
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h
@@ -0,0 +1,94 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef __FFmpegLibWrapper_h__
+#define __FFmpegLibWrapper_h__
+
+#include "mozilla/Types.h"
+
+struct AVCodec;
+struct AVCodecContext;
+struct AVFrame;
+struct AVPacket;
+struct AVDictionary;
+struct AVCodecParserContext;
+struct PRLibrary;
+
+namespace mozilla
+{
+
+struct FFmpegLibWrapper
+{
+ FFmpegLibWrapper();
+ ~FFmpegLibWrapper();
+
+ enum class LinkResult
+ {
+ Success,
+ NoProvidedLib,
+ NoAVCodecVersion,
+ CannotUseLibAV57,
+ BlockedOldLibAVVersion,
+ UnknownFutureLibAVVersion,
+ UnknownFutureFFMpegVersion,
+ UnknownOlderFFMpegVersion,
+ MissingFFMpegFunction,
+ MissingLibAVFunction,
+ };
+ // Examine mAVCodecLib and mAVUtilLib, and attempt to resolve all symbols.
+ // Upon failure, the entire object will be reset and any attached libraries
+ // will be unlinked.
+ LinkResult Link();
+
+ // Reset the wrapper and unlink all attached libraries.
+ void Unlink();
+
+  // Major version of the libavcodec library we are linked against.
+  // 0 indicates that the wrapper has not been initialized with Link().
+ int mVersion;
+
+ // libavcodec
+ unsigned (*avcodec_version)();
+ int (*av_lockmgr_register)(int (*cb)(void** mutex, int op));
+ AVCodecContext* (*avcodec_alloc_context3)(const AVCodec* codec);
+ int (*avcodec_close)(AVCodecContext* avctx);
+ int (*avcodec_decode_audio4)(AVCodecContext* avctx, AVFrame* frame, int* got_frame_ptr, const AVPacket* avpkt);
+ int (*avcodec_decode_video2)(AVCodecContext* avctx, AVFrame* picture, int* got_picture_ptr, const AVPacket* avpkt);
+ AVCodec* (*avcodec_find_decoder)(int id);
+ void (*avcodec_flush_buffers)(AVCodecContext *avctx);
+ int (*avcodec_open2)(AVCodecContext *avctx, const AVCodec* codec, AVDictionary** options);
+ void (*avcodec_register_all)();
+ void (*av_init_packet)(AVPacket* pkt);
+ AVCodecParserContext* (*av_parser_init)(int codec_id);
+ void (*av_parser_close)(AVCodecParserContext* s);
+ int (*av_parser_parse2)(AVCodecParserContext* s, AVCodecContext* avctx, uint8_t** poutbuf, int* poutbuf_size, const uint8_t* buf, int buf_size, int64_t pts, int64_t dts, int64_t pos);
+
+ // only used in libavcodec <= 54
+ AVFrame* (*avcodec_alloc_frame)();
+ void (*avcodec_get_frame_defaults)(AVFrame* pic);
+ // libavcodec v54 only
+ void (*avcodec_free_frame)(AVFrame** frame);
+
+ // libavutil
+ void (*av_log_set_level)(int level);
+ void* (*av_malloc)(size_t size);
+ void (*av_freep)(void *ptr);
+
+ // libavutil v55 and later only
+ AVFrame* (*av_frame_alloc)();
+ void (*av_frame_free)(AVFrame** frame);
+ void (*av_frame_unref)(AVFrame* frame);
+
+ // libavutil optional
+ int (*av_frame_get_colorspace)(const AVFrame *frame);
+
+ PRLibrary* mAVCodecLib;
+ PRLibrary* mAVUtilLib;
+
+private:
+};
+
+} // namespace mozilla
+
+#endif // __FFmpegLibWrapper_h__
\ No newline at end of file
diff --git a/dom/media/platforms/ffmpeg/FFmpegLibs.h b/dom/media/platforms/ffmpeg/FFmpegLibs.h
new file mode 100644
index 000000000..291ed7789
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegLibs.h
@@ -0,0 +1,39 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef __FFmpegLibs_h__
+#define __FFmpegLibs_h__
+
+extern "C" {
+#ifdef __GNUC__
+#pragma GCC visibility push(default)
+#endif
+#include "libavcodec/avcodec.h"
+#include "libavutil/avutil.h"
+#include "libavutil/mem.h"
+#ifdef __GNUC__
+#pragma GCC visibility pop
+#endif
+}
+
+#if LIBAVCODEC_VERSION_MAJOR < 55
+#define AV_CODEC_ID_VP6F CODEC_ID_VP6F
+#define AV_CODEC_ID_H264 CODEC_ID_H264
+#define AV_CODEC_ID_AAC CODEC_ID_AAC
+#define AV_CODEC_ID_MP3 CODEC_ID_MP3
+#define AV_CODEC_ID_VP8 CODEC_ID_VP8
+#define AV_CODEC_ID_NONE CODEC_ID_NONE
+#define AV_CODEC_ID_FLAC CODEC_ID_FLAC
+typedef CodecID AVCodecID;
+#endif
+
+#ifdef FFVPX_VERSION
+enum { LIBAV_VER = FFVPX_VERSION };
+#else
+enum { LIBAV_VER = LIBAVCODEC_VERSION_MAJOR };
+#endif
+
+#endif // __FFmpegLibs_h__
diff --git a/dom/media/platforms/ffmpeg/FFmpegLog.h b/dom/media/platforms/ffmpeg/FFmpegLog.h
new file mode 100644
index 000000000..602ce5b71
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegLog.h
@@ -0,0 +1,14 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef __FFmpegLog_h__
+#define __FFmpegLog_h__
+
+#include "mozilla/Logging.h"
+
+#define FFMPEG_LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
+
+#endif // __FFmpegLog_h__
diff --git a/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp b/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp
new file mode 100644
index 000000000..fdbe77ef7
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp
@@ -0,0 +1,169 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "FFmpegRuntimeLinker.h"
+#include "FFmpegLibWrapper.h"
+#include "mozilla/ArrayUtils.h"
+#include "FFmpegLog.h"
+#include "prlink.h"
+
+namespace mozilla
+{
+
+FFmpegRuntimeLinker::LinkStatus FFmpegRuntimeLinker::sLinkStatus =
+ LinkStatus_INIT;
+const char* FFmpegRuntimeLinker::sLinkStatusLibraryName = "";
+
+template <int V> class FFmpegDecoderModule
+{
+public:
+ static already_AddRefed<PlatformDecoderModule> Create(FFmpegLibWrapper*);
+};
+
+static FFmpegLibWrapper sLibAV;
+
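+// Candidate libavcodec sonames, newest first; the first one that both loads
+// and links successfully is used.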
+static const char* sLibs[] = {
+#if defined(XP_DARWIN)
+ "libavcodec.57.dylib",
+ "libavcodec.56.dylib",
+ "libavcodec.55.dylib",
+ "libavcodec.54.dylib",
+ "libavcodec.53.dylib",
+#else
+ "libavcodec-ffmpeg.so.57",
+ "libavcodec-ffmpeg.so.56",
+ "libavcodec.so.57",
+ "libavcodec.so.56",
+ "libavcodec.so.55",
+ "libavcodec.so.54",
+ "libavcodec.so.53",
+#endif
+};
+
+/* static */ bool
+FFmpegRuntimeLinker::Init()
+{
+ if (sLinkStatus != LinkStatus_INIT) {
+ return sLinkStatus == LinkStatus_SUCCEEDED;
+ }
+
+ // While going through all possible libs, this status will be updated with a
+ // more precise error if possible.
+ sLinkStatus = LinkStatus_NOT_FOUND;
+
+ for (size_t i = 0; i < ArrayLength(sLibs); i++) {
+ const char* lib = sLibs[i];
+ PRLibSpec lspec;
+ lspec.type = PR_LibSpec_Pathname;
+ lspec.value.pathname = lib;
+ sLibAV.mAVCodecLib = PR_LoadLibraryWithFlags(lspec, PR_LD_NOW | PR_LD_LOCAL);
+ if (sLibAV.mAVCodecLib) {
+ sLibAV.mAVUtilLib = sLibAV.mAVCodecLib;
+ switch (sLibAV.Link()) {
+ case FFmpegLibWrapper::LinkResult::Success:
+ sLinkStatus = LinkStatus_SUCCEEDED;
+ sLinkStatusLibraryName = lib;
+ return true;
+ case FFmpegLibWrapper::LinkResult::NoProvidedLib:
+ MOZ_ASSERT_UNREACHABLE("Incorrectly-setup sLibAV");
+ break;
+ case FFmpegLibWrapper::LinkResult::NoAVCodecVersion:
+ if (sLinkStatus > LinkStatus_INVALID_CANDIDATE) {
+ sLinkStatus = LinkStatus_INVALID_CANDIDATE;
+ sLinkStatusLibraryName = lib;
+ }
+ break;
+ case FFmpegLibWrapper::LinkResult::CannotUseLibAV57:
+ if (sLinkStatus > LinkStatus_UNUSABLE_LIBAV57) {
+ sLinkStatus = LinkStatus_UNUSABLE_LIBAV57;
+ sLinkStatusLibraryName = lib;
+ }
+ break;
+ case FFmpegLibWrapper::LinkResult::BlockedOldLibAVVersion:
+ if (sLinkStatus > LinkStatus_OBSOLETE_LIBAV) {
+ sLinkStatus = LinkStatus_OBSOLETE_LIBAV;
+ sLinkStatusLibraryName = lib;
+ }
+ break;
+ case FFmpegLibWrapper::LinkResult::UnknownFutureLibAVVersion:
+ case FFmpegLibWrapper::LinkResult::MissingLibAVFunction:
+ if (sLinkStatus > LinkStatus_INVALID_LIBAV_CANDIDATE) {
+ sLinkStatus = LinkStatus_INVALID_LIBAV_CANDIDATE;
+ sLinkStatusLibraryName = lib;
+ }
+ break;
+ case FFmpegLibWrapper::LinkResult::UnknownFutureFFMpegVersion:
+ case FFmpegLibWrapper::LinkResult::MissingFFMpegFunction:
+ if (sLinkStatus > LinkStatus_INVALID_FFMPEG_CANDIDATE) {
+ sLinkStatus = LinkStatus_INVALID_FFMPEG_CANDIDATE;
+ sLinkStatusLibraryName = lib;
+ }
+ break;
+ case FFmpegLibWrapper::LinkResult::UnknownOlderFFMpegVersion:
+ if (sLinkStatus > LinkStatus_OBSOLETE_FFMPEG) {
+ sLinkStatus = LinkStatus_OBSOLETE_FFMPEG;
+ sLinkStatusLibraryName = lib;
+ }
+ break;
+ }
+ }
+ }
+
+ FFMPEG_LOG("H264/AAC codecs unsupported without [");
+ for (size_t i = 0; i < ArrayLength(sLibs); i++) {
+ FFMPEG_LOG("%s %s", i ? "," : " ", sLibs[i]);
+ }
+ FFMPEG_LOG(" ]\n");
+
+ return false;
+}
+
+/* static */ already_AddRefed<PlatformDecoderModule>
+FFmpegRuntimeLinker::CreateDecoderModule()
+{
+ if (!Init()) {
+ return nullptr;
+ }
+ RefPtr<PlatformDecoderModule> module;
+ switch (sLibAV.mVersion) {
+ case 53: module = FFmpegDecoderModule<53>::Create(&sLibAV); break;
+ case 54: module = FFmpegDecoderModule<54>::Create(&sLibAV); break;
+ case 55:
+ case 56: module = FFmpegDecoderModule<55>::Create(&sLibAV); break;
+ case 57: module = FFmpegDecoderModule<57>::Create(&sLibAV); break;
+ default: module = nullptr;
+ }
+ return module.forget();
+}
+
+/* static */ const char*
+FFmpegRuntimeLinker::LinkStatusString()
+{
+ switch (sLinkStatus) {
+ case LinkStatus_INIT:
+ return "Libavcodec not initialized yet";
+ case LinkStatus_SUCCEEDED:
+ return "Libavcodec linking succeeded";
+ case LinkStatus_INVALID_FFMPEG_CANDIDATE:
+ return "Invalid FFMpeg libavcodec candidate";
+ case LinkStatus_UNUSABLE_LIBAV57:
+ return "Unusable LibAV's libavcodec 57";
+ case LinkStatus_INVALID_LIBAV_CANDIDATE:
+ return "Invalid LibAV libavcodec candidate";
+ case LinkStatus_OBSOLETE_FFMPEG:
+ return "Obsolete FFMpeg libavcodec candidate";
+ case LinkStatus_OBSOLETE_LIBAV:
+ return "Obsolete LibAV libavcodec candidate";
+ case LinkStatus_INVALID_CANDIDATE:
+ return "Invalid libavcodec candidate";
+ case LinkStatus_NOT_FOUND:
+ return "Libavcodec not found";
+ }
+ MOZ_ASSERT_UNREACHABLE("Unknown sLinkStatus value");
+ return "?";
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.h b/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.h
new file mode 100644
index 000000000..900c4ca79
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.h
@@ -0,0 +1,46 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef __FFmpegRuntimeLinker_h__
+#define __FFmpegRuntimeLinker_h__
+
+#include "PlatformDecoderModule.h"
+
+namespace mozilla
+{
+
+class FFmpegRuntimeLinker
+{
+public:
+ static bool Init();
+ static already_AddRefed<PlatformDecoderModule> CreateDecoderModule();
+ enum LinkStatus
+ {
+ LinkStatus_INIT = 0, // Never been linked.
+ LinkStatus_SUCCEEDED, // Found a usable library.
+ // The following error statuses are sorted from most to least preferred
+    // (i.e., if more than one happens, the top one is chosen).
+ LinkStatus_INVALID_FFMPEG_CANDIDATE, // Found ffmpeg with unexpected contents.
+ LinkStatus_UNUSABLE_LIBAV57, // Found LibAV 57, which we cannot use.
+ LinkStatus_INVALID_LIBAV_CANDIDATE, // Found libav with unexpected contents.
+ LinkStatus_OBSOLETE_FFMPEG,
+ LinkStatus_OBSOLETE_LIBAV,
+ LinkStatus_INVALID_CANDIDATE, // Found some lib with unexpected contents.
+ LinkStatus_NOT_FOUND, // Haven't found any library with an expected name.
+ };
+ static LinkStatus LinkStatusCode() { return sLinkStatus; }
+ static const char* LinkStatusString();
+ // Library name to which the sLinkStatus applies, or "" if not applicable.
+ static const char* LinkStatusLibraryName() { return sLinkStatusLibraryName; }
+
+private:
+ static LinkStatus sLinkStatus;
+ static const char* sLinkStatusLibraryName;
+};
+
+}
+
+#endif // __FFmpegRuntimeLinker_h__
diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
new file mode 100644
index 000000000..5c1b6c97b
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -0,0 +1,393 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/TaskQueue.h"
+
+#include "nsThreadUtils.h"
+#include "ImageContainer.h"
+
+#include "MediaInfo.h"
+#include "VPXDecoder.h"
+#include "MP4Decoder.h"
+
+#include "FFmpegVideoDecoder.h"
+#include "FFmpegLog.h"
+#include "mozilla/PodOperations.h"
+
+#include "libavutil/pixfmt.h"
+#if LIBAVCODEC_VERSION_MAJOR < 54
+#define AVPixelFormat PixelFormat
+#define AV_PIX_FMT_YUV420P PIX_FMT_YUV420P
+#define AV_PIX_FMT_YUVJ420P PIX_FMT_YUVJ420P
+#define AV_PIX_FMT_YUV444P PIX_FMT_YUV444P
+#define AV_PIX_FMT_NONE PIX_FMT_NONE
+#endif
+
+typedef mozilla::layers::Image Image;
+typedef mozilla::layers::PlanarYCbCrImage PlanarYCbCrImage;
+
+namespace mozilla
+{
+
+/**
+ * FFmpeg calls back to this function with a list of pixel formats it supports.
+ * We choose a pixel format that we support and return it.
+ * For now we just look for YUV420P, YUVJ420P and YUV444P, as those are the
+ * only non-hardware-accelerated formats supported by FFmpeg's H264 and VP9
+ * decoders.
+ */
+static AVPixelFormat
+ChoosePixelFormat(AVCodecContext* aCodecContext, const AVPixelFormat* aFormats)
+{
+ FFMPEG_LOG("Choosing FFmpeg pixel format for video decoding.");
+ for (; *aFormats > -1; aFormats++) {
+ switch (*aFormats) {
+ case AV_PIX_FMT_YUV444P:
+ FFMPEG_LOG("Requesting pixel format YUV444P.");
+ return AV_PIX_FMT_YUV444P;
+ case AV_PIX_FMT_YUV420P:
+ FFMPEG_LOG("Requesting pixel format YUV420P.");
+ return AV_PIX_FMT_YUV420P;
+ case AV_PIX_FMT_YUVJ420P:
+ FFMPEG_LOG("Requesting pixel format YUVJ420P.");
+ return AV_PIX_FMT_YUVJ420P;
+ default:
+ break;
+ }
+ }
+
+ NS_WARNING("FFmpeg does not share any supported pixel formats.");
+ return AV_PIX_FMT_NONE;
+}
+
+FFmpegVideoDecoder<LIBAV_VER>::PtsCorrectionContext::PtsCorrectionContext()
+ : mNumFaultyPts(0)
+ , mNumFaultyDts(0)
+ , mLastPts(INT64_MIN)
+ , mLastDts(INT64_MIN)
+{
+}
+
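+// Pick a presentation timestamp for a decoded frame, using a heuristic along
+// the lines of FFmpeg's guess_correct_pts(): prefer the reported pts unless
+// dts has historically been the more reliable (monotonic) of the two.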
+int64_t
+FFmpegVideoDecoder<LIBAV_VER>::PtsCorrectionContext::GuessCorrectPts(int64_t aPts, int64_t aDts)
+{
+ int64_t pts = AV_NOPTS_VALUE;
+
+ if (aDts != int64_t(AV_NOPTS_VALUE)) {
+ mNumFaultyDts += aDts <= mLastDts;
+ mLastDts = aDts;
+ }
+ if (aPts != int64_t(AV_NOPTS_VALUE)) {
+ mNumFaultyPts += aPts <= mLastPts;
+ mLastPts = aPts;
+ }
+ if ((mNumFaultyPts <= mNumFaultyDts || aDts == int64_t(AV_NOPTS_VALUE)) &&
+ aPts != int64_t(AV_NOPTS_VALUE)) {
+ pts = aPts;
+ } else {
+ pts = aDts;
+ }
+ return pts;
+}
+
+void
+FFmpegVideoDecoder<LIBAV_VER>::PtsCorrectionContext::Reset()
+{
+ mNumFaultyPts = 0;
+ mNumFaultyDts = 0;
+ mLastPts = INT64_MIN;
+ mLastDts = INT64_MIN;
+}
+
+FFmpegVideoDecoder<LIBAV_VER>::FFmpegVideoDecoder(FFmpegLibWrapper* aLib,
+ TaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
+ const VideoInfo& aConfig,
+ ImageContainer* aImageContainer)
+ : FFmpegDataDecoder(aLib, aTaskQueue, aCallback, GetCodecId(aConfig.mMimeType))
+ , mImageContainer(aImageContainer)
+ , mInfo(aConfig)
+ , mCodecParser(nullptr)
+ , mLastInputDts(INT64_MIN)
+{
+ MOZ_COUNT_CTOR(FFmpegVideoDecoder);
+ // Use a new MediaByteBuffer as the object will be modified during initialization.
+ mExtraData = new MediaByteBuffer;
+ mExtraData->AppendElements(*aConfig.mExtraData);
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+FFmpegVideoDecoder<LIBAV_VER>::Init()
+{
+ if (NS_FAILED(InitDecoder())) {
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+ return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__);
+}
+
+void
+FFmpegVideoDecoder<LIBAV_VER>::InitCodecContext()
+{
+ mCodecContext->width = mInfo.mImage.width;
+ mCodecContext->height = mInfo.mImage.height;
+
+  // Use the same thread-count logic as libvpx so that ffmpeg behaves the same
+  // way libvpx would; diverging here has caused various crashes (see bug
+  // 1236167).
+ int decode_threads = 1;
+ if (mInfo.mDisplay.width >= 2048) {
+ decode_threads = 8;
+ } else if (mInfo.mDisplay.width >= 1024) {
+ decode_threads = 4;
+ } else if (mInfo.mDisplay.width >= 320) {
+ decode_threads = 2;
+ }
+
+ decode_threads = std::min(decode_threads, PR_GetNumberOfProcessors() - 1);
+ decode_threads = std::max(decode_threads, 1);
+ mCodecContext->thread_count = decode_threads;
+ if (decode_threads > 1) {
+ mCodecContext->thread_type = FF_THREAD_SLICE | FF_THREAD_FRAME;
+ }
+
+ // FFmpeg will call back to this to negotiate a video pixel format.
+ mCodecContext->get_format = ChoosePixelFormat;
+
+ mCodecParser = mLib->av_parser_init(mCodecID);
+ if (mCodecParser) {
+ mCodecParser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
+ }
+}
+
+MediaResult
+FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample)
+{
+ bool gotFrame = false;
+ return DoDecode(aSample, &gotFrame);
+}
+
+MediaResult
+FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample, bool* aGotFrame)
+{
+ uint8_t* inputData = const_cast<uint8_t*>(aSample->Data());
+ size_t inputSize = aSample->Size();
+
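+  // A VP8/VP9 sample may contain more than one compressed frame; run it
+  // through the codec parser so the decoder is fed one frame at a time.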
+#if LIBAVCODEC_VERSION_MAJOR >= 54
+ if (inputSize && mCodecParser && (mCodecID == AV_CODEC_ID_VP8
+#if LIBAVCODEC_VERSION_MAJOR >= 55
+ || mCodecID == AV_CODEC_ID_VP9
+#endif
+ )) {
+ while (inputSize) {
+ uint8_t* data;
+ int size;
+ int len = mLib->av_parser_parse2(mCodecParser, mCodecContext, &data, &size,
+ inputData, inputSize,
+ aSample->mTime, aSample->mTimecode,
+ aSample->mOffset);
+ if (size_t(len) > inputSize) {
+ return NS_ERROR_DOM_MEDIA_DECODE_ERR;
+ }
+ inputData += len;
+ inputSize -= len;
+ if (size) {
+ bool gotFrame = false;
+ MediaResult rv = DoDecode(aSample, data, size, &gotFrame);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ if (gotFrame && aGotFrame) {
+ *aGotFrame = true;
+ }
+ }
+ }
+ return NS_OK;
+ }
+#endif
+ return DoDecode(aSample, inputData, inputSize, aGotFrame);
+}
+
+MediaResult
+FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
+ uint8_t* aData, int aSize,
+ bool* aGotFrame)
+{
+ AVPacket packet;
+ mLib->av_init_packet(&packet);
+
+ packet.data = aData;
+ packet.size = aSize;
+ packet.dts = mLastInputDts = aSample->mTimecode;
+ packet.pts = aSample->mTime;
+ packet.flags = aSample->mKeyframe ? AV_PKT_FLAG_KEY : 0;
+ packet.pos = aSample->mOffset;
+
+  // LibAV provides no API to retrieve the decoded sample's duration
+  // (FFmpeg >= 1.0 provides av_frame_get_pkt_duration), so we instead store
+  // the duration in a map keyed by dts and look it up after decoding.
+  // The map typically holds around 16 entries.
+ mDurationMap.Insert(aSample->mTimecode, aSample->mDuration);
+
+ if (!PrepareFrame()) {
+ NS_WARNING("FFmpeg h264 decoder failed to allocate frame.");
+ return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+ }
+
+ // Required with old version of FFmpeg/LibAV
+ mFrame->reordered_opaque = AV_NOPTS_VALUE;
+
+ int decoded;
+ int bytesConsumed =
+ mLib->avcodec_decode_video2(mCodecContext, mFrame, &decoded, &packet);
+
+ FFMPEG_LOG("DoDecodeFrame:decode_video: rv=%d decoded=%d "
+ "(Input: pts(%lld) dts(%lld) Output: pts(%lld) "
+ "opaque(%lld) pkt_pts(%lld) pkt_dts(%lld))",
+ bytesConsumed, decoded, packet.pts, packet.dts, mFrame->pts,
+ mFrame->reordered_opaque, mFrame->pkt_pts, mFrame->pkt_dts);
+
+ if (bytesConsumed < 0) {
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("FFmpeg video error:%d", bytesConsumed));
+ }
+
+ if (!decoded) {
+ if (aGotFrame) {
+ *aGotFrame = false;
+ }
+ return NS_OK;
+ }
+
+ // If we've decoded a frame then we need to output it
+ int64_t pts = mPtsContext.GuessCorrectPts(mFrame->pkt_pts, mFrame->pkt_dts);
+ // Retrieve duration from dts.
+  // We use the first entry matching this dts; this handles damaged files
+  // that contain multiple frames with the same dts.
+
+ int64_t duration;
+ if (!mDurationMap.Find(mFrame->pkt_dts, duration)) {
+ NS_WARNING("Unable to retrieve duration from map");
+ duration = aSample->mDuration;
+    // The dts values are probably reported incorrectly, so clear the map: we
+    // are unlikely to find matches in the future anyway, and this also keeps
+    // the map from growing without bound.
+ mDurationMap.Clear();
+ }
+ FFMPEG_LOG("Got one frame output with pts=%lld dts=%lld duration=%lld opaque=%lld",
+ pts, mFrame->pkt_dts, duration, mCodecContext->reordered_opaque);
+
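+  // Wrap the decoder-owned planes in a YCbCrBuffer. VideoData::CreateAndCopyData
+  // copies the pixels, so the AVFrame can safely be reused for the next decode.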
+ VideoData::YCbCrBuffer b;
+ b.mPlanes[0].mData = mFrame->data[0];
+ b.mPlanes[1].mData = mFrame->data[1];
+ b.mPlanes[2].mData = mFrame->data[2];
+
+ b.mPlanes[0].mStride = mFrame->linesize[0];
+ b.mPlanes[1].mStride = mFrame->linesize[1];
+ b.mPlanes[2].mStride = mFrame->linesize[2];
+
+ b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;
+ b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;
+ b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;
+
+ b.mPlanes[0].mWidth = mFrame->width;
+ b.mPlanes[0].mHeight = mFrame->height;
+ if (mCodecContext->pix_fmt == AV_PIX_FMT_YUV444P) {
+ b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = mFrame->width;
+ b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = mFrame->height;
+ } else {
+ b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = (mFrame->width + 1) >> 1;
+ b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = (mFrame->height + 1) >> 1;
+ }
+ if (mLib->av_frame_get_colorspace) {
+ switch (mLib->av_frame_get_colorspace(mFrame)) {
+ case AVCOL_SPC_BT709:
+ b.mYUVColorSpace = YUVColorSpace::BT709;
+ break;
+ case AVCOL_SPC_SMPTE170M:
+ case AVCOL_SPC_BT470BG:
+ b.mYUVColorSpace = YUVColorSpace::BT601;
+ break;
+ default:
+ break;
+ }
+ }
+ RefPtr<VideoData> v =
+ VideoData::CreateAndCopyData(mInfo,
+ mImageContainer,
+ aSample->mOffset,
+ pts,
+ duration,
+ b,
+ !!mFrame->key_frame,
+ -1,
+ mInfo.ScaledImageRect(mFrame->width,
+ mFrame->height));
+
+ if (!v) {
+ return MediaResult(NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("image allocation error"));
+ }
+ mCallback->Output(v);
+ if (aGotFrame) {
+ *aGotFrame = true;
+ }
+ return NS_OK;
+}
+
+void
+FFmpegVideoDecoder<LIBAV_VER>::ProcessDrain()
+{
+ RefPtr<MediaRawData> empty(new MediaRawData());
+ empty->mTimecode = mLastInputDts;
+ bool gotFrame = false;
+ while (NS_SUCCEEDED(DoDecode(empty, &gotFrame)) && gotFrame);
+ mCallback->DrainComplete();
+}
+
+void
+FFmpegVideoDecoder<LIBAV_VER>::ProcessFlush()
+{
+ mPtsContext.Reset();
+ mDurationMap.Clear();
+ FFmpegDataDecoder::ProcessFlush();
+}
+
+FFmpegVideoDecoder<LIBAV_VER>::~FFmpegVideoDecoder()
+{
+ MOZ_COUNT_DTOR(FFmpegVideoDecoder);
+ if (mCodecParser) {
+ mLib->av_parser_close(mCodecParser);
+ mCodecParser = nullptr;
+ }
+}
+
+AVCodecID
+FFmpegVideoDecoder<LIBAV_VER>::GetCodecId(const nsACString& aMimeType)
+{
+ if (MP4Decoder::IsH264(aMimeType)) {
+ return AV_CODEC_ID_H264;
+ }
+
+ if (aMimeType.EqualsLiteral("video/x-vnd.on2.vp6")) {
+ return AV_CODEC_ID_VP6F;
+ }
+
+#if LIBAVCODEC_VERSION_MAJOR >= 54
+ if (VPXDecoder::IsVP8(aMimeType)) {
+ return AV_CODEC_ID_VP8;
+ }
+#endif
+
+#if LIBAVCODEC_VERSION_MAJOR >= 55
+ if (VPXDecoder::IsVP9(aMimeType)) {
+ return AV_CODEC_ID_VP9;
+ }
+#endif
+
+ return AV_CODEC_ID_NONE;
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
new file mode 100644
index 000000000..786df0da1
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
@@ -0,0 +1,127 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef __FFmpegVideoDecoder_h__
+#define __FFmpegVideoDecoder_h__
+
+#include "FFmpegLibWrapper.h"
+#include "FFmpegDataDecoder.h"
+#include "mozilla/Pair.h"
+#include "nsTArray.h"
+
+namespace mozilla
+{
+
+template <int V>
+class FFmpegVideoDecoder : public FFmpegDataDecoder<V>
+{
+};
+
+template <>
+class FFmpegVideoDecoder<LIBAV_VER> : public FFmpegDataDecoder<LIBAV_VER>
+{
+ typedef mozilla::layers::Image Image;
+ typedef mozilla::layers::ImageContainer ImageContainer;
+
+public:
+ FFmpegVideoDecoder(FFmpegLibWrapper* aLib, TaskQueue* aTaskQueue,
+ MediaDataDecoderCallback* aCallback,
+ const VideoInfo& aConfig,
+ ImageContainer* aImageContainer);
+ virtual ~FFmpegVideoDecoder();
+
+ RefPtr<InitPromise> Init() override;
+ void InitCodecContext() override;
+ const char* GetDescriptionName() const override
+ {
+#ifdef USING_MOZFFVPX
+ return "ffvpx video decoder";
+#else
+ return "ffmpeg video decoder";
+#endif
+ }
+ static AVCodecID GetCodecId(const nsACString& aMimeType);
+
+private:
+ MediaResult DoDecode(MediaRawData* aSample) override;
+ MediaResult DoDecode(MediaRawData* aSample, bool* aGotFrame);
+ MediaResult DoDecode(MediaRawData* aSample, uint8_t* aData, int aSize, bool* aGotFrame);
+ void ProcessDrain() override;
+ void ProcessFlush() override;
+ void OutputDelayedFrames();
+
+ /**
+ * This method allocates a buffer for FFmpeg's decoder, wrapped in an Image.
+ * Currently it only supports Planar YUV420, which appears to be the only
+ * non-hardware accelerated image format that FFmpeg's H264 decoder is
+ * capable of outputting.
+ */
+ int AllocateYUV420PVideoBuffer(AVCodecContext* aCodecContext,
+ AVFrame* aFrame);
+
+ RefPtr<ImageContainer> mImageContainer;
+ VideoInfo mInfo;
+
+ // Parser used for VP8 and VP9 decoding.
+ AVCodecParserContext* mCodecParser;
+
+ class PtsCorrectionContext {
+ public:
+ PtsCorrectionContext();
+ int64_t GuessCorrectPts(int64_t aPts, int64_t aDts);
+ void Reset();
+ int64_t LastDts() const { return mLastDts; }
+
+ private:
+ int64_t mNumFaultyPts; /// Number of incorrect PTS values so far
+ int64_t mNumFaultyDts; /// Number of incorrect DTS values so far
+ int64_t mLastPts; /// PTS of the last frame
+ int64_t mLastDts; /// DTS of the last frame
+ };
+
+ PtsCorrectionContext mPtsContext;
+ int64_t mLastInputDts;
+
+ class DurationMap {
+ public:
+ typedef Pair<int64_t, int64_t> DurationElement;
+
+ // Insert Key and Duration pair at the end of our map.
+ void Insert(int64_t aKey, int64_t aDuration)
+ {
+ mMap.AppendElement(MakePair(aKey, aDuration));
+ }
+ // Sets aDuration matching aKey and removes it from the map if found.
+ // The element returned is the first one found.
+ // Returns true if found, false otherwise.
+ bool Find(int64_t aKey, int64_t& aDuration)
+ {
+ for (uint32_t i = 0; i < mMap.Length(); i++) {
+ DurationElement& element = mMap[i];
+ if (element.first() == aKey) {
+ aDuration = element.second();
+ mMap.RemoveElementAt(i);
+ return true;
+ }
+ }
+ return false;
+ }
+ // Remove all elements of the map.
+ void Clear()
+ {
+ mMap.Clear();
+ }
+
+ private:
+ AutoTArray<DurationElement, 16> mMap;
+ };
+
+ DurationMap mDurationMap;
+};
+
+} // namespace mozilla
+
+#endif // __FFmpegVideoDecoder_h__
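
The DurationMap above lets the decoder record a sample's duration at input time and recover it by timestamp once the (possibly reordered) frame is output. A small standalone sketch of that usage pattern, with std:: containers standing in for the Mozilla types (illustrative only, not part of the patch):

#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

// Simplified stand-in for the DurationMap class above.
class DurationMap {
public:
  // Record the duration of a sample, keyed by its timestamp.
  void Insert(int64_t aKey, int64_t aDuration)
  {
    mMap.push_back(std::make_pair(aKey, aDuration));
  }
  // Retrieve (and remove) the duration recorded for aKey, if any.
  bool Find(int64_t aKey, int64_t& aDuration)
  {
    for (size_t i = 0; i < mMap.size(); i++) {
      if (mMap[i].first == aKey) {
        aDuration = mMap[i].second;
        mMap.erase(mMap.begin() + i);
        return true;
      }
    }
    return false;
  }
private:
  std::vector<std::pair<int64_t, int64_t>> mMap;
};

int main()
{
  DurationMap durations;
  durations.Insert(/* pts */ 0, /* duration */ 33);  // sample queued for decode
  durations.Insert(33, 33);

  int64_t duration = 0;
  if (durations.Find(33, duration)) {  // frame 33 may come out before frame 0
    std::printf("duration for pts 33: %lld\n", (long long)duration);
  }
  return 0;
}
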
diff --git a/dom/media/platforms/ffmpeg/README_mozilla b/dom/media/platforms/ffmpeg/README_mozilla
new file mode 100644
index 000000000..19761f751
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/README_mozilla
@@ -0,0 +1,11 @@
+These headers are taken from Libav versions 0.8.9, 0.9 and 1.0.
+
+These headers are licensed under the GNU Lesser General Public License version
+2.1. For more information see the file COPYING.LGPLv2.1
+
+While the function ABIs of FFmpeg/Libav tend to be stable between even major
+versions, the class layouts can change considerably. This can lead to major
+crashes if we build against the wrong headers. We include these headers to make
+sure we have the same version every time we build, and to ease the pain of
+acquiring the correct libraries for a build, especially for those distributions
+who oppose distributing such packages.
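
The decoder sources above cope with such version differences at compile time: codec IDs that only exist in newer libavcodec releases (VP8, VP9) are wrapped in LIBAVCODEC_VERSION_MAJOR checks, so the same source file builds against each bundled header set. A standalone sketch of that pattern; the hard-coded version define here is illustrative only, while in the real tree the value comes from the bundled libavcodec/version.h:

#include <cstdio>

// Illustrative stand-in; the real value is provided by whichever bundled
// libavcodec/version.h is selected at build time.
#define LIBAVCODEC_VERSION_MAJOR 55

// Same pattern as FFmpegVideoDecoder::GetCodecId() above: features that only
// exist in newer libavcodec versions are compiled in conditionally.
static const char* SupportedVpxCodecs()
{
#if LIBAVCODEC_VERSION_MAJOR >= 55
  return "VP8 and VP9";
#elif LIBAVCODEC_VERSION_MAJOR >= 54
  return "VP8 only";
#else
  return "none";
#endif
}

int main()
{
  std::printf("VPX decode support with these headers: %s\n",
              SupportedVpxCodecs());
  return 0;
}
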
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/COPYING.LGPLv2.1 b/dom/media/platforms/ffmpeg/ffmpeg57/include/COPYING.LGPLv2.1
new file mode 100644
index 000000000..00b4fedfe
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/COPYING.LGPLv2.1
@@ -0,0 +1,504 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/avcodec.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/avcodec.h
new file mode 100644
index 000000000..f365775f0
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/avcodec.h
@@ -0,0 +1,5418 @@
+/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AVCODEC_H
+#define AVCODEC_AVCODEC_H
+
+/**
+ * @file
+ * @ingroup libavc
+ * Libavcodec external API header
+ */
+
+#include <errno.h>
+#include "libavutil/samplefmt.h"
+#include "libavutil/attributes.h"
+#include "libavutil/avutil.h"
+#include "libavutil/buffer.h"
+#include "libavutil/cpu.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/dict.h"
+#include "libavutil/frame.h"
+#include "libavutil/log.h"
+#include "libavutil/pixfmt.h"
+#include "libavutil/rational.h"
+
+#include "version.h"
+
+/**
+ * @defgroup libavc Encoding/Decoding Library
+ * @{
+ *
+ * @defgroup lavc_decoding Decoding
+ * @{
+ * @}
+ *
+ * @defgroup lavc_encoding Encoding
+ * @{
+ * @}
+ *
+ * @defgroup lavc_codec Codecs
+ * @{
+ * @defgroup lavc_codec_native Native Codecs
+ * @{
+ * @}
+ * @defgroup lavc_codec_wrappers External library wrappers
+ * @{
+ * @}
+ * @defgroup lavc_codec_hwaccel Hardware Accelerators bridge
+ * @{
+ * @}
+ * @}
+ * @defgroup lavc_internal Internal
+ * @{
+ * @}
+ * @}
+ *
+ */
+
+/**
+ * @defgroup lavc_core Core functions/structures.
+ * @ingroup libavc
+ *
+ * Basic definitions, functions for querying libavcodec capabilities,
+ * allocating core structures, etc.
+ * @{
+ */
+
+
+/**
+ * Identify the syntax and semantics of the bitstream.
+ * The principle is roughly:
+ * Two decoders with the same ID can decode the same streams.
+ * Two encoders with the same ID can encode compatible streams.
+ * There may be slight deviations from the principle due to implementation
+ * details.
+ *
+ * If you add a codec ID to this list, add it so that
+ * 1. no value of an existing codec ID changes (that would break ABI),
+ * 2. it is as close as possible to similar codecs
+ *
+ * After adding new codec IDs, do not forget to add an entry to the codec
+ * descriptor list and bump libavcodec minor version.
+ */
+enum AVCodecID {
+ AV_CODEC_ID_NONE,
+
+ /* video codecs */
+ AV_CODEC_ID_MPEG1VIDEO,
+ AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding
+#if FF_API_XVMC
+ AV_CODEC_ID_MPEG2VIDEO_XVMC,
+#endif /* FF_API_XVMC */
+ AV_CODEC_ID_H261,
+ AV_CODEC_ID_H263,
+ AV_CODEC_ID_RV10,
+ AV_CODEC_ID_RV20,
+ AV_CODEC_ID_MJPEG,
+ AV_CODEC_ID_MJPEGB,
+ AV_CODEC_ID_LJPEG,
+ AV_CODEC_ID_SP5X,
+ AV_CODEC_ID_JPEGLS,
+ AV_CODEC_ID_MPEG4,
+ AV_CODEC_ID_RAWVIDEO,
+ AV_CODEC_ID_MSMPEG4V1,
+ AV_CODEC_ID_MSMPEG4V2,
+ AV_CODEC_ID_MSMPEG4V3,
+ AV_CODEC_ID_WMV1,
+ AV_CODEC_ID_WMV2,
+ AV_CODEC_ID_H263P,
+ AV_CODEC_ID_H263I,
+ AV_CODEC_ID_FLV1,
+ AV_CODEC_ID_SVQ1,
+ AV_CODEC_ID_SVQ3,
+ AV_CODEC_ID_DVVIDEO,
+ AV_CODEC_ID_HUFFYUV,
+ AV_CODEC_ID_CYUV,
+ AV_CODEC_ID_H264,
+ AV_CODEC_ID_INDEO3,
+ AV_CODEC_ID_VP3,
+ AV_CODEC_ID_THEORA,
+ AV_CODEC_ID_ASV1,
+ AV_CODEC_ID_ASV2,
+ AV_CODEC_ID_FFV1,
+ AV_CODEC_ID_4XM,
+ AV_CODEC_ID_VCR1,
+ AV_CODEC_ID_CLJR,
+ AV_CODEC_ID_MDEC,
+ AV_CODEC_ID_ROQ,
+ AV_CODEC_ID_INTERPLAY_VIDEO,
+ AV_CODEC_ID_XAN_WC3,
+ AV_CODEC_ID_XAN_WC4,
+ AV_CODEC_ID_RPZA,
+ AV_CODEC_ID_CINEPAK,
+ AV_CODEC_ID_WS_VQA,
+ AV_CODEC_ID_MSRLE,
+ AV_CODEC_ID_MSVIDEO1,
+ AV_CODEC_ID_IDCIN,
+ AV_CODEC_ID_8BPS,
+ AV_CODEC_ID_SMC,
+ AV_CODEC_ID_FLIC,
+ AV_CODEC_ID_TRUEMOTION1,
+ AV_CODEC_ID_VMDVIDEO,
+ AV_CODEC_ID_MSZH,
+ AV_CODEC_ID_ZLIB,
+ AV_CODEC_ID_QTRLE,
+ AV_CODEC_ID_TSCC,
+ AV_CODEC_ID_ULTI,
+ AV_CODEC_ID_QDRAW,
+ AV_CODEC_ID_VIXL,
+ AV_CODEC_ID_QPEG,
+ AV_CODEC_ID_PNG,
+ AV_CODEC_ID_PPM,
+ AV_CODEC_ID_PBM,
+ AV_CODEC_ID_PGM,
+ AV_CODEC_ID_PGMYUV,
+ AV_CODEC_ID_PAM,
+ AV_CODEC_ID_FFVHUFF,
+ AV_CODEC_ID_RV30,
+ AV_CODEC_ID_RV40,
+ AV_CODEC_ID_VC1,
+ AV_CODEC_ID_WMV3,
+ AV_CODEC_ID_LOCO,
+ AV_CODEC_ID_WNV1,
+ AV_CODEC_ID_AASC,
+ AV_CODEC_ID_INDEO2,
+ AV_CODEC_ID_FRAPS,
+ AV_CODEC_ID_TRUEMOTION2,
+ AV_CODEC_ID_BMP,
+ AV_CODEC_ID_CSCD,
+ AV_CODEC_ID_MMVIDEO,
+ AV_CODEC_ID_ZMBV,
+ AV_CODEC_ID_AVS,
+ AV_CODEC_ID_SMACKVIDEO,
+ AV_CODEC_ID_NUV,
+ AV_CODEC_ID_KMVC,
+ AV_CODEC_ID_FLASHSV,
+ AV_CODEC_ID_CAVS,
+ AV_CODEC_ID_JPEG2000,
+ AV_CODEC_ID_VMNC,
+ AV_CODEC_ID_VP5,
+ AV_CODEC_ID_VP6,
+ AV_CODEC_ID_VP6F,
+ AV_CODEC_ID_TARGA,
+ AV_CODEC_ID_DSICINVIDEO,
+ AV_CODEC_ID_TIERTEXSEQVIDEO,
+ AV_CODEC_ID_TIFF,
+ AV_CODEC_ID_GIF,
+ AV_CODEC_ID_DXA,
+ AV_CODEC_ID_DNXHD,
+ AV_CODEC_ID_THP,
+ AV_CODEC_ID_SGI,
+ AV_CODEC_ID_C93,
+ AV_CODEC_ID_BETHSOFTVID,
+ AV_CODEC_ID_PTX,
+ AV_CODEC_ID_TXD,
+ AV_CODEC_ID_VP6A,
+ AV_CODEC_ID_AMV,
+ AV_CODEC_ID_VB,
+ AV_CODEC_ID_PCX,
+ AV_CODEC_ID_SUNRAST,
+ AV_CODEC_ID_INDEO4,
+ AV_CODEC_ID_INDEO5,
+ AV_CODEC_ID_MIMIC,
+ AV_CODEC_ID_RL2,
+ AV_CODEC_ID_ESCAPE124,
+ AV_CODEC_ID_DIRAC,
+ AV_CODEC_ID_BFI,
+ AV_CODEC_ID_CMV,
+ AV_CODEC_ID_MOTIONPIXELS,
+ AV_CODEC_ID_TGV,
+ AV_CODEC_ID_TGQ,
+ AV_CODEC_ID_TQI,
+ AV_CODEC_ID_AURA,
+ AV_CODEC_ID_AURA2,
+ AV_CODEC_ID_V210X,
+ AV_CODEC_ID_TMV,
+ AV_CODEC_ID_V210,
+ AV_CODEC_ID_DPX,
+ AV_CODEC_ID_MAD,
+ AV_CODEC_ID_FRWU,
+ AV_CODEC_ID_FLASHSV2,
+ AV_CODEC_ID_CDGRAPHICS,
+ AV_CODEC_ID_R210,
+ AV_CODEC_ID_ANM,
+ AV_CODEC_ID_BINKVIDEO,
+ AV_CODEC_ID_IFF_ILBM,
+#define AV_CODEC_ID_IFF_BYTERUN1 AV_CODEC_ID_IFF_ILBM
+ AV_CODEC_ID_KGV1,
+ AV_CODEC_ID_YOP,
+ AV_CODEC_ID_VP8,
+ AV_CODEC_ID_PICTOR,
+ AV_CODEC_ID_ANSI,
+ AV_CODEC_ID_A64_MULTI,
+ AV_CODEC_ID_A64_MULTI5,
+ AV_CODEC_ID_R10K,
+ AV_CODEC_ID_MXPEG,
+ AV_CODEC_ID_LAGARITH,
+ AV_CODEC_ID_PRORES,
+ AV_CODEC_ID_JV,
+ AV_CODEC_ID_DFA,
+ AV_CODEC_ID_WMV3IMAGE,
+ AV_CODEC_ID_VC1IMAGE,
+ AV_CODEC_ID_UTVIDEO,
+ AV_CODEC_ID_BMV_VIDEO,
+ AV_CODEC_ID_VBLE,
+ AV_CODEC_ID_DXTORY,
+ AV_CODEC_ID_V410,
+ AV_CODEC_ID_XWD,
+ AV_CODEC_ID_CDXL,
+ AV_CODEC_ID_XBM,
+ AV_CODEC_ID_ZEROCODEC,
+ AV_CODEC_ID_MSS1,
+ AV_CODEC_ID_MSA1,
+ AV_CODEC_ID_TSCC2,
+ AV_CODEC_ID_MTS2,
+ AV_CODEC_ID_CLLC,
+ AV_CODEC_ID_MSS2,
+ AV_CODEC_ID_VP9,
+ AV_CODEC_ID_AIC,
+ AV_CODEC_ID_ESCAPE130,
+ AV_CODEC_ID_G2M,
+ AV_CODEC_ID_WEBP,
+ AV_CODEC_ID_HNM4_VIDEO,
+ AV_CODEC_ID_HEVC,
+#define AV_CODEC_ID_H265 AV_CODEC_ID_HEVC
+ AV_CODEC_ID_FIC,
+ AV_CODEC_ID_ALIAS_PIX,
+ AV_CODEC_ID_BRENDER_PIX,
+ AV_CODEC_ID_PAF_VIDEO,
+ AV_CODEC_ID_EXR,
+ AV_CODEC_ID_VP7,
+ AV_CODEC_ID_SANM,
+ AV_CODEC_ID_SGIRLE,
+ AV_CODEC_ID_MVC1,
+ AV_CODEC_ID_MVC2,
+ AV_CODEC_ID_HQX,
+ AV_CODEC_ID_TDSC,
+ AV_CODEC_ID_HQ_HQA,
+ AV_CODEC_ID_HAP,
+ AV_CODEC_ID_DDS,
+ AV_CODEC_ID_DXV,
+ AV_CODEC_ID_SCREENPRESSO,
+ AV_CODEC_ID_RSCC,
+
+ AV_CODEC_ID_Y41P = 0x8000,
+ AV_CODEC_ID_AVRP,
+ AV_CODEC_ID_012V,
+ AV_CODEC_ID_AVUI,
+ AV_CODEC_ID_AYUV,
+ AV_CODEC_ID_TARGA_Y216,
+ AV_CODEC_ID_V308,
+ AV_CODEC_ID_V408,
+ AV_CODEC_ID_YUV4,
+ AV_CODEC_ID_AVRN,
+ AV_CODEC_ID_CPIA,
+ AV_CODEC_ID_XFACE,
+ AV_CODEC_ID_SNOW,
+ AV_CODEC_ID_SMVJPEG,
+ AV_CODEC_ID_APNG,
+ AV_CODEC_ID_DAALA,
+
+ /* various PCM "codecs" */
+ AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
+ AV_CODEC_ID_PCM_S16LE = 0x10000,
+ AV_CODEC_ID_PCM_S16BE,
+ AV_CODEC_ID_PCM_U16LE,
+ AV_CODEC_ID_PCM_U16BE,
+ AV_CODEC_ID_PCM_S8,
+ AV_CODEC_ID_PCM_U8,
+ AV_CODEC_ID_PCM_MULAW,
+ AV_CODEC_ID_PCM_ALAW,
+ AV_CODEC_ID_PCM_S32LE,
+ AV_CODEC_ID_PCM_S32BE,
+ AV_CODEC_ID_PCM_U32LE,
+ AV_CODEC_ID_PCM_U32BE,
+ AV_CODEC_ID_PCM_S24LE,
+ AV_CODEC_ID_PCM_S24BE,
+ AV_CODEC_ID_PCM_U24LE,
+ AV_CODEC_ID_PCM_U24BE,
+ AV_CODEC_ID_PCM_S24DAUD,
+ AV_CODEC_ID_PCM_ZORK,
+ AV_CODEC_ID_PCM_S16LE_PLANAR,
+ AV_CODEC_ID_PCM_DVD,
+ AV_CODEC_ID_PCM_F32BE,
+ AV_CODEC_ID_PCM_F32LE,
+ AV_CODEC_ID_PCM_F64BE,
+ AV_CODEC_ID_PCM_F64LE,
+ AV_CODEC_ID_PCM_BLURAY,
+ AV_CODEC_ID_PCM_LXF,
+ AV_CODEC_ID_S302M,
+ AV_CODEC_ID_PCM_S8_PLANAR,
+ AV_CODEC_ID_PCM_S24LE_PLANAR,
+ AV_CODEC_ID_PCM_S32LE_PLANAR,
+ AV_CODEC_ID_PCM_S16BE_PLANAR,
+ /* new PCM "codecs" should be added right below this line starting with
+ * an explicit value of for example 0x10800
+ */
+
+ /* various ADPCM codecs */
+ AV_CODEC_ID_ADPCM_IMA_QT = 0x11000,
+ AV_CODEC_ID_ADPCM_IMA_WAV,
+ AV_CODEC_ID_ADPCM_IMA_DK3,
+ AV_CODEC_ID_ADPCM_IMA_DK4,
+ AV_CODEC_ID_ADPCM_IMA_WS,
+ AV_CODEC_ID_ADPCM_IMA_SMJPEG,
+ AV_CODEC_ID_ADPCM_MS,
+ AV_CODEC_ID_ADPCM_4XM,
+ AV_CODEC_ID_ADPCM_XA,
+ AV_CODEC_ID_ADPCM_ADX,
+ AV_CODEC_ID_ADPCM_EA,
+ AV_CODEC_ID_ADPCM_G726,
+ AV_CODEC_ID_ADPCM_CT,
+ AV_CODEC_ID_ADPCM_SWF,
+ AV_CODEC_ID_ADPCM_YAMAHA,
+ AV_CODEC_ID_ADPCM_SBPRO_4,
+ AV_CODEC_ID_ADPCM_SBPRO_3,
+ AV_CODEC_ID_ADPCM_SBPRO_2,
+ AV_CODEC_ID_ADPCM_THP,
+ AV_CODEC_ID_ADPCM_IMA_AMV,
+ AV_CODEC_ID_ADPCM_EA_R1,
+ AV_CODEC_ID_ADPCM_EA_R3,
+ AV_CODEC_ID_ADPCM_EA_R2,
+ AV_CODEC_ID_ADPCM_IMA_EA_SEAD,
+ AV_CODEC_ID_ADPCM_IMA_EA_EACS,
+ AV_CODEC_ID_ADPCM_EA_XAS,
+ AV_CODEC_ID_ADPCM_EA_MAXIS_XA,
+ AV_CODEC_ID_ADPCM_IMA_ISS,
+ AV_CODEC_ID_ADPCM_G722,
+ AV_CODEC_ID_ADPCM_IMA_APC,
+ AV_CODEC_ID_ADPCM_VIMA,
+#if FF_API_VIMA_DECODER
+ AV_CODEC_ID_VIMA = AV_CODEC_ID_ADPCM_VIMA,
+#endif
+
+ AV_CODEC_ID_ADPCM_AFC = 0x11800,
+ AV_CODEC_ID_ADPCM_IMA_OKI,
+ AV_CODEC_ID_ADPCM_DTK,
+ AV_CODEC_ID_ADPCM_IMA_RAD,
+ AV_CODEC_ID_ADPCM_G726LE,
+ AV_CODEC_ID_ADPCM_THP_LE,
+ AV_CODEC_ID_ADPCM_PSX,
+ AV_CODEC_ID_ADPCM_AICA,
+
+ /* AMR */
+ AV_CODEC_ID_AMR_NB = 0x12000,
+ AV_CODEC_ID_AMR_WB,
+
+ /* RealAudio codecs*/
+ AV_CODEC_ID_RA_144 = 0x13000,
+ AV_CODEC_ID_RA_288,
+
+ /* various DPCM codecs */
+ AV_CODEC_ID_ROQ_DPCM = 0x14000,
+ AV_CODEC_ID_INTERPLAY_DPCM,
+ AV_CODEC_ID_XAN_DPCM,
+ AV_CODEC_ID_SOL_DPCM,
+
+ AV_CODEC_ID_SDX2_DPCM = 0x14800,
+
+ /* audio codecs */
+ AV_CODEC_ID_MP2 = 0x15000,
+ AV_CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3
+ AV_CODEC_ID_AAC,
+ AV_CODEC_ID_AC3,
+ AV_CODEC_ID_DTS,
+ AV_CODEC_ID_VORBIS,
+ AV_CODEC_ID_DVAUDIO,
+ AV_CODEC_ID_WMAV1,
+ AV_CODEC_ID_WMAV2,
+ AV_CODEC_ID_MACE3,
+ AV_CODEC_ID_MACE6,
+ AV_CODEC_ID_VMDAUDIO,
+ AV_CODEC_ID_FLAC,
+ AV_CODEC_ID_MP3ADU,
+ AV_CODEC_ID_MP3ON4,
+ AV_CODEC_ID_SHORTEN,
+ AV_CODEC_ID_ALAC,
+ AV_CODEC_ID_WESTWOOD_SND1,
+ AV_CODEC_ID_GSM, ///< as in Berlin toast format
+ AV_CODEC_ID_QDM2,
+ AV_CODEC_ID_COOK,
+ AV_CODEC_ID_TRUESPEECH,
+ AV_CODEC_ID_TTA,
+ AV_CODEC_ID_SMACKAUDIO,
+ AV_CODEC_ID_QCELP,
+ AV_CODEC_ID_WAVPACK,
+ AV_CODEC_ID_DSICINAUDIO,
+ AV_CODEC_ID_IMC,
+ AV_CODEC_ID_MUSEPACK7,
+ AV_CODEC_ID_MLP,
+ AV_CODEC_ID_GSM_MS, /* as found in WAV */
+ AV_CODEC_ID_ATRAC3,
+#if FF_API_VOXWARE
+ AV_CODEC_ID_VOXWARE,
+#endif
+ AV_CODEC_ID_APE,
+ AV_CODEC_ID_NELLYMOSER,
+ AV_CODEC_ID_MUSEPACK8,
+ AV_CODEC_ID_SPEEX,
+ AV_CODEC_ID_WMAVOICE,
+ AV_CODEC_ID_WMAPRO,
+ AV_CODEC_ID_WMALOSSLESS,
+ AV_CODEC_ID_ATRAC3P,
+ AV_CODEC_ID_EAC3,
+ AV_CODEC_ID_SIPR,
+ AV_CODEC_ID_MP1,
+ AV_CODEC_ID_TWINVQ,
+ AV_CODEC_ID_TRUEHD,
+ AV_CODEC_ID_MP4ALS,
+ AV_CODEC_ID_ATRAC1,
+ AV_CODEC_ID_BINKAUDIO_RDFT,
+ AV_CODEC_ID_BINKAUDIO_DCT,
+ AV_CODEC_ID_AAC_LATM,
+ AV_CODEC_ID_QDMC,
+ AV_CODEC_ID_CELT,
+ AV_CODEC_ID_G723_1,
+ AV_CODEC_ID_G729,
+ AV_CODEC_ID_8SVX_EXP,
+ AV_CODEC_ID_8SVX_FIB,
+ AV_CODEC_ID_BMV_AUDIO,
+ AV_CODEC_ID_RALF,
+ AV_CODEC_ID_IAC,
+ AV_CODEC_ID_ILBC,
+ AV_CODEC_ID_OPUS,
+ AV_CODEC_ID_COMFORT_NOISE,
+ AV_CODEC_ID_TAK,
+ AV_CODEC_ID_METASOUND,
+ AV_CODEC_ID_PAF_AUDIO,
+ AV_CODEC_ID_ON2AVC,
+ AV_CODEC_ID_DSS_SP,
+
+ AV_CODEC_ID_FFWAVESYNTH = 0x15800,
+ AV_CODEC_ID_SONIC,
+ AV_CODEC_ID_SONIC_LS,
+ AV_CODEC_ID_EVRC,
+ AV_CODEC_ID_SMV,
+ AV_CODEC_ID_DSD_LSBF,
+ AV_CODEC_ID_DSD_MSBF,
+ AV_CODEC_ID_DSD_LSBF_PLANAR,
+ AV_CODEC_ID_DSD_MSBF_PLANAR,
+ AV_CODEC_ID_4GV,
+ AV_CODEC_ID_INTERPLAY_ACM,
+ AV_CODEC_ID_XMA1,
+ AV_CODEC_ID_XMA2,
+
+ /* subtitle codecs */
+ AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
+ AV_CODEC_ID_DVD_SUBTITLE = 0x17000,
+ AV_CODEC_ID_DVB_SUBTITLE,
+ AV_CODEC_ID_TEXT, ///< raw UTF-8 text
+ AV_CODEC_ID_XSUB,
+ AV_CODEC_ID_SSA,
+ AV_CODEC_ID_MOV_TEXT,
+ AV_CODEC_ID_HDMV_PGS_SUBTITLE,
+ AV_CODEC_ID_DVB_TELETEXT,
+ AV_CODEC_ID_SRT,
+
+ AV_CODEC_ID_MICRODVD = 0x17800,
+ AV_CODEC_ID_EIA_608,
+ AV_CODEC_ID_JACOSUB,
+ AV_CODEC_ID_SAMI,
+ AV_CODEC_ID_REALTEXT,
+ AV_CODEC_ID_STL,
+ AV_CODEC_ID_SUBVIEWER1,
+ AV_CODEC_ID_SUBVIEWER,
+ AV_CODEC_ID_SUBRIP,
+ AV_CODEC_ID_WEBVTT,
+ AV_CODEC_ID_MPL2,
+ AV_CODEC_ID_VPLAYER,
+ AV_CODEC_ID_PJS,
+ AV_CODEC_ID_ASS,
+ AV_CODEC_ID_HDMV_TEXT_SUBTITLE,
+
+ /* other specific kind of codecs (generally used for attachments) */
+ AV_CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs.
+ AV_CODEC_ID_TTF = 0x18000,
+
+ AV_CODEC_ID_BINTEXT = 0x18800,
+ AV_CODEC_ID_XBIN,
+ AV_CODEC_ID_IDF,
+ AV_CODEC_ID_OTF,
+ AV_CODEC_ID_SMPTE_KLV,
+ AV_CODEC_ID_DVD_NAV,
+ AV_CODEC_ID_TIMED_ID3,
+ AV_CODEC_ID_BIN_DATA,
+
+
+ AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it
+
+ AV_CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS
+ * stream (only used by libavformat) */
+ AV_CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems
+ * stream (only used by libavformat) */
+ AV_CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information.
+ AV_CODEC_ID_WRAPPED_AVFRAME = 0x21001, ///< Passthrough codec, AVFrames wrapped in AVPacket
+};
+
+/**
+ * This struct describes the properties of a single codec described by an
+ * AVCodecID.
+ * @see avcodec_descriptor_get()
+ */
+typedef struct AVCodecDescriptor {
+ enum AVCodecID id;
+ enum AVMediaType type;
+ /**
+ * Name of the codec described by this descriptor. It is non-empty and
+ * unique for each codec descriptor. It should contain alphanumeric
+ * characters and '_' only.
+ */
+ const char *name;
+ /**
+ * A more descriptive name for this codec. May be NULL.
+ */
+ const char *long_name;
+ /**
+ * Codec properties, a combination of AV_CODEC_PROP_* flags.
+ */
+ int props;
+ /**
+ * MIME type(s) associated with the codec.
+ * May be NULL; if not, a NULL-terminated array of MIME types.
+ * The first item is always non-NULL and is the preferred MIME type.
+ */
+ const char *const *mime_types;
+ /**
+ * If non-NULL, an array of profiles recognized for this codec.
+ * Terminated with FF_PROFILE_UNKNOWN.
+ */
+ const struct AVProfile *profiles;
+} AVCodecDescriptor;
+
+/**
+ * Codec uses only intra compression.
+ * Video codecs only.
+ */
+#define AV_CODEC_PROP_INTRA_ONLY (1 << 0)
+/**
+ * Codec supports lossy compression. Audio and video codecs only.
+ * @note a codec may support both lossy and lossless
+ * compression modes
+ */
+#define AV_CODEC_PROP_LOSSY (1 << 1)
+/**
+ * Codec supports lossless compression. Audio and video codecs only.
+ */
+#define AV_CODEC_PROP_LOSSLESS (1 << 2)
+/**
+ * Codec supports frame reordering. That is, the coded order (the order in which
+ * the encoded packets are output by the encoders / stored / input to the
+ * decoders) may be different from the presentation order of the corresponding
+ * frames.
+ *
+ * For codecs that do not have this property set, PTS and DTS should always be
+ * equal.
+ */
+#define AV_CODEC_PROP_REORDER (1 << 3)
+/**
+ * Subtitle codec is bitmap based
+ * Decoded AVSubtitle data can be read from the AVSubtitleRect->pict field.
+ */
+#define AV_CODEC_PROP_BITMAP_SUB (1 << 16)
+/**
+ * Subtitle codec is text based.
+ * Decoded AVSubtitle data can be read from the AVSubtitleRect->ass field.
+ */
+#define AV_CODEC_PROP_TEXT_SUB (1 << 17)
+
+/**
+ * @ingroup lavc_decoding
+ * Required number of additionally allocated bytes at the end of the input bitstream for decoding.
+ * This is mainly needed because some optimized bitstream readers read
+ * 32 or 64 bit at once and could read over the end.<br>
+ * Note: If the first 23 bits of the additional bytes are not 0, then damaged
+ * MPEG bitstreams could cause overread and segfault.
+ */
+#define AV_INPUT_BUFFER_PADDING_SIZE 32
+
+/**
+ * @ingroup lavc_encoding
+ * minimum encoding buffer size
+ * Used to avoid some checks during header writing.
+ */
+#define AV_INPUT_BUFFER_MIN_SIZE 16384
+
+#if FF_API_WITHOUT_PREFIX
+/**
+ * @deprecated use AV_INPUT_BUFFER_PADDING_SIZE instead
+ */
+#define FF_INPUT_BUFFER_PADDING_SIZE 32
+
+/**
+ * @deprecated use AV_INPUT_BUFFER_MIN_SIZE instead
+ */
+#define FF_MIN_BUFFER_SIZE 16384
+#endif /* FF_API_WITHOUT_PREFIX */
+
+/**
+ * @ingroup lavc_encoding
+ * motion estimation type.
+ * @deprecated use codec private option instead
+ */
+#if FF_API_MOTION_EST
+enum Motion_Est_ID {
+ ME_ZERO = 1, ///< no search, that is use 0,0 vector whenever one is needed
+ ME_FULL,
+ ME_LOG,
+ ME_PHODS,
+ ME_EPZS, ///< enhanced predictive zonal search
+ ME_X1, ///< reserved for experiments
+ ME_HEX, ///< hexagon based search
+ ME_UMH, ///< uneven multi-hexagon search
+ ME_TESA, ///< transformed exhaustive search algorithm
+ ME_ITER=50, ///< iterative search
+};
+#endif
+
+/**
+ * @ingroup lavc_decoding
+ */
+enum AVDiscard{
+ /* We leave some space between them for extensions (drop some
+ * keyframes for intra-only or drop just some bidir frames). */
+ AVDISCARD_NONE =-16, ///< discard nothing
+ AVDISCARD_DEFAULT = 0, ///< discard useless packets like 0 size packets in avi
+ AVDISCARD_NONREF = 8, ///< discard all non reference
+ AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames
+ AVDISCARD_NONINTRA= 24, ///< discard all non intra frames
+ AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes
+ AVDISCARD_ALL = 48, ///< discard all
+};
+
+enum AVAudioServiceType {
+ AV_AUDIO_SERVICE_TYPE_MAIN = 0,
+ AV_AUDIO_SERVICE_TYPE_EFFECTS = 1,
+ AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2,
+ AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED = 3,
+ AV_AUDIO_SERVICE_TYPE_DIALOGUE = 4,
+ AV_AUDIO_SERVICE_TYPE_COMMENTARY = 5,
+ AV_AUDIO_SERVICE_TYPE_EMERGENCY = 6,
+ AV_AUDIO_SERVICE_TYPE_VOICE_OVER = 7,
+ AV_AUDIO_SERVICE_TYPE_KARAOKE = 8,
+ AV_AUDIO_SERVICE_TYPE_NB , ///< Not part of ABI
+};
+
+/**
+ * @ingroup lavc_encoding
+ */
+typedef struct RcOverride{
+ int start_frame;
+ int end_frame;
+ int qscale; // If this is 0 then quality_factor will be used instead.
+ float quality_factor;
+} RcOverride;
+
+#if FF_API_MAX_BFRAMES
+/**
+ * @deprecated there is no libavcodec-wide limit on the number of B-frames
+ */
+#define FF_MAX_B_FRAMES 16
+#endif
+
+/* encoding support
+ These flags can be passed in AVCodecContext.flags before initialization.
+ Note: Not everything is supported yet.
+*/
+
+/**
+ * Allow decoders to produce frames with data planes that are not aligned
+ * to CPU requirements (e.g. due to cropping).
+ */
+#define AV_CODEC_FLAG_UNALIGNED (1 << 0)
+/**
+ * Use fixed qscale.
+ */
+#define AV_CODEC_FLAG_QSCALE (1 << 1)
+/**
+ * 4 MV per MB allowed / advanced prediction for H.263.
+ */
+#define AV_CODEC_FLAG_4MV (1 << 2)
+/**
+ * Output even those frames that might be corrupted.
+ */
+#define AV_CODEC_FLAG_OUTPUT_CORRUPT (1 << 3)
+/**
+ * Use qpel MC.
+ */
+#define AV_CODEC_FLAG_QPEL (1 << 4)
+/**
+ * Use internal 2pass ratecontrol in first pass mode.
+ */
+#define AV_CODEC_FLAG_PASS1 (1 << 9)
+/**
+ * Use internal 2pass ratecontrol in second pass mode.
+ */
+#define AV_CODEC_FLAG_PASS2 (1 << 10)
+/**
+ * loop filter.
+ */
+#define AV_CODEC_FLAG_LOOP_FILTER (1 << 11)
+/**
+ * Only decode/encode grayscale.
+ */
+#define AV_CODEC_FLAG_GRAY (1 << 13)
+/**
+ * error[?] variables will be set during encoding.
+ */
+#define AV_CODEC_FLAG_PSNR (1 << 15)
+/**
+ * Input bitstream might be truncated at a random location
+ * instead of only at frame boundaries.
+ */
+#define AV_CODEC_FLAG_TRUNCATED (1 << 16)
+/**
+ * Use interlaced DCT.
+ */
+#define AV_CODEC_FLAG_INTERLACED_DCT (1 << 18)
+/**
+ * Force low delay.
+ */
+#define AV_CODEC_FLAG_LOW_DELAY (1 << 19)
+/**
+ * Place global headers in extradata instead of every keyframe.
+ */
+#define AV_CODEC_FLAG_GLOBAL_HEADER (1 << 22)
+/**
+ * Use only bitexact stuff (except (I)DCT).
+ */
+#define AV_CODEC_FLAG_BITEXACT (1 << 23)
+/* Fx : Flag for h263+ extra options */
+/**
+ * H.263 advanced intra coding / MPEG-4 AC prediction
+ */
+#define AV_CODEC_FLAG_AC_PRED (1 << 24)
+/**
+ * interlaced motion estimation
+ */
+#define AV_CODEC_FLAG_INTERLACED_ME (1 << 29)
+#define AV_CODEC_FLAG_CLOSED_GOP (1U << 31)
+
+/**
+ * Allow non spec compliant speedup tricks.
+ */
+#define AV_CODEC_FLAG2_FAST (1 << 0)
+/**
+ * Skip bitstream encoding.
+ */
+#define AV_CODEC_FLAG2_NO_OUTPUT (1 << 2)
+/**
+ * Place global headers at every keyframe instead of in extradata.
+ */
+#define AV_CODEC_FLAG2_LOCAL_HEADER (1 << 3)
+
+/**
+ * timecode is in drop frame format. DEPRECATED!!!!
+ */
+#define AV_CODEC_FLAG2_DROP_FRAME_TIMECODE (1 << 13)
+
+/**
+ * Input bitstream might be truncated at packet boundaries
+ * instead of only at frame boundaries.
+ */
+#define AV_CODEC_FLAG2_CHUNKS (1 << 15)
+/**
+ * Discard cropping information from SPS.
+ */
+#define AV_CODEC_FLAG2_IGNORE_CROP (1 << 16)
+
+/**
+ * Show all frames before the first keyframe
+ */
+#define AV_CODEC_FLAG2_SHOW_ALL (1 << 22)
+/**
+ * Export motion vectors through frame side data
+ */
+#define AV_CODEC_FLAG2_EXPORT_MVS (1 << 28)
+/**
+ * Do not skip samples and export skip information as frame side data
+ */
+#define AV_CODEC_FLAG2_SKIP_MANUAL (1 << 29)
+
+/* Unsupported options :
+ * Syntax Arithmetic coding (SAC)
+ * Reference Picture Selection
+ * Independent Segment Decoding */
+/* /Fx */
+/* codec capabilities */
+
+/**
+ * Decoder can use draw_horiz_band callback.
+ */
+#define AV_CODEC_CAP_DRAW_HORIZ_BAND (1 << 0)
+/**
+ * Codec uses get_buffer() for allocating buffers and supports custom allocators.
+ * If not set, it might not use get_buffer() at all or use operations that
+ * assume the buffer was allocated by avcodec_default_get_buffer.
+ */
+#define AV_CODEC_CAP_DR1 (1 << 1)
+#define AV_CODEC_CAP_TRUNCATED (1 << 3)
+/**
+ * Encoder or decoder requires flushing with NULL input at the end in order to
+ * give the complete and correct output.
+ *
+ * NOTE: If this flag is not set, the codec is guaranteed to never be fed
+ * with NULL data. The user can still send NULL data to the public encode
+ * or decode function, but libavcodec will not pass it along to the codec
+ * unless this flag is set.
+ *
+ * Decoders:
+ * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to get the delayed data until the decoder no longer
+ * returns frames.
+ *
+ * Encoders:
+ * The encoder needs to be fed with NULL data at the end of encoding until the
+ * encoder no longer returns data.
+ *
+ * NOTE: For encoders implementing the AVCodec.encode2() function, setting this
+ * flag also means that the encoder must set the pts and duration for
+ * each output packet. If this flag is not set, the pts and duration will
+ * be determined by libavcodec from the input frame.
+ */
+#define AV_CODEC_CAP_DELAY (1 << 5)
+/**
+ * Codec can be fed a final frame with a smaller size.
+ * This can be used to prevent truncation of the last audio samples.
+ */
+#define AV_CODEC_CAP_SMALL_LAST_FRAME (1 << 6)
+
+#if FF_API_CAP_VDPAU
+/**
+ * Codec can export data for HW decoding (VDPAU).
+ */
+#define AV_CODEC_CAP_HWACCEL_VDPAU (1 << 7)
+#endif
+
+/**
+ * Codec can output multiple frames per AVPacket
+ * Normally demuxers return one frame at a time; demuxers which do not are
+ * connected to a parser to split what they return into proper frames.
+ * This flag is reserved for the very rare category of codecs which have a
+ * bitstream that cannot be split into frames without time-consuming
+ * operations like full decoding. Demuxers carrying such bitstreams thus
+ * may return multiple frames in a packet. This has many disadvantages, like
+ * prohibiting stream copy in many cases, so it should only be considered
+ * as a last resort.
+ */
+#define AV_CODEC_CAP_SUBFRAMES (1 << 8)
+/**
+ * Codec is experimental and is thus avoided in favor of non experimental
+ * encoders
+ */
+#define AV_CODEC_CAP_EXPERIMENTAL (1 << 9)
+/**
+ * Codec should fill in channel configuration and samplerate instead of container
+ */
+#define AV_CODEC_CAP_CHANNEL_CONF (1 << 10)
+/**
+ * Codec supports frame-level multithreading.
+ */
+#define AV_CODEC_CAP_FRAME_THREADS (1 << 12)
+/**
+ * Codec supports slice-based (or partition-based) multithreading.
+ */
+#define AV_CODEC_CAP_SLICE_THREADS (1 << 13)
+/**
+ * Codec supports changed parameters at any point.
+ */
+#define AV_CODEC_CAP_PARAM_CHANGE (1 << 14)
+/**
+ * Codec supports avctx->thread_count == 0 (auto).
+ */
+#define AV_CODEC_CAP_AUTO_THREADS (1 << 15)
+/**
+ * Audio encoder supports receiving a different number of samples in each call.
+ */
+#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE (1 << 16)
+/**
+ * Codec is intra only.
+ */
+#define AV_CODEC_CAP_INTRA_ONLY 0x40000000
+/**
+ * Codec is lossless.
+ */
+#define AV_CODEC_CAP_LOSSLESS 0x80000000
+
+
+#if FF_API_WITHOUT_PREFIX
+/**
+ * Allow decoders to produce frames with data planes that are not aligned
+ * to CPU requirements (e.g. due to cropping).
+ */
+#define CODEC_FLAG_UNALIGNED AV_CODEC_FLAG_UNALIGNED
+#define CODEC_FLAG_QSCALE AV_CODEC_FLAG_QSCALE
+#define CODEC_FLAG_4MV AV_CODEC_FLAG_4MV
+#define CODEC_FLAG_OUTPUT_CORRUPT AV_CODEC_FLAG_OUTPUT_CORRUPT
+#define CODEC_FLAG_QPEL AV_CODEC_FLAG_QPEL
+#if FF_API_GMC
+/**
+ * @deprecated use the "gmc" private option of the libxvid encoder
+ */
+#define CODEC_FLAG_GMC 0x0020 ///< Use GMC.
+#endif
+#if FF_API_MV0
+/**
+ * @deprecated use the flag "mv0" in the "mpv_flags" private option of the
+ * mpegvideo encoders
+ */
+#define CODEC_FLAG_MV0 0x0040
+#endif
+#if FF_API_INPUT_PRESERVED
+/**
+ * @deprecated passing reference-counted frames to the encoders replaces this
+ * flag
+ */
+#define CODEC_FLAG_INPUT_PRESERVED 0x0100
+#endif
+#define CODEC_FLAG_PASS1 AV_CODEC_FLAG_PASS1
+#define CODEC_FLAG_PASS2 AV_CODEC_FLAG_PASS2
+#define CODEC_FLAG_GRAY AV_CODEC_FLAG_GRAY
+#if FF_API_EMU_EDGE
+/**
+ * @deprecated edges are not used/required anymore. I.e. this flag is now always
+ * set.
+ */
+#define CODEC_FLAG_EMU_EDGE 0x4000
+#endif
+#define CODEC_FLAG_PSNR AV_CODEC_FLAG_PSNR
+#define CODEC_FLAG_TRUNCATED AV_CODEC_FLAG_TRUNCATED
+
+#if FF_API_NORMALIZE_AQP
+/**
+ * @deprecated use the flag "naq" in the "mpv_flags" private option of the
+ * mpegvideo encoders
+ */
+#define CODEC_FLAG_NORMALIZE_AQP 0x00020000
+#endif
+#define CODEC_FLAG_INTERLACED_DCT AV_CODEC_FLAG_INTERLACED_DCT
+#define CODEC_FLAG_LOW_DELAY AV_CODEC_FLAG_LOW_DELAY
+#define CODEC_FLAG_GLOBAL_HEADER AV_CODEC_FLAG_GLOBAL_HEADER
+#define CODEC_FLAG_BITEXACT AV_CODEC_FLAG_BITEXACT
+#define CODEC_FLAG_AC_PRED AV_CODEC_FLAG_AC_PRED
+#define CODEC_FLAG_LOOP_FILTER AV_CODEC_FLAG_LOOP_FILTER
+#define CODEC_FLAG_INTERLACED_ME AV_CODEC_FLAG_INTERLACED_ME
+#define CODEC_FLAG_CLOSED_GOP AV_CODEC_FLAG_CLOSED_GOP
+#define CODEC_FLAG2_FAST AV_CODEC_FLAG2_FAST
+#define CODEC_FLAG2_NO_OUTPUT AV_CODEC_FLAG2_NO_OUTPUT
+#define CODEC_FLAG2_LOCAL_HEADER AV_CODEC_FLAG2_LOCAL_HEADER
+#define CODEC_FLAG2_DROP_FRAME_TIMECODE AV_CODEC_FLAG2_DROP_FRAME_TIMECODE
+#define CODEC_FLAG2_IGNORE_CROP AV_CODEC_FLAG2_IGNORE_CROP
+
+#define CODEC_FLAG2_CHUNKS AV_CODEC_FLAG2_CHUNKS
+#define CODEC_FLAG2_SHOW_ALL AV_CODEC_FLAG2_SHOW_ALL
+#define CODEC_FLAG2_EXPORT_MVS AV_CODEC_FLAG2_EXPORT_MVS
+#define CODEC_FLAG2_SKIP_MANUAL AV_CODEC_FLAG2_SKIP_MANUAL
+
+/* Unsupported options :
+ * Syntax Arithmetic coding (SAC)
+ * Reference Picture Selection
+ * Independent Segment Decoding */
+/* /Fx */
+/* codec capabilities */
+
+#define CODEC_CAP_DRAW_HORIZ_BAND AV_CODEC_CAP_DRAW_HORIZ_BAND ///< Decoder can use draw_horiz_band callback.
+/**
+ * Codec uses get_buffer() for allocating buffers and supports custom allocators.
+ * If not set, it might not use get_buffer() at all or use operations that
+ * assume the buffer was allocated by avcodec_default_get_buffer.
+ */
+#define CODEC_CAP_DR1 AV_CODEC_CAP_DR1
+#define CODEC_CAP_TRUNCATED AV_CODEC_CAP_TRUNCATED
+#if FF_API_XVMC
+/* Codec can export data for HW decoding. This flag indicates that
+ * the codec would call get_format() with a list that might contain HW-accelerated
+ * pixel formats (XvMC, VDPAU, VAAPI, etc.). The application can pick any of them,
+ * including the raw image format.
+ * The application can use the passed context to determine the bitstream version,
+ * chroma format, resolution, etc.
+ */
+#define CODEC_CAP_HWACCEL 0x0010
+#endif /* FF_API_XVMC */
+/**
+ * Encoder or decoder requires flushing with NULL input at the end in order to
+ * give the complete and correct output.
+ *
+ * NOTE: If this flag is not set, the codec is guaranteed to never be fed
+ * with NULL data. The user can still send NULL data to the public encode
+ * or decode function, but libavcodec will not pass it along to the codec
+ * unless this flag is set.
+ *
+ * Decoders:
+ * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to get the delayed data until the decoder no longer
+ * returns frames.
+ *
+ * Encoders:
+ * The encoder needs to be fed with NULL data at the end of encoding until the
+ * encoder no longer returns data.
+ *
+ * NOTE: For encoders implementing the AVCodec.encode2() function, setting this
+ * flag also means that the encoder must set the pts and duration for
+ * each output packet. If this flag is not set, the pts and duration will
+ * be determined by libavcodec from the input frame.
+ */
+#define CODEC_CAP_DELAY AV_CODEC_CAP_DELAY
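+
+/* Illustrative flush sketch, assuming a decoder opened with avcodec_open2()
+ * and a frame obtained from av_frame_alloc():
+ * @code
+ * AVPacket pkt;
+ * av_init_packet(&pkt);
+ * pkt.data = NULL;      // NULL data / zero size marks end of stream
+ * pkt.size = 0;
+ * int got_frame = 1;
+ * while (got_frame) {
+ *     if (avcodec_decode_video2(avctx, frame, &got_frame, &pkt) < 0)
+ *         break;
+ *     // when got_frame is set, a delayed frame was returned
+ * }
+ * @endcode
+ */
+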
+/**
+ * Codec can be fed a final frame with a smaller size.
+ * This can be used to prevent truncation of the last audio samples.
+ */
+#define CODEC_CAP_SMALL_LAST_FRAME AV_CODEC_CAP_SMALL_LAST_FRAME
+#if FF_API_CAP_VDPAU
+/**
+ * Codec can export data for HW decoding (VDPAU).
+ */
+#define CODEC_CAP_HWACCEL_VDPAU AV_CODEC_CAP_HWACCEL_VDPAU
+#endif
+/**
+ * Codec can output multiple frames per AVPacket.
+ * Normally demuxers return one frame at a time; demuxers which do not do so
+ * are connected to a parser to split what they return into proper frames.
+ * This flag is reserved for the very rare category of codecs whose bitstream
+ * cannot be split into frames without time-consuming operations like full
+ * decoding. Demuxers carrying such bitstreams may therefore return multiple
+ * frames in a packet. This has many disadvantages, such as prohibiting stream
+ * copy in many cases, so it should only be considered as a last resort.
+ */
+#define CODEC_CAP_SUBFRAMES AV_CODEC_CAP_SUBFRAMES
+/**
+ * Codec is experimental and is thus avoided in favor of non-experimental
+ * encoders.
+ */
+#define CODEC_CAP_EXPERIMENTAL AV_CODEC_CAP_EXPERIMENTAL
+/**
+ * Codec should fill in the channel configuration and sample rate instead of the container.
+ */
+#define CODEC_CAP_CHANNEL_CONF AV_CODEC_CAP_CHANNEL_CONF
+#if FF_API_NEG_LINESIZES
+/**
+ * @deprecated no codecs use this capability
+ */
+#define CODEC_CAP_NEG_LINESIZES 0x0800
+#endif
+/**
+ * Codec supports frame-level multithreading.
+ */
+#define CODEC_CAP_FRAME_THREADS AV_CODEC_CAP_FRAME_THREADS
+/**
+ * Codec supports slice-based (or partition-based) multithreading.
+ */
+#define CODEC_CAP_SLICE_THREADS AV_CODEC_CAP_SLICE_THREADS
+/**
+ * Codec supports changed parameters at any point.
+ */
+#define CODEC_CAP_PARAM_CHANGE AV_CODEC_CAP_PARAM_CHANGE
+/**
+ * Codec supports avctx->thread_count == 0 (auto).
+ */
+#define CODEC_CAP_AUTO_THREADS AV_CODEC_CAP_AUTO_THREADS
+/**
+ * Audio encoder supports receiving a different number of samples in each call.
+ */
+#define CODEC_CAP_VARIABLE_FRAME_SIZE AV_CODEC_CAP_VARIABLE_FRAME_SIZE
+/**
+ * Codec is intra only.
+ */
+#define CODEC_CAP_INTRA_ONLY AV_CODEC_CAP_INTRA_ONLY
+/**
+ * Codec is lossless.
+ */
+#define CODEC_CAP_LOSSLESS AV_CODEC_CAP_LOSSLESS
+
+/**
+ * HWAccel is experimental and is thus avoided in favor of non experimental
+ * codecs
+ */
+#define HWACCEL_CODEC_CAP_EXPERIMENTAL 0x0200
+#endif /* FF_API_WITHOUT_PREFIX */
+
+#if FF_API_MB_TYPE
+//The following defines may change, don't expect compatibility if you use them.
+#define MB_TYPE_INTRA4x4 0x0001
+#define MB_TYPE_INTRA16x16 0x0002 //FIXME H.264-specific
+#define MB_TYPE_INTRA_PCM 0x0004 //FIXME H.264-specific
+#define MB_TYPE_16x16 0x0008
+#define MB_TYPE_16x8 0x0010
+#define MB_TYPE_8x16 0x0020
+#define MB_TYPE_8x8 0x0040
+#define MB_TYPE_INTERLACED 0x0080
+#define MB_TYPE_DIRECT2 0x0100 //FIXME
+#define MB_TYPE_ACPRED 0x0200
+#define MB_TYPE_GMC 0x0400
+#define MB_TYPE_SKIP 0x0800
+#define MB_TYPE_P0L0 0x1000
+#define MB_TYPE_P1L0 0x2000
+#define MB_TYPE_P0L1 0x4000
+#define MB_TYPE_P1L1 0x8000
+#define MB_TYPE_L0 (MB_TYPE_P0L0 | MB_TYPE_P1L0)
+#define MB_TYPE_L1 (MB_TYPE_P0L1 | MB_TYPE_P1L1)
+#define MB_TYPE_L0L1 (MB_TYPE_L0 | MB_TYPE_L1)
+#define MB_TYPE_QUANT 0x00010000
+#define MB_TYPE_CBP 0x00020000
+//Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 0mv, ...)
+#endif
+
+/**
+ * Pan Scan area.
+ * This specifies the area which should be displayed.
+ * Note there may be multiple such areas for one frame.
+ */
+typedef struct AVPanScan{
+ /**
+ * id
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int id;
+
+ /**
+ * width and height in 1/16 pel
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int width;
+ int height;
+
+ /**
+ * position of the top left corner in 1/16 pel for up to 3 fields/frames
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int16_t position[3][2];
+}AVPanScan;
+
+/**
+ * This structure describes the bitrate properties of an encoded bitstream. It
+ * roughly corresponds to a subset of the VBV parameters for MPEG-2 or HRD
+ * parameters for H.264/HEVC.
+ */
+typedef struct AVCPBProperties {
+ /**
+ * Maximum bitrate of the stream, in bits per second.
+ * Zero if unknown or unspecified.
+ */
+ int max_bitrate;
+ /**
+ * Minimum bitrate of the stream, in bits per second.
+ * Zero if unknown or unspecified.
+ */
+ int min_bitrate;
+ /**
+ * Average bitrate of the stream, in bits per second.
+ * Zero if unknown or unspecified.
+ */
+ int avg_bitrate;
+
+ /**
+ * The size of the buffer to which the ratecontrol is applied, in bits.
+ * Zero if unknown or unspecified.
+ */
+ int buffer_size;
+
+ /**
+ * The delay between the time the packet this structure is associated with
+ * is received and the time when it should be decoded, in periods of a 27MHz
+ * clock.
+ *
+ * UINT64_MAX when unknown or unspecified.
+ */
+ uint64_t vbv_delay;
+} AVCPBProperties;
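+
+/* Illustrative sketch of reading these properties from packet side data,
+ * assuming pkt was returned by a demuxer (AV_PKT_DATA_CPB_PROPERTIES is
+ * declared further below in this header):
+ * @code
+ * int side_size = 0;
+ * uint8_t *side = av_packet_get_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,
+ *                                         &side_size);
+ * if (side && side_size >= (int)sizeof(AVCPBProperties)) {
+ *     const AVCPBProperties *props = (const AVCPBProperties *)side;
+ *     // props->max_bitrate, props->buffer_size, props->vbv_delay, ...
+ * }
+ * @endcode
+ */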
+
+#if FF_API_QSCALE_TYPE
+#define FF_QSCALE_TYPE_MPEG1 0
+#define FF_QSCALE_TYPE_MPEG2 1
+#define FF_QSCALE_TYPE_H264 2
+#define FF_QSCALE_TYPE_VP56 3
+#endif
+
+/**
+ * The decoder will keep a reference to the frame and may reuse it later.
+ */
+#define AV_GET_BUFFER_FLAG_REF (1 << 0)
+
+/**
+ * @defgroup lavc_packet AVPacket
+ *
+ * Types and functions for working with AVPacket.
+ * @{
+ */
+enum AVPacketSideDataType {
+ AV_PKT_DATA_PALETTE,
+ AV_PKT_DATA_NEW_EXTRADATA,
+
+ /**
+ * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
+ * @code
+ * u32le param_flags
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)
+ * s32le channel_count
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
+ * u64le channel_layout
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)
+ * s32le sample_rate
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)
+ * s32le width
+ * s32le height
+ * @endcode
+ */
+ AV_PKT_DATA_PARAM_CHANGE,
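+
+    /* Illustrative parsing sketch, assuming AV_RL32() from
+     * libavutil/intreadwrite.h and a pkt obtained from a demuxer:
+     * @code
+     * int size = 0;
+     * const uint8_t *p = av_packet_get_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, &size);
+     * if (p && size >= 4) {
+     *     uint32_t flags = AV_RL32(p);
+     *     p += 4;
+     *     if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
+     *         int32_t channel_count = AV_RL32(p);   // s32le per the layout above
+     *         p += 4;
+     *     }
+     *     // the remaining fields follow in the order documented above
+     * }
+     * @endcode
+     */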
+
+ /**
+ * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of
+ * structures with info about macroblocks relevant to splitting the
+ * packet into smaller packets on macroblock edges (e.g. as for RFC 2190).
+ * That is, it does not necessarily contain info about all macroblocks,
+ * as long as the distance between macroblocks in the info is smaller
+ * than the target payload size.
+ * Each MB info structure is 12 bytes, and is laid out as follows:
+ * @code
+ * u32le bit offset from the start of the packet
+ * u8 current quantizer at the start of the macroblock
+ * u8 GOB number
+ * u16le macroblock address within the GOB
+ * u8 horizontal MV predictor
+ * u8 vertical MV predictor
+ * u8 horizontal MV predictor for block number 3
+ * u8 vertical MV predictor for block number 3
+ * @endcode
+ */
+ AV_PKT_DATA_H263_MB_INFO,
+
+ /**
+ * This side data should be associated with an audio stream and contains
+ * ReplayGain information in form of the AVReplayGain struct.
+ */
+ AV_PKT_DATA_REPLAYGAIN,
+
+ /**
+ * This side data contains a 3x3 transformation matrix describing an affine
+ * transformation that needs to be applied to the decoded video frames for
+ * correct presentation.
+ *
+ * See libavutil/display.h for a detailed description of the data.
+ */
+ AV_PKT_DATA_DISPLAYMATRIX,
+
+ /**
+ * This side data should be associated with a video stream and contains
+ * Stereoscopic 3D information in form of the AVStereo3D struct.
+ */
+ AV_PKT_DATA_STEREO3D,
+
+ /**
+ * This side data should be associated with an audio stream and corresponds
+ * to enum AVAudioServiceType.
+ */
+ AV_PKT_DATA_AUDIO_SERVICE_TYPE,
+
+ /**
+ * This side data contains quality related information from the encoder.
+ * @code
+ * u32le quality factor of the compressed frame. Allowed range is between 1 (good) and FF_LAMBDA_MAX (bad).
+ * u8 picture type
+ * u8 error count
+ * u16 reserved
+ * u64le[error count] sum of squared differences between encoder in and output
+ * @endcode
+ */
+ AV_PKT_DATA_QUALITY_STATS,
+
+ /**
+ * This side data contains an integer value representing the stream index
+ * of a "fallback" track. A fallback track indicates an alternate
+ * track to use when the current track can not be decoded for some reason.
+ * e.g. no decoder available for codec.
+ */
+ AV_PKT_DATA_FALLBACK_TRACK,
+
+ /**
+ * This side data corresponds to the AVCPBProperties struct.
+ */
+ AV_PKT_DATA_CPB_PROPERTIES,
+
+ /**
+ * Recommends skipping the specified number of samples
+ * @code
+ * u32le number of samples to skip from start of this packet
+ * u32le number of samples to skip from end of this packet
+ * u8 reason for start skip
+ * u8 reason for end skip (0=padding silence, 1=convergence)
+ * @endcode
+ */
+ AV_PKT_DATA_SKIP_SAMPLES=70,
+
+ /**
+ * An AV_PKT_DATA_JP_DUALMONO side data packet indicates that
+ * the packet may contain "dual mono" audio specific to Japanese DTV
+ * and, if so, recommends that only the selected channel be used.
+ * @code
+ * u8 selected channels (0=main/left, 1=sub/right, 2=both)
+ * @endcode
+ */
+ AV_PKT_DATA_JP_DUALMONO,
+
+ /**
+ * A list of zero terminated key/value strings. There is no end marker for
+ * the list, so it is required to rely on the side data size to stop.
+ */
+ AV_PKT_DATA_STRINGS_METADATA,
+
+ /**
+ * Subtitle event position
+ * @code
+ * u32le x1
+ * u32le y1
+ * u32le x2
+ * u32le y2
+ * @endcode
+ */
+ AV_PKT_DATA_SUBTITLE_POSITION,
+
+ /**
+ * Data found in BlockAdditional element of matroska container. There is
+ * no end marker for the data, so it is required to rely on the side data
+ * size to recognize the end. 8 byte id (as found in BlockAddId) followed
+ * by data.
+ */
+ AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,
+
+ /**
+ * The optional first identifier line of a WebVTT cue.
+ */
+ AV_PKT_DATA_WEBVTT_IDENTIFIER,
+
+ /**
+ * The optional settings (rendering instructions) that immediately
+ * follow the timestamp specifier of a WebVTT cue.
+ */
+ AV_PKT_DATA_WEBVTT_SETTINGS,
+
+ /**
+ * A list of zero terminated key/value strings. There is no end marker for
+ * the list, so it is required to rely on the side data size to stop. This
+ * side data includes updated metadata which appeared in the stream.
+ */
+ AV_PKT_DATA_METADATA_UPDATE,
+};
+
+#define AV_PKT_DATA_QUALITY_FACTOR AV_PKT_DATA_QUALITY_STATS //DEPRECATED
+
+typedef struct AVPacketSideData {
+ uint8_t *data;
+ int size;
+ enum AVPacketSideDataType type;
+} AVPacketSideData;
+
+/**
+ * This structure stores compressed data. It is typically exported by demuxers
+ * and then passed as input to decoders, or received as output from encoders and
+ * then passed to muxers.
+ *
+ * For video, it should typically contain one compressed frame. For audio it may
+ * contain several compressed frames. Encoders are allowed to output empty
+ * packets, with no compressed data, containing only side data
+ * (e.g. to update some stream parameters at the end of encoding).
+ *
+ * AVPacket is one of the few structs in FFmpeg whose size is a part of the public
+ * ABI. Thus it may be allocated on the stack and no new fields can be added to it
+ * without a libavcodec and libavformat major version bump.
+ *
+ * The semantics of data ownership depends on the buf field.
+ * If it is set, the packet data is dynamically allocated and is
+ * valid indefinitely until a call to av_packet_unref() reduces the
+ * reference count to 0.
+ *
+ * If the buf field is not set, av_packet_ref() will make a copy instead
+ * of increasing the reference count.
+ *
+ * The side data is always allocated with av_malloc(), copied by
+ * av_packet_ref() and freed by av_packet_unref().
+ *
+ * @see av_packet_ref
+ * @see av_packet_unref
+ */
+typedef struct AVPacket {
+ /**
+ * A reference to the reference-counted buffer where the packet data is
+ * stored.
+ * May be NULL, then the packet data is not reference-counted.
+ */
+ AVBufferRef *buf;
+ /**
+ * Presentation timestamp in AVStream->time_base units; the time at which
+ * the decompressed packet will be presented to the user.
+ * Can be AV_NOPTS_VALUE if it is not stored in the file.
+ * pts MUST be larger or equal to dts as presentation cannot happen before
+ * decompression, unless one wants to view hex dumps. Some formats misuse
+ * the terms dts and pts/cts to mean something different. Such timestamps
+ * must be converted to true pts/dts before they are stored in AVPacket.
+ */
+ int64_t pts;
+ /**
+ * Decompression timestamp in AVStream->time_base units; the time at which
+ * the packet is decompressed.
+ * Can be AV_NOPTS_VALUE if it is not stored in the file.
+ */
+ int64_t dts;
+ uint8_t *data;
+ int size;
+ int stream_index;
+ /**
+ * A combination of AV_PKT_FLAG values
+ */
+ int flags;
+ /**
+ * Additional packet data that can be provided by the container.
+ * Packet can contain several types of side information.
+ */
+ AVPacketSideData *side_data;
+ int side_data_elems;
+
+ /**
+ * Duration of this packet in AVStream->time_base units, 0 if unknown.
+ * Equals next_pts - this_pts in presentation order.
+ */
+ int64_t duration;
+
+ int64_t pos; ///< byte position in stream, -1 if unknown
+
+#if FF_API_CONVERGENCE_DURATION
+ /**
+ * @deprecated Same as the duration field, but as int64_t. This was required
+ * for Matroska subtitles, whose duration values could overflow when the
+ * duration field was still an int.
+ */
+ attribute_deprecated
+ int64_t convergence_duration;
+#endif
+} AVPacket;
+#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe
+#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted
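+
+/* Illustrative ownership sketch, assuming src was filled by av_read_frame()
+ * and is reference-counted (src.buf != NULL):
+ * @code
+ * AVPacket dst;
+ * av_init_packet(&dst);
+ * if (av_packet_ref(&dst, &src) >= 0) {
+ *     // dst now shares src's buffer; each copy is released independently
+ *     av_packet_unref(&dst);
+ * }
+ * av_packet_unref(&src);
+ * @endcode
+ */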
+
+enum AVSideDataParamChangeFlags {
+ AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001,
+ AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002,
+ AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004,
+ AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008,
+};
+/**
+ * @}
+ */
+
+struct AVCodecInternal;
+
+enum AVFieldOrder {
+ AV_FIELD_UNKNOWN,
+ AV_FIELD_PROGRESSIVE,
+    AV_FIELD_TT,          //< Top coded first, top displayed first
+ AV_FIELD_BB, //< Bottom coded first, bottom displayed first
+ AV_FIELD_TB, //< Top coded first, bottom displayed first
+ AV_FIELD_BT, //< Bottom coded first, top displayed first
+};
+
+/**
+ * main external API structure.
+ * New fields can be added to the end with minor version bumps.
+ * Removal, reordering and changes to existing fields require a major
+ * version bump.
+ * Please use AVOptions (av_opt* / av_set/get*()) to access these fields from user
+ * applications.
+ * sizeof(AVCodecContext) must not be used outside libav*.
+ */
+typedef struct AVCodecContext {
+ /**
+ * information on struct for av_log
+ * - set by avcodec_alloc_context3
+ */
+ const AVClass *av_class;
+ int log_level_offset;
+
+ enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */
+ const struct AVCodec *codec;
+#if FF_API_CODEC_NAME
+ /**
+ * @deprecated this field is not used for anything in libavcodec
+ */
+ attribute_deprecated
+ char codec_name[32];
+#endif
+ enum AVCodecID codec_id; /* see AV_CODEC_ID_xxx */
+
+ /**
+ * fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
+ * This is used to work around some encoder bugs.
+ * A demuxer should set this to what is stored in the field used to identify the codec.
+ * If there are multiple such fields in a container then the demuxer should choose the one
+ * which maximizes the information about the used codec.
+ * If the codec tag field in a container is larger than 32 bits then the demuxer should
+ * remap the longer ID to 32 bits with a table or other structure. Alternatively a new
+ * extra_codec_tag + size could be added but for this a clear advantage must be demonstrated
+ * first.
+ * - encoding: Set by user, if not then the default based on codec_id will be used.
+ * - decoding: Set by user, will be converted to uppercase by libavcodec during init.
+ */
+ unsigned int codec_tag;
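+
+    /* Illustrative sketch, assuming the MKTAG() helper from libavutil/common.h:
+     * @code
+     * avctx->codec_tag = MKTAG('a', 'v', 'c', '1');   // e.g. an ISO BMFF sample entry
+     * @endcode
+     */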
+
+#if FF_API_STREAM_CODEC_TAG
+ /**
+ * @deprecated this field is unused
+ */
+ attribute_deprecated
+ unsigned int stream_codec_tag;
+#endif
+
+ void *priv_data;
+
+ /**
+ * Private context used for internal data.
+ *
+ * Unlike priv_data, this is not codec-specific. It is used in general
+ * libavcodec functions.
+ */
+ struct AVCodecInternal *internal;
+
+ /**
+ * Private data of the user, can be used to carry app specific stuff.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ void *opaque;
+
+ /**
+ * the average bitrate
+ * - encoding: Set by user; unused for constant quantizer encoding.
+ * - decoding: Set by user, may be overwritten by libavcodec
+ * if this info is available in the stream
+ */
+ int64_t bit_rate;
+
+ /**
+ * number of bits the bitstream is allowed to diverge from the reference.
+ * the reference can be CBR (for CBR pass1) or VBR (for pass2)
+ * - encoding: Set by user; unused for constant quantizer encoding.
+ * - decoding: unused
+ */
+ int bit_rate_tolerance;
+
+ /**
+ * Global quality for codecs which cannot change it per frame.
+ * This should be proportional to MPEG-1/2/4 qscale.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int global_quality;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int compression_level;
+#define FF_COMPRESSION_DEFAULT -1
+
+ /**
+ * AV_CODEC_FLAG_*.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int flags;
+
+ /**
+ * AV_CODEC_FLAG2_*
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int flags2;
+
+ /**
+ * some codecs need / can use extradata like Huffman tables.
+ * mjpeg: Huffman tables
+ * rv10: additional flags
+ * mpeg4: global headers (they can be in the bitstream or here)
+ * The allocated memory should be AV_INPUT_BUFFER_PADDING_SIZE bytes larger
+ * than extradata_size to avoid problems if it is read with the bitstream reader.
+ * The bytewise contents of extradata must not depend on the architecture or CPU endianness.
+ * - encoding: Set/allocated/freed by libavcodec.
+ * - decoding: Set/allocated/freed by user.
+ */
+ uint8_t *extradata;
+ int extradata_size;
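+
+    /* Illustrative decoder-side setup sketch, assuming av_mallocz() from
+     * libavutil/mem.h and a codec header blob (hdr, hdr_size) from the container:
+     * @code
+     * avctx->extradata = av_mallocz(hdr_size + AV_INPUT_BUFFER_PADDING_SIZE);
+     * if (!avctx->extradata)
+     *     return AVERROR(ENOMEM);
+     * memcpy(avctx->extradata, hdr, hdr_size);
+     * avctx->extradata_size = hdr_size;
+     * @endcode
+     */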
+
+ /**
+ * This is the fundamental unit of time (in seconds) in terms
+ * of which frame timestamps are represented. For fixed-fps content,
+ * timebase should be 1/framerate and timestamp increments should be
+ * identically 1.
+     * This often, but not always, is the inverse of the frame rate or field rate
+ * for video.
+ * - encoding: MUST be set by user.
+ * - decoding: the use of this field for decoding is deprecated.
+ * Use framerate instead.
+ */
+ AVRational time_base;
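+
+    /* Illustrative encoding setup sketch for fixed 25 fps content:
+     * @code
+     * avctx->time_base = (AVRational){1, 25};   // timestamps then advance by 1 per frame
+     * @endcode
+     */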
+
+ /**
+ * For some codecs, the time base is closer to the field rate than the frame rate.
+ * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration
+ * if no telecine is used ...
+ *
+ * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2.
+ */
+ int ticks_per_frame;
+
+ /**
+ * Codec delay.
+ *
+     * Encoding: Number of frames of delay there will be from the encoder input to
+     *           the decoder output (we assume the decoder matches the spec).
+     * Decoding: Number of frames of delay in addition to what a standard decoder,
+     *           as specified in the spec, would produce.
+ *
+ * Video:
+ * Number of frames the decoded output will be delayed relative to the
+ * encoded input.
+ *
+ * Audio:
+ * For encoding, this field is unused (see initial_padding).
+ *
+ * For decoding, this is the number of samples the decoder needs to
+ * output before the decoder's output is valid. When seeking, you should
+ * start decoding this many samples prior to your desired seek point.
+ *
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int delay;
+
+
+ /* video only */
+ /**
+ * picture width / height.
+ *
+ * @note Those fields may not match the values of the last
+     * AVFrame output by avcodec_decode_video2 due to frame
+ * reordering.
+ *
+ * - encoding: MUST be set by user.
+ * - decoding: May be set by the user before opening the decoder if known e.g.
+ * from the container. Some decoders will require the dimensions
+ * to be set by the caller. During decoding, the decoder may
+ * overwrite those values as required while parsing the data.
+ */
+ int width, height;
+
+ /**
+     * Bitstream width / height, which may be different from width/height, e.g. when
+ * the decoded frame is cropped before being output or lowres is enabled.
+ *
+     * @note Those fields may not match the values of the last
+     * AVFrame output by avcodec_decode_video2 due to frame
+ * reordering.
+ *
+ * - encoding: unused
+ * - decoding: May be set by the user before opening the decoder if known
+ * e.g. from the container. During decoding, the decoder may
+ * overwrite those values as required while parsing the data.
+ */
+ int coded_width, coded_height;
+
+#if FF_API_ASPECT_EXTENDED
+#define FF_ASPECT_EXTENDED 15
+#endif
+
+ /**
+ * the number of pictures in a group of pictures, or 0 for intra_only
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int gop_size;
+
+ /**
+ * Pixel format, see AV_PIX_FMT_xxx.
+ * May be set by the demuxer if known from headers.
+ * May be overridden by the decoder if it knows better.
+ *
+ * @note This field may not match the value of the last
+     * AVFrame output by avcodec_decode_video2 due to frame
+ * reordering.
+ *
+ * - encoding: Set by user.
+ * - decoding: Set by user if known, overridden by libavcodec while
+ * parsing the data.
+ */
+ enum AVPixelFormat pix_fmt;
+
+#if FF_API_MOTION_EST
+ /**
+ * This option does nothing
+ * @deprecated use codec private options instead
+ */
+ attribute_deprecated int me_method;
+#endif
+
+ /**
+ * If non NULL, 'draw_horiz_band' is called by the libavcodec
+ * decoder to draw a horizontal band. It improves cache usage. Not
+ * all codecs can do that. You must check the codec capabilities
+ * beforehand.
+ * When multithreading is used, it may be called from multiple threads
+ * at the same time; threads might draw different parts of the same AVFrame,
+ * or multiple AVFrames, and there is no guarantee that slices will be drawn
+ * in order.
+ * The function is also used by hardware acceleration APIs.
+ * It is called at least once during frame decoding to pass
+ * the data needed for hardware render.
+ * In that mode instead of pixel data, AVFrame points to
+ * a structure specific to the acceleration API. The application
+ * reads the structure and can change some fields to indicate progress
+ * or mark state.
+ * - encoding: unused
+ * - decoding: Set by user.
+ * @param height the height of the slice
+ * @param y the y position of the slice
+ * @param type 1->top field, 2->bottom field, 3->frame
+ * @param offset offset into the AVFrame.data from which the slice should be read
+ */
+ void (*draw_horiz_band)(struct AVCodecContext *s,
+ const AVFrame *src, int offset[AV_NUM_DATA_POINTERS],
+ int y, int type, int height);
+
+ /**
+ * callback to negotiate the pixelFormat
+     * @param fmt is the list of formats which are supported by the codec;
+     * it is terminated by -1, as 0 is a valid format. The formats are ordered
+     * by quality; the first is always the native one.
+ * @note The callback may be called again immediately if initialization for
+ * the selected (hardware-accelerated) pixel format failed.
+ * @warning Behavior is undefined if the callback returns a value not
+ * in the fmt list of formats.
+ * @return the chosen format
+ * - encoding: unused
+ * - decoding: Set by user, if not set the native format will be chosen.
+ */
+ enum AVPixelFormat (*get_format)(struct AVCodecContext *s, const enum AVPixelFormat * fmt);
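+
+    /* Illustrative callback sketch that prefers one hardware format (the
+     * VA-API choice here is only an example) and otherwise falls back to the
+     * native format:
+     * @code
+     * static enum AVPixelFormat pick_format(struct AVCodecContext *s,
+     *                                       const enum AVPixelFormat *fmt)
+     * {
+     *     int i;
+     *     for (i = 0; fmt[i] != AV_PIX_FMT_NONE; i++) {
+     *         if (fmt[i] == AV_PIX_FMT_VAAPI_VLD)
+     *             return fmt[i];
+     *     }
+     *     return fmt[0];   // the native format is listed first
+     * }
+     * @endcode
+     */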
+
+ /**
+ * maximum number of B-frames between non-B-frames
+ * Note: The output will be delayed by max_b_frames+1 relative to the input.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_b_frames;
+
+ /**
+ * qscale factor between IP and B-frames
+ * If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset).
+ * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float b_quant_factor;
+
+#if FF_API_RC_STRATEGY
+ /** @deprecated use codec private option instead */
+ attribute_deprecated int rc_strategy;
+#define FF_RC_STRATEGY_XVID 1
+#endif
+
+ int b_frame_strategy;
+
+ /**
+ * qscale offset between IP and B-frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float b_quant_offset;
+
+ /**
+ * Size of the frame reordering buffer in the decoder.
+ * For MPEG-2 it is 1 IPB or 0 low delay IP.
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int has_b_frames;
+
+ /**
+ * 0-> h263 quant 1-> mpeg quant
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mpeg_quant;
+
+ /**
+ * qscale factor between P and I-frames
+ * If > 0 then the last p frame quantizer will be used (q= lastp_q*factor+offset).
+ * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float i_quant_factor;
+
+ /**
+ * qscale offset between P and I-frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float i_quant_offset;
+
+ /**
+ * luminance masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float lumi_masking;
+
+ /**
+     * temporal complexity masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float temporal_cplx_masking;
+
+ /**
+ * spatial complexity masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float spatial_cplx_masking;
+
+ /**
+ * p block masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float p_masking;
+
+ /**
+ * darkness masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float dark_masking;
+
+ /**
+ * slice count
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by user (or 0).
+ */
+ int slice_count;
+ /**
+ * prediction method (needed for huffyuv)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int prediction_method;
+#define FF_PRED_LEFT 0
+#define FF_PRED_PLANE 1
+#define FF_PRED_MEDIAN 2
+
+ /**
+ * slice offsets in the frame in bytes
+ * - encoding: Set/allocated by libavcodec.
+ * - decoding: Set/allocated by user (or NULL).
+ */
+ int *slice_offset;
+
+ /**
+ * sample aspect ratio (0 if unknown)
+ * That is the width of a pixel divided by the height of the pixel.
+ * Numerator and denominator must be relatively prime and smaller than 256 for some video standards.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ AVRational sample_aspect_ratio;
+
+ /**
+ * motion estimation comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_cmp;
+ /**
+ * subpixel motion estimation comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_sub_cmp;
+ /**
+ * macroblock comparison function (not supported yet)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_cmp;
+ /**
+ * interlaced DCT comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int ildct_cmp;
+#define FF_CMP_SAD 0
+#define FF_CMP_SSE 1
+#define FF_CMP_SATD 2
+#define FF_CMP_DCT 3
+#define FF_CMP_PSNR 4
+#define FF_CMP_BIT 5
+#define FF_CMP_RD 6
+#define FF_CMP_ZERO 7
+#define FF_CMP_VSAD 8
+#define FF_CMP_VSSE 9
+#define FF_CMP_NSSE 10
+#define FF_CMP_W53 11
+#define FF_CMP_W97 12
+#define FF_CMP_DCTMAX 13
+#define FF_CMP_DCT264 14
+#define FF_CMP_CHROMA 256
+
+ /**
+ * ME diamond size & shape
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int dia_size;
+
+ /**
+ * amount of previous MV predictors (2a+1 x 2a+1 square)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int last_predictor_count;
+
+ /**
+ * prepass for motion estimation
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int pre_me;
+
+ /**
+ * motion estimation prepass comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_pre_cmp;
+
+ /**
+ * ME prepass diamond size & shape
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int pre_dia_size;
+
+ /**
+ * subpel ME quality
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_subpel_quality;
+
+#if FF_API_AFD
+ /**
+ * DTG active format information (additional aspect ratio
+ * information only used in DVB MPEG-2 transport streams)
+ * 0 if not set.
+ *
+ * - encoding: unused
+ * - decoding: Set by decoder.
+ * @deprecated Deprecated in favor of AVSideData
+ */
+ attribute_deprecated int dtg_active_format;
+#define FF_DTG_AFD_SAME 8
+#define FF_DTG_AFD_4_3 9
+#define FF_DTG_AFD_16_9 10
+#define FF_DTG_AFD_14_9 11
+#define FF_DTG_AFD_4_3_SP_14_9 13
+#define FF_DTG_AFD_16_9_SP_14_9 14
+#define FF_DTG_AFD_SP_4_3 15
+#endif /* FF_API_AFD */
+
+ /**
+ * maximum motion estimation search range in subpel units
+ * If 0 then no limit.
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_range;
+
+#if FF_API_QUANT_BIAS
+ /**
+ * @deprecated use encoder private option instead
+ */
+ attribute_deprecated int intra_quant_bias;
+#define FF_DEFAULT_QUANT_BIAS 999999
+
+ /**
+ * @deprecated use encoder private option instead
+ */
+ attribute_deprecated int inter_quant_bias;
+#endif
+
+ /**
+ * slice flags
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int slice_flags;
+#define SLICE_FLAG_CODED_ORDER 0x0001 ///< draw_horiz_band() is called in coded order instead of display
+#define SLICE_FLAG_ALLOW_FIELD 0x0002 ///< allow draw_horiz_band() with field slices (MPEG2 field pics)
+#define SLICE_FLAG_ALLOW_PLANE 0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1)
+
+#if FF_API_XVMC
+ /**
+ * XVideo Motion Acceleration
+ * - encoding: forbidden
+ * - decoding: set by decoder
+ * @deprecated XvMC doesn't need it anymore.
+ */
+ attribute_deprecated int xvmc_acceleration;
+#endif /* FF_API_XVMC */
+
+ /**
+ * macroblock decision mode
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_decision;
+#define FF_MB_DECISION_SIMPLE 0 ///< uses mb_cmp
+#define FF_MB_DECISION_BITS 1 ///< chooses the one which needs the fewest bits
+#define FF_MB_DECISION_RD 2 ///< rate distortion
+
+ /**
+ * custom intra quantization matrix
+ * - encoding: Set by user, can be NULL.
+ * - decoding: Set by libavcodec.
+ */
+ uint16_t *intra_matrix;
+
+ /**
+ * custom inter quantization matrix
+ * - encoding: Set by user, can be NULL.
+ * - decoding: Set by libavcodec.
+ */
+ uint16_t *inter_matrix;
+
+ /**
+ * scene change detection threshold
+ * 0 is default, larger means fewer detected scene changes.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int scenechange_threshold;
+
+ /**
+ * noise reduction strength
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int noise_reduction;
+
+#if FF_API_MPV_OPT
+ /**
+ * @deprecated this field is unused
+ */
+ attribute_deprecated
+ int me_threshold;
+
+ /**
+ * @deprecated this field is unused
+ */
+ attribute_deprecated
+ int mb_threshold;
+#endif
+
+ /**
+ * precision of the intra DC coefficient - 8
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec
+ */
+ int intra_dc_precision;
+
+ /**
+ * Number of macroblock rows at the top which are skipped.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int skip_top;
+
+ /**
+ * Number of macroblock rows at the bottom which are skipped.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int skip_bottom;
+
+#if FF_API_MPV_OPT
+ /**
+ * @deprecated use encoder private options instead
+ */
+ attribute_deprecated
+ float border_masking;
+#endif
+
+ /**
+     * minimum MB Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_lmin;
+
+ /**
+     * maximum MB Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_lmax;
+
+ /**
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_penalty_compensation;
+
+ /**
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int bidir_refine;
+
+ /**
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int brd_scale;
+
+ /**
+ * minimum GOP size
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int keyint_min;
+
+ /**
+ * number of reference frames
+ * - encoding: Set by user.
+ * - decoding: Set by lavc.
+ */
+ int refs;
+
+ /**
+ * chroma qp offset from luma
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int chromaoffset;
+
+#if FF_API_UNUSED_MEMBERS
+ /**
+ * Multiplied by qscale for each frame and added to scene_change_score.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ attribute_deprecated int scenechange_factor;
+#endif
+
+ /**
+ *
+ * Note: Value depends upon the compare function used for fullpel ME.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mv0_threshold;
+
+ /**
+ * Adjust sensitivity of b_frame_strategy 1.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int b_sensitivity;
+
+ /**
+ * Chromaticity coordinates of the source primaries.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorPrimaries color_primaries;
+
+ /**
+ * Color Transfer Characteristic.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorTransferCharacteristic color_trc;
+
+ /**
+ * YUV colorspace type.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorSpace colorspace;
+
+ /**
+ * MPEG vs JPEG YUV range.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorRange color_range;
+
+ /**
+ * This defines the location of chroma samples.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVChromaLocation chroma_sample_location;
+
+ /**
+ * Number of slices.
+ * Indicates number of picture subdivisions. Used for parallelized
+ * decoding.
+ * - encoding: Set by user
+ * - decoding: unused
+ */
+ int slices;
+
+ /** Field order
+ * - encoding: set by libavcodec
+ * - decoding: Set by user.
+ */
+ enum AVFieldOrder field_order;
+
+ /* audio only */
+ int sample_rate; ///< samples per second
+ int channels; ///< number of audio channels
+
+ /**
+ * audio sample format
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ enum AVSampleFormat sample_fmt; ///< sample format
+
+ /* The following data should not be initialized. */
+ /**
+ * Number of samples per channel in an audio frame.
+ *
+ * - encoding: set by libavcodec in avcodec_open2(). Each submitted frame
+ * except the last must contain exactly frame_size samples per channel.
+ * May be 0 when the codec has AV_CODEC_CAP_VARIABLE_FRAME_SIZE set, then the
+ * frame size is not restricted.
+ * - decoding: may be set by some decoders to indicate constant frame size
+ */
+ int frame_size;
+
+ /**
+ * Frame counter, set by libavcodec.
+ *
+ * - decoding: total number of frames returned from the decoder so far.
+ * - encoding: total number of frames passed to the encoder so far.
+ *
+ * @note the counter is not incremented if encoding/decoding resulted in
+ * an error.
+ */
+ int frame_number;
+
+ /**
+ * number of bytes per packet if constant and known or 0
+ * Used by some WAV based audio codecs.
+ */
+ int block_align;
+
+ /**
+ * Audio cutoff bandwidth (0 means "automatic")
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int cutoff;
+
+ /**
+ * Audio channel layout.
+ * - encoding: set by user.
+ * - decoding: set by user, may be overwritten by libavcodec.
+ */
+ uint64_t channel_layout;
+
+ /**
+ * Request decoder to use this channel layout if it can (0 for default)
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ uint64_t request_channel_layout;
+
+ /**
+ * Type of service that the audio stream conveys.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ enum AVAudioServiceType audio_service_type;
+
+ /**
+ * desired sample format
+ * - encoding: Not used.
+ * - decoding: Set by user.
+ * Decoder will decode to this format if it can.
+ */
+ enum AVSampleFormat request_sample_fmt;
+
+ /**
+ * This callback is called at the beginning of each frame to get data
+ * buffer(s) for it. There may be one contiguous buffer for all the data or
+     * there may be a buffer for each data plane, or anything in between. What
+     * this means is, you may set however many entries in buf[] you feel are necessary.
+ * Each buffer must be reference-counted using the AVBuffer API (see description
+ * of buf[] below).
+ *
+ * The following fields will be set in the frame before this callback is
+ * called:
+ * - format
+ * - width, height (video only)
+ * - sample_rate, channel_layout, nb_samples (audio only)
+ * Their values may differ from the corresponding values in
+ * AVCodecContext. This callback must use the frame values, not the codec
+ * context values, to calculate the required buffer size.
+ *
+ * This callback must fill the following fields in the frame:
+ * - data[]
+ * - linesize[]
+ * - extended_data:
+ * * if the data is planar audio with more than 8 channels, then this
+ * callback must allocate and fill extended_data to contain all pointers
+ * to all data planes. data[] must hold as many pointers as it can.
+ * extended_data must be allocated with av_malloc() and will be freed in
+ * av_frame_unref().
+     *   * otherwise extended_data must point to data
+ * - buf[] must contain one or more pointers to AVBufferRef structures. Each of
+ * the frame's data and extended_data pointers must be contained in these. That
+ * is, one AVBufferRef for each allocated chunk of memory, not necessarily one
+ * AVBufferRef per data[] entry. See: av_buffer_create(), av_buffer_alloc(),
+ * and av_buffer_ref().
+ * - extended_buf and nb_extended_buf must be allocated with av_malloc() by
+ * this callback and filled with the extra buffers if there are more
+ * buffers than buf[] can hold. extended_buf will be freed in
+ * av_frame_unref().
+ *
+ * If AV_CODEC_CAP_DR1 is not set then get_buffer2() must call
+ * avcodec_default_get_buffer2() instead of providing buffers allocated by
+ * some other means.
+ *
+ * Each data plane must be aligned to the maximum required by the target
+ * CPU.
+ *
+ * @see avcodec_default_get_buffer2()
+ *
+ * Video:
+ *
+ * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused
+ * (read and/or written to if it is writable) later by libavcodec.
+ *
+ * avcodec_align_dimensions2() should be used to find the required width and
+ * height, as they normally need to be rounded up to the next multiple of 16.
+ *
+ * Some decoders do not support linesizes changing between frames.
+ *
+ * If frame multithreading is used and thread_safe_callbacks is set,
+ * this callback may be called from a different thread, but not from more
+ * than one at once. Does not need to be reentrant.
+ *
+ * @see avcodec_align_dimensions2()
+ *
+ * Audio:
+ *
+ * Decoders request a buffer of a particular size by setting
+ * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may,
+ * however, utilize only part of the buffer by setting AVFrame.nb_samples
+ * to a smaller value in the output frame.
+ *
+ * As a convenience, av_samples_get_buffer_size() and
+ * av_samples_fill_arrays() in libavutil may be used by custom get_buffer2()
+ * functions to find the required data size and to fill data pointers and
+ * linesize. In AVFrame.linesize, only linesize[0] may be set for audio
+ * since all planes must be the same size.
+ *
+ * @see av_samples_get_buffer_size(), av_samples_fill_arrays()
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags);
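+
+    /* Illustrative sketch of a custom get_buffer2 that merely delegates to the
+     * default allocator (a real callback would hand out its own AVBufferRefs):
+     * @code
+     * static int my_get_buffer2(struct AVCodecContext *s, AVFrame *frame, int flags)
+     * {
+     *     // custom bookkeeping could go here, e.g. tagging frame->opaque
+     *     return avcodec_default_get_buffer2(s, frame, flags);
+     * }
+     * @endcode
+     */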
+
+ /**
+ * If non-zero, the decoded audio and video frames returned from
+ * avcodec_decode_video2() and avcodec_decode_audio4() are reference-counted
+ * and are valid indefinitely. The caller must free them with
+ * av_frame_unref() when they are not needed anymore.
+ * Otherwise, the decoded frames must not be freed by the caller and are
+ * only valid until the next decode call.
+ *
+ * - encoding: unused
+ * - decoding: set by the caller before avcodec_open2().
+ */
+ int refcounted_frames;
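+
+    /* Illustrative decode-loop sketch with reference-counted frames, assuming
+     * avctx is not yet opened and frame/pkt are provided by the caller:
+     * @code
+     * avctx->refcounted_frames = 1;   // must be set before avcodec_open2()
+     * ...
+     * int got_frame = 0;
+     * if (avcodec_decode_video2(avctx, frame, &got_frame, &pkt) >= 0 && got_frame) {
+     *     // frame stays valid until explicitly released
+     *     av_frame_unref(frame);
+     * }
+     * @endcode
+     */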
+
+ /* - encoding parameters */
+ float qcompress; ///< amount of qscale change between easy & hard scenes (0.0-1.0)
+ float qblur; ///< amount of qscale smoothing over time (0.0-1.0)
+
+ /**
+ * minimum quantizer
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int qmin;
+
+ /**
+ * maximum quantizer
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int qmax;
+
+ /**
+ * maximum quantizer difference between frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_qdiff;
+
+#if FF_API_MPV_OPT
+ /**
+ * @deprecated use encoder private options instead
+ */
+ attribute_deprecated
+ float rc_qsquish;
+
+ attribute_deprecated
+ float rc_qmod_amp;
+ attribute_deprecated
+ int rc_qmod_freq;
+#endif
+
+ /**
+ * decoder bitstream buffer size
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_buffer_size;
+
+ /**
+ * ratecontrol override, see RcOverride
+ * - encoding: Allocated/set/freed by user.
+ * - decoding: unused
+ */
+ int rc_override_count;
+ RcOverride *rc_override;
+
+#if FF_API_MPV_OPT
+ /**
+ * @deprecated use encoder private options instead
+ */
+ attribute_deprecated
+ const char *rc_eq;
+#endif
+
+ /**
+ * maximum bitrate
+ * - encoding: Set by user.
+ * - decoding: Set by user, may be overwritten by libavcodec.
+ */
+ int64_t rc_max_rate;
+
+ /**
+ * minimum bitrate
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int64_t rc_min_rate;
+
+#if FF_API_MPV_OPT
+ /**
+ * @deprecated use encoder private options instead
+ */
+ attribute_deprecated
+ float rc_buffer_aggressivity;
+
+ attribute_deprecated
+ float rc_initial_cplx;
+#endif
+
+ /**
+     * Rate control: attempt to use, at most, <value> of what can be used without an underflow.
+ * - encoding: Set by user.
+ * - decoding: unused.
+ */
+ float rc_max_available_vbv_use;
+
+ /**
+     * Rate control: attempt to use, at least, <value> times the amount needed to prevent a vbv overflow.
+ * - encoding: Set by user.
+ * - decoding: unused.
+ */
+ float rc_min_vbv_overflow_use;
+
+ /**
+ * Number of bits which should be loaded into the rc buffer before decoding starts.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_initial_buffer_occupancy;
+
+#if FF_API_CODER_TYPE
+#define FF_CODER_TYPE_VLC 0
+#define FF_CODER_TYPE_AC 1
+#define FF_CODER_TYPE_RAW 2
+#define FF_CODER_TYPE_RLE 3
+#if FF_API_UNUSED_MEMBERS
+#define FF_CODER_TYPE_DEFLATE 4
+#endif /* FF_API_UNUSED_MEMBERS */
+ /**
+ * @deprecated use encoder private options instead
+ */
+ attribute_deprecated
+ int coder_type;
+#endif /* FF_API_CODER_TYPE */
+
+ /**
+ * context model
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int context_model;
+
+#if FF_API_MPV_OPT
+ /**
+ * @deprecated use encoder private options instead
+ */
+ attribute_deprecated
+ int lmin;
+
+ /**
+ * @deprecated use encoder private options instead
+ */
+ attribute_deprecated
+ int lmax;
+#endif
+
+ /**
+ * frame skip threshold
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_threshold;
+
+ /**
+ * frame skip factor
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_factor;
+
+ /**
+ * frame skip exponent
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_exp;
+
+ /**
+ * frame skip comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_cmp;
+
+ /**
+ * trellis RD quantization
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int trellis;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int min_prediction_order;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_prediction_order;
+
+ /**
+ * GOP timecode frame start number
+ * - encoding: Set by user, in non drop frame format
+ * - decoding: Set by libavcodec (timecode in the 25 bits format, -1 if unset)
+ */
+ int64_t timecode_frame_start;
+
+#if FF_API_RTP_CALLBACK
+ /**
+ * @deprecated unused
+ */
+ /* The RTP callback: This function is called */
+ /* every time the encoder has a packet to send. */
+ /* It depends on the encoder if the data starts */
+ /* with a Start Code (it should). H.263 does. */
+ /* mb_nb contains the number of macroblocks */
+ /* encoded in the RTP payload. */
+ attribute_deprecated
+ void (*rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb);
+#endif
+
+ int rtp_payload_size; /* The size of the RTP payload: the coder will */
+ /* do its best to deliver a chunk with size */
+ /* below rtp_payload_size, the chunk will start */
+ /* with a start code on some codecs like H.263. */
+ /* This doesn't take account of any particular */
+ /* headers inside the transmitted RTP payload. */
+
+#if FF_API_STAT_BITS
+ /* statistics, used for 2-pass encoding */
+ attribute_deprecated
+ int mv_bits;
+ attribute_deprecated
+ int header_bits;
+ attribute_deprecated
+ int i_tex_bits;
+ attribute_deprecated
+ int p_tex_bits;
+ attribute_deprecated
+ int i_count;
+ attribute_deprecated
+ int p_count;
+ attribute_deprecated
+ int skip_count;
+ attribute_deprecated
+ int misc_bits;
+
+ /** @deprecated this field is unused */
+ attribute_deprecated
+ int frame_bits;
+#endif
+
+ /**
+ * pass1 encoding statistics output buffer
+ * - encoding: Set by libavcodec.
+ * - decoding: unused
+ */
+ char *stats_out;
+
+ /**
+ * pass2 encoding statistics input buffer
+ * Concatenated stuff from stats_out of pass1 should be placed here.
+ * - encoding: Allocated/set/freed by user.
+ * - decoding: unused
+ */
+ char *stats_in;
+
+ /**
+ * Work around bugs in encoders which sometimes cannot be detected automatically.
+ * - encoding: Set by user
+ * - decoding: Set by user
+ */
+ int workaround_bugs;
+#define FF_BUG_AUTODETECT 1 ///< autodetection
+#if FF_API_OLD_MSMPEG4
+#define FF_BUG_OLD_MSMPEG4 2
+#endif
+#define FF_BUG_XVID_ILACE 4
+#define FF_BUG_UMP4 8
+#define FF_BUG_NO_PADDING 16
+#define FF_BUG_AMV 32
+#if FF_API_AC_VLC
+#define FF_BUG_AC_VLC 0 ///< Will be removed, libavcodec can now handle these non-compliant files by default.
+#endif
+#define FF_BUG_QPEL_CHROMA 64
+#define FF_BUG_STD_QPEL 128
+#define FF_BUG_QPEL_CHROMA2 256
+#define FF_BUG_DIRECT_BLOCKSIZE 512
+#define FF_BUG_EDGE 1024
+#define FF_BUG_HPEL_CHROMA 2048
+#define FF_BUG_DC_CLIP 4096
+#define FF_BUG_MS 8192 ///< Work around various bugs in Microsoft's broken decoders.
+#define FF_BUG_TRUNCATED 16384
+
+ /**
+ * strictly follow the standard (MPEG4, ...).
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ * Setting this to STRICT or higher means the encoder and decoder will
+ * generally do stupid things, whereas setting it to unofficial or lower
+ * will mean the encoder might produce output that is not supported by all
+ * spec-compliant decoders. Decoders don't differentiate between normal,
+ * unofficial and experimental (that is, they always try to decode things
+ * when they can) unless they are explicitly asked to behave stupidly
+ * (=strictly conform to the specs)
+ */
+ int strict_std_compliance;
+#define FF_COMPLIANCE_VERY_STRICT 2 ///< Strictly conform to an older more strict version of the spec or reference software.
+#define FF_COMPLIANCE_STRICT 1 ///< Strictly conform to all the things in the spec no matter what consequences.
+#define FF_COMPLIANCE_NORMAL 0
+#define FF_COMPLIANCE_UNOFFICIAL -1 ///< Allow unofficial extensions
+#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things.
+
+ /**
+ * error concealment flags
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int error_concealment;
+#define FF_EC_GUESS_MVS 1
+#define FF_EC_DEBLOCK 2
+#define FF_EC_FAVOR_INTER 256
+
+ /**
+ * debug
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int debug;
+#define FF_DEBUG_PICT_INFO 1
+#define FF_DEBUG_RC 2
+#define FF_DEBUG_BITSTREAM 4
+#define FF_DEBUG_MB_TYPE 8
+#define FF_DEBUG_QP 16
+#if FF_API_DEBUG_MV
+/**
+ * @deprecated this option does nothing
+ */
+#define FF_DEBUG_MV 32
+#endif
+#define FF_DEBUG_DCT_COEFF 0x00000040
+#define FF_DEBUG_SKIP 0x00000080
+#define FF_DEBUG_STARTCODE 0x00000100
+#if FF_API_UNUSED_MEMBERS
+#define FF_DEBUG_PTS 0x00000200
+#endif /* FF_API_UNUSED_MEMBERS */
+#define FF_DEBUG_ER 0x00000400
+#define FF_DEBUG_MMCO 0x00000800
+#define FF_DEBUG_BUGS 0x00001000
+#if FF_API_DEBUG_MV
+#define FF_DEBUG_VIS_QP 0x00002000 ///< only access through AVOptions from outside libavcodec
+#define FF_DEBUG_VIS_MB_TYPE 0x00004000 ///< only access through AVOptions from outside libavcodec
+#endif
+#define FF_DEBUG_BUFFERS 0x00008000
+#define FF_DEBUG_THREADS 0x00010000
+#define FF_DEBUG_GREEN_MD 0x00800000
+#define FF_DEBUG_NOMC 0x01000000
+
+#if FF_API_DEBUG_MV
+ /**
+ * debug
+ * Code outside libavcodec should access this field using AVOptions
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int debug_mv;
+#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames
+#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames
+#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames
+#endif
+
+ /**
+ * Error recognition; may misdetect some more or less valid parts as errors.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int err_recognition;
+
+/**
+ * Verify checksums embedded in the bitstream (could be of either encoded or
+ * decoded data, depending on the codec) and print an error message on mismatch.
+ * If AV_EF_EXPLODE is also set, a mismatching checksum will result in the
+ * decoder returning an error.
+ */
+#define AV_EF_CRCCHECK (1<<0)
+#define AV_EF_BITSTREAM (1<<1) ///< detect bitstream specification deviations
+#define AV_EF_BUFFER (1<<2) ///< detect improper bitstream length
+#define AV_EF_EXPLODE (1<<3) ///< abort decoding on minor error detection
+
+#define AV_EF_IGNORE_ERR (1<<15) ///< ignore errors and continue
+#define AV_EF_CAREFUL (1<<16) ///< consider things that violate the spec, are fast to calculate and have not been seen in the wild as errors
+#define AV_EF_COMPLIANT (1<<17) ///< consider all spec non compliances as errors
+#define AV_EF_AGGRESSIVE (1<<18) ///< consider things that a sane encoder should not do as an error
+
+
+ /**
+ * opaque 64bit number (generally a PTS) that will be reordered and
+ * output in AVFrame.reordered_opaque
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int64_t reordered_opaque;
+
+ /**
+ * Hardware accelerator in use
+ * - encoding: unused.
+ * - decoding: Set by libavcodec
+ */
+ struct AVHWAccel *hwaccel;
+
+ /**
+ * Hardware accelerator context.
+ * For some hardware accelerators, a global context needs to be
+ * provided by the user. In that case, this holds display-dependent
+ * data FFmpeg cannot instantiate itself. Please refer to the
+ * FFmpeg HW accelerator documentation to know how to fill this
+     * field. E.g. for VA API, this is a struct vaapi_context.
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ void *hwaccel_context;
+
+ /**
+ * error
+ * - encoding: Set by libavcodec if flags & AV_CODEC_FLAG_PSNR.
+ * - decoding: unused
+ */
+ uint64_t error[AV_NUM_DATA_POINTERS];
+
+ /**
+ * DCT algorithm, see FF_DCT_* below
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int dct_algo;
+#define FF_DCT_AUTO 0
+#define FF_DCT_FASTINT 1
+#define FF_DCT_INT 2
+#define FF_DCT_MMX 3
+#define FF_DCT_ALTIVEC 5
+#define FF_DCT_FAAN 6
+
+ /**
+ * IDCT algorithm, see FF_IDCT_* below.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int idct_algo;
+#define FF_IDCT_AUTO 0
+#define FF_IDCT_INT 1
+#define FF_IDCT_SIMPLE 2
+#define FF_IDCT_SIMPLEMMX 3
+#define FF_IDCT_ARM 7
+#define FF_IDCT_ALTIVEC 8
+#if FF_API_ARCH_SH4
+#define FF_IDCT_SH4 9
+#endif
+#define FF_IDCT_SIMPLEARM 10
+#if FF_API_UNUSED_MEMBERS
+#define FF_IDCT_IPP 13
+#endif /* FF_API_UNUSED_MEMBERS */
+#define FF_IDCT_XVID 14
+#if FF_API_IDCT_XVIDMMX
+#define FF_IDCT_XVIDMMX 14
+#endif /* FF_API_IDCT_XVIDMMX */
+#define FF_IDCT_SIMPLEARMV5TE 16
+#define FF_IDCT_SIMPLEARMV6 17
+#if FF_API_ARCH_SPARC
+#define FF_IDCT_SIMPLEVIS 18
+#endif
+#define FF_IDCT_FAAN 20
+#define FF_IDCT_SIMPLENEON 22
+#if FF_API_ARCH_ALPHA
+#define FF_IDCT_SIMPLEALPHA 23
+#endif
+#define FF_IDCT_SIMPLEAUTO 128
+
+ /**
+ * bits per sample/pixel from the demuxer (needed for huffyuv).
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by user.
+ */
+ int bits_per_coded_sample;
+
+ /**
+ * Bits per sample/pixel of internal libavcodec pixel/sample format.
+ * - encoding: set by user.
+ * - decoding: set by libavcodec.
+ */
+ int bits_per_raw_sample;
+
+#if FF_API_LOWRES
+ /**
+ * low resolution decoding, 1-> 1/2 size, 2->1/4 size
+ * - encoding: unused
+ * - decoding: Set by user.
+ * Code outside libavcodec should access this field using:
+ * av_codec_{get,set}_lowres(avctx)
+ */
+ int lowres;
+#endif
+
+#if FF_API_CODED_FRAME
+ /**
+ * the picture in the bitstream
+ * - encoding: Set by libavcodec.
+ * - decoding: unused
+ *
+ * @deprecated use the quality factor packet side data instead
+ */
+ attribute_deprecated AVFrame *coded_frame;
+#endif
+
+ /**
+ * thread count
+ * is used to decide how many independent tasks should be passed to execute()
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int thread_count;
+
+ /**
+ * Which multithreading methods to use.
+ * Use of FF_THREAD_FRAME will increase decoding delay by one frame per thread,
+ * so clients which cannot provide future frames should not use it.
+ *
+ * - encoding: Set by user, otherwise the default is used.
+ * - decoding: Set by user, otherwise the default is used.
+ */
+ int thread_type;
+#define FF_THREAD_FRAME 1 ///< Decode more than one frame at once
+#define FF_THREAD_SLICE 2 ///< Decode more than one part of a single frame at once
+
+ /**
+ * Which multithreading methods are in use by the codec.
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int active_thread_type;
+
+ /**
+ * Set by the client if its custom get_buffer() callback can be called
+ * synchronously from another thread, which allows faster multithreaded decoding.
+ * draw_horiz_band() will be called from other threads regardless of this setting.
+ * Ignored if the default get_buffer() is used.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int thread_safe_callbacks;
+
+ /**
+ * The codec may call this to execute several independent things.
+ * It will return only after finishing all tasks.
+ * The user may replace this with some multithreaded implementation;
+ * the default implementation will execute the parts serially.
+ * @param count the number of things to execute
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*execute)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size);
+
+ /**
+ * The codec may call this to execute several independent things.
+ * It will return only after finishing all tasks.
+ * The user may replace this with some multithreaded implementation;
+ * the default implementation will execute the parts serially.
+ * Also see avcodec_thread_init and e.g. the --enable-pthread configure option.
+ * @param c context passed also to func
+ * @param count the number of things to execute
+ * @param arg2 argument passed unchanged to func
+ * @param ret return values of executed functions, must have space for "count" values. May be NULL.
+ * @param func function that will be called count times, with jobnr from 0 to count-1.
+ * threadnr will be in the range 0 to c->thread_count-1 < MAX_THREADS and so that no
+ * two instances of func executing at the same time will have the same threadnr.
+ * @return always 0 currently, but code should handle a future improvement where,
+ * when any call to func returns < 0, no further calls to func may be made and < 0 is returned.
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*execute2)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count);
+
+ /**
+ * noise vs. sse weight for the nsse comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int nsse_weight;
+
+ /**
+ * profile
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int profile;
+#define FF_PROFILE_UNKNOWN -99
+#define FF_PROFILE_RESERVED -100
+
+#define FF_PROFILE_AAC_MAIN 0
+#define FF_PROFILE_AAC_LOW 1
+#define FF_PROFILE_AAC_SSR 2
+#define FF_PROFILE_AAC_LTP 3
+#define FF_PROFILE_AAC_HE 4
+#define FF_PROFILE_AAC_HE_V2 28
+#define FF_PROFILE_AAC_LD 22
+#define FF_PROFILE_AAC_ELD 38
+#define FF_PROFILE_MPEG2_AAC_LOW 128
+#define FF_PROFILE_MPEG2_AAC_HE 131
+
+#define FF_PROFILE_DTS 20
+#define FF_PROFILE_DTS_ES 30
+#define FF_PROFILE_DTS_96_24 40
+#define FF_PROFILE_DTS_HD_HRA 50
+#define FF_PROFILE_DTS_HD_MA 60
+#define FF_PROFILE_DTS_EXPRESS 70
+
+#define FF_PROFILE_MPEG2_422 0
+#define FF_PROFILE_MPEG2_HIGH 1
+#define FF_PROFILE_MPEG2_SS 2
+#define FF_PROFILE_MPEG2_SNR_SCALABLE 3
+#define FF_PROFILE_MPEG2_MAIN 4
+#define FF_PROFILE_MPEG2_SIMPLE 5
+
+#define FF_PROFILE_H264_CONSTRAINED (1<<9) // 8+1; constraint_set1_flag
+#define FF_PROFILE_H264_INTRA (1<<11) // 8+3; constraint_set3_flag
+
+#define FF_PROFILE_H264_BASELINE 66
+#define FF_PROFILE_H264_CONSTRAINED_BASELINE (66|FF_PROFILE_H264_CONSTRAINED)
+#define FF_PROFILE_H264_MAIN 77
+#define FF_PROFILE_H264_EXTENDED 88
+#define FF_PROFILE_H264_HIGH 100
+#define FF_PROFILE_H264_HIGH_10 110
+#define FF_PROFILE_H264_HIGH_10_INTRA (110|FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_HIGH_422 122
+#define FF_PROFILE_H264_HIGH_422_INTRA (122|FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_HIGH_444 144
+#define FF_PROFILE_H264_HIGH_444_PREDICTIVE 244
+#define FF_PROFILE_H264_HIGH_444_INTRA (244|FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_CAVLC_444 44
+
+#define FF_PROFILE_VC1_SIMPLE 0
+#define FF_PROFILE_VC1_MAIN 1
+#define FF_PROFILE_VC1_COMPLEX 2
+#define FF_PROFILE_VC1_ADVANCED 3
+
+#define FF_PROFILE_MPEG4_SIMPLE 0
+#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE 1
+#define FF_PROFILE_MPEG4_CORE 2
+#define FF_PROFILE_MPEG4_MAIN 3
+#define FF_PROFILE_MPEG4_N_BIT 4
+#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE 5
+#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6
+#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7
+#define FF_PROFILE_MPEG4_HYBRID 8
+#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME 9
+#define FF_PROFILE_MPEG4_CORE_SCALABLE 10
+#define FF_PROFILE_MPEG4_ADVANCED_CODING 11
+#define FF_PROFILE_MPEG4_ADVANCED_CORE 12
+#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13
+#define FF_PROFILE_MPEG4_SIMPLE_STUDIO 14
+#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE 15
+
+#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_0 0
+#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_1 1
+#define FF_PROFILE_JPEG2000_CSTREAM_NO_RESTRICTION 2
+#define FF_PROFILE_JPEG2000_DCINEMA_2K 3
+#define FF_PROFILE_JPEG2000_DCINEMA_4K 4
+
+#define FF_PROFILE_VP9_0 0
+#define FF_PROFILE_VP9_1 1
+#define FF_PROFILE_VP9_2 2
+#define FF_PROFILE_VP9_3 3
+
+#define FF_PROFILE_HEVC_MAIN 1
+#define FF_PROFILE_HEVC_MAIN_10 2
+#define FF_PROFILE_HEVC_MAIN_STILL_PICTURE 3
+#define FF_PROFILE_HEVC_REXT 4
+
+ /**
+ * level
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int level;
+#define FF_LEVEL_UNKNOWN -99
+
+ /**
+ * Skip loop filtering for selected frames.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_loop_filter;
+
+ /**
+ * Skip IDCT/dequantization for selected frames.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_idct;
+
+ /**
+ * Skip decoding for selected frames.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_frame;
+
+ /**
+ * Header containing style information for text subtitles.
+ * For SUBTITLE_ASS subtitle type, it should contain the whole ASS
+ * [Script Info] and [V4+ Styles] section, plus the [Events] line and
+ * the Format line following. It shouldn't include any Dialogue line.
+ * - encoding: Set/allocated/freed by user (before avcodec_open2())
+ * - decoding: Set/allocated/freed by libavcodec (by avcodec_open2())
+ */
+ uint8_t *subtitle_header;
+ int subtitle_header_size;
+
+#if FF_API_ERROR_RATE
+ /**
+ * @deprecated use the 'error_rate' private AVOption of the mpegvideo
+ * encoders
+ */
+ attribute_deprecated
+ int error_rate;
+#endif
+
+#if FF_API_VBV_DELAY
+ /**
+ * VBV delay coded in the last frame (in periods of a 27 MHz clock).
+ * Used for compliant TS muxing.
+ * - encoding: Set by libavcodec.
+ * - decoding: unused.
+ * @deprecated this value is now exported as a part of
+ * AV_PKT_DATA_CPB_PROPERTIES packet side data
+ */
+ attribute_deprecated
+ uint64_t vbv_delay;
+#endif
+
+#if FF_API_SIDEDATA_ONLY_PKT
+ /**
+ * Encoding only and set by default. Allow encoders to output packets
+ * that do not contain any encoded data, only side data.
+ *
+ * Some encoders need to output such packets, e.g. to update some stream
+ * parameters at the end of encoding.
+ *
+ * @deprecated this field disables the default behaviour and
+ * it is kept only for compatibility.
+ */
+ attribute_deprecated
+ int side_data_only_packets;
+#endif
+
+ /**
+ * Audio only. The number of "priming" samples (padding) inserted by the
+ * encoder at the beginning of the audio. I.e. this number of leading
+ * decoded samples must be discarded by the caller to get the original audio
+ * without leading padding.
+ *
+ * - decoding: unused
+ * - encoding: Set by libavcodec. The timestamps on the output packets are
+ * adjusted by the encoder so that they always refer to the
+ * first sample of the data actually contained in the packet,
+ * including any added padding. E.g. if the timebase is
+ * 1/samplerate and the timestamp of the first input sample is
+ * 0, the timestamp of the first output packet will be
+ * -initial_padding.
+ */
+ int initial_padding;
+
+ /**
+ * - decoding: For codecs that store a framerate value in the compressed
+ * bitstream, the decoder may export it here. { 0, 1} when
+ * unknown.
+ * - encoding: unused
+ */
+ AVRational framerate;
+
+ /**
+ * Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
+ * - encoding: unused.
+ * - decoding: Set by libavcodec before calling get_format()
+ */
+ enum AVPixelFormat sw_pix_fmt;
+
+ /**
+ * Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
+ * Code outside libavcodec should access this field using:
+ * av_codec_{get,set}_pkt_timebase(avctx)
+ * - encoding: unused.
+ * - decoding: set by user.
+ */
+ AVRational pkt_timebase;
+
+ /**
+ * AVCodecDescriptor
+ * Code outside libavcodec should access this field using:
+ * av_codec_{get,set}_codec_descriptor(avctx)
+ * - encoding: unused.
+ * - decoding: set by libavcodec.
+ */
+ const AVCodecDescriptor *codec_descriptor;
+
+#if !FF_API_LOWRES
+ /**
+ * low resolution decoding, 1-> 1/2 size, 2->1/4 size
+ * - encoding: unused
+ * - decoding: Set by user.
+ * Code outside libavcodec should access this field using:
+ * av_codec_{get,set}_lowres(avctx)
+ */
+ int lowres;
+#endif
+
+ /**
+ * Current statistics for PTS correction.
+ * - decoding: maintained and used by libavcodec, not intended to be used by user apps
+ * - encoding: unused
+ */
+ int64_t pts_correction_num_faulty_pts; ///< Number of incorrect PTS values so far
+ int64_t pts_correction_num_faulty_dts; ///< Number of incorrect DTS values so far
+ int64_t pts_correction_last_pts; ///< PTS of the last frame
+ int64_t pts_correction_last_dts; ///< DTS of the last frame
+
+ /**
+ * Character encoding of the input subtitles file.
+ * - decoding: set by user
+ * - encoding: unused
+ */
+ char *sub_charenc;
+
+ /**
+ * Subtitles character encoding mode. Formats or codecs might be adjusting
+ * this setting (if they are doing the conversion themselves for instance).
+ * - decoding: set by libavcodec
+ * - encoding: unused
+ */
+ int sub_charenc_mode;
+#define FF_SUB_CHARENC_MODE_DO_NOTHING -1 ///< do nothing (demuxer outputs a stream supposed to be already in UTF-8, or the codec is bitmap for instance)
+#define FF_SUB_CHARENC_MODE_AUTOMATIC 0 ///< libavcodec will select the mode itself
+#define FF_SUB_CHARENC_MODE_PRE_DECODER 1 ///< the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv
+
+ /**
+ * Skip processing alpha if supported by codec.
+ * Note that if the format uses pre-multiplied alpha (common with VP6,
+ * and recommended due to better video quality/compression)
+ * the image will look as if alpha-blended onto a black background.
+ * However for formats that do not use pre-multiplied alpha
+ * there might be serious artefacts (though e.g. libswscale currently
+ * assumes pre-multiplied alpha anyway).
+ * Code outside libavcodec should access this field using AVOptions
+ *
+ * - decoding: set by user
+ * - encoding: unused
+ */
+ int skip_alpha;
+
+ /**
+ * Number of samples to skip after a discontinuity
+ * - decoding: unused
+ * - encoding: set by libavcodec
+ */
+ int seek_preroll;
+
+#if !FF_API_DEBUG_MV
+ /**
+ * debug motion vectors
+ * Code outside libavcodec should access this field using AVOptions
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int debug_mv;
+#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames
+#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames
+#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames
+#endif
+
+ /**
+ * custom intra quantization matrix
+ * Code outside libavcodec should access this field using av_codec_{get,set}_chroma_intra_matrix()
+ * - encoding: Set by user, can be NULL.
+ * - decoding: unused.
+ */
+ uint16_t *chroma_intra_matrix;
+
+ /**
+ * dump format separator.
+ * can be ", " or "\n " or anything else
+ * Code outside libavcodec should access this field using AVOptions
+ * (NO direct access).
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ uint8_t *dump_separator;
+
+ /**
+ * ',' separated list of allowed decoders.
+ * If NULL then all are allowed
+ * - encoding: unused
+ * - decoding: set by user through AVOptions (NO direct access)
+ */
+ char *codec_whitelist;
+
+ /**
+ * Properties of the stream that gets decoded
+ * To be accessed through av_codec_get_properties() (NO direct access)
+ * - encoding: unused
+ * - decoding: set by libavcodec
+ */
+ unsigned properties;
+#define FF_CODEC_PROPERTY_LOSSLESS 0x00000001
+#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS 0x00000002
+
+ /**
+ * Additional data associated with the entire coded stream.
+ *
+ * - decoding: unused
+ * - encoding: may be set by libavcodec after avcodec_open2().
+ */
+ AVPacketSideData *coded_side_data;
+ int nb_coded_side_data;
+
+} AVCodecContext;
+
+AVRational av_codec_get_pkt_timebase (const AVCodecContext *avctx);
+void av_codec_set_pkt_timebase (AVCodecContext *avctx, AVRational val);
+
+const AVCodecDescriptor *av_codec_get_codec_descriptor(const AVCodecContext *avctx);
+void av_codec_set_codec_descriptor(AVCodecContext *avctx, const AVCodecDescriptor *desc);
+
+unsigned av_codec_get_codec_properties(const AVCodecContext *avctx);
+
+int av_codec_get_lowres(const AVCodecContext *avctx);
+void av_codec_set_lowres(AVCodecContext *avctx, int val);
+
+int av_codec_get_seek_preroll(const AVCodecContext *avctx);
+void av_codec_set_seek_preroll(AVCodecContext *avctx, int val);
+
+uint16_t *av_codec_get_chroma_intra_matrix(const AVCodecContext *avctx);
+void av_codec_set_chroma_intra_matrix(AVCodecContext *avctx, uint16_t *val);
+
+/**
+ * AVProfile.
+ */
+typedef struct AVProfile {
+ int profile;
+ const char *name; ///< short name for the profile
+} AVProfile;
+
+typedef struct AVCodecDefault AVCodecDefault;
+
+struct AVSubtitle;
+
+/**
+ * AVCodec.
+ */
+typedef struct AVCodec {
+ /**
+ * Name of the codec implementation.
+ * The name is globally unique among encoders and among decoders (but an
+ * encoder and a decoder can share the same name).
+ * This is the primary way to find a codec from the user perspective.
+ */
+ const char *name;
+ /**
+ * Descriptive name for the codec, meant to be more human readable than name.
+ * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
+ */
+ const char *long_name;
+ enum AVMediaType type;
+ enum AVCodecID id;
+ /**
+ * Codec capabilities.
+ * see AV_CODEC_CAP_*
+ */
+ int capabilities;
+ const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0}
+ const enum AVPixelFormat *pix_fmts; ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1
+ const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0
+ const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1
+ const uint64_t *channel_layouts; ///< array of supported channel layouts, or NULL if unknown, array is terminated by 0
+ uint8_t max_lowres; ///< maximum value for lowres supported by the decoder, no direct access, use av_codec_get_max_lowres()
+ const AVClass *priv_class; ///< AVClass for the private context
+ const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}
+
+ /*****************************************************************
+ * No fields below this line are part of the public API. They
+ * may not be used outside of libavcodec and can be changed and
+ * removed at will.
+ * New public fields should be added right above.
+ *****************************************************************
+ */
+ int priv_data_size;
+ struct AVCodec *next;
+ /**
+ * @name Frame-level threading support functions
+ * @{
+ */
+ /**
+ * If defined, called on thread contexts when they are created.
+ * If the codec allocates writable tables in init(), re-allocate them here.
+ * priv_data will be set to a copy of the original.
+ */
+ int (*init_thread_copy)(AVCodecContext *);
+ /**
+ * Copy necessary context variables from a previous thread context to the current one.
+ * If not defined, the next thread will start automatically; otherwise, the codec
+ * must call ff_thread_finish_setup().
+ *
+ * dst and src will (rarely) point to the same context, in which case memcpy should be skipped.
+ */
+ int (*update_thread_context)(AVCodecContext *dst, const AVCodecContext *src);
+ /** @} */
+
+ /**
+ * Private codec-specific defaults.
+ */
+ const AVCodecDefault *defaults;
+
+ /**
+ * Initialize codec static data, called from avcodec_register().
+ */
+ void (*init_static_data)(struct AVCodec *codec);
+
+ int (*init)(AVCodecContext *);
+ int (*encode_sub)(AVCodecContext *, uint8_t *buf, int buf_size,
+ const struct AVSubtitle *sub);
+ /**
+ * Encode data to an AVPacket.
+ *
+ * @param avctx codec context
+ * @param avpkt output AVPacket (may contain a user-provided buffer)
+ * @param[in] frame AVFrame containing the raw data to be encoded
+ * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a
+ * non-empty packet was returned in avpkt.
+ * @return 0 on success, negative error code on failure
+ */
+ int (*encode2)(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame,
+ int *got_packet_ptr);
+ int (*decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt);
+ int (*close)(AVCodecContext *);
+ /**
+ * Flush buffers.
+ * Will be called when seeking
+ */
+ void (*flush)(AVCodecContext *);
+ /**
+ * Internal codec capabilities.
+ * See FF_CODEC_CAP_* in internal.h
+ */
+ int caps_internal;
+} AVCodec;
+
+int av_codec_get_max_lowres(const AVCodec *codec);
+
+struct MpegEncContext;
+
+/**
+ * @defgroup lavc_hwaccel AVHWAccel
+ * @{
+ */
+typedef struct AVHWAccel {
+ /**
+ * Name of the hardware accelerated codec.
+ * The name is globally unique among encoders and among decoders (but an
+ * encoder and a decoder can share the same name).
+ */
+ const char *name;
+
+ /**
+ * Type of codec implemented by the hardware accelerator.
+ *
+ * See AVMEDIA_TYPE_xxx
+ */
+ enum AVMediaType type;
+
+ /**
+ * Codec implemented by the hardware accelerator.
+ *
+ * See AV_CODEC_ID_xxx
+ */
+ enum AVCodecID id;
+
+ /**
+ * Supported pixel format.
+ *
+ * Only hardware accelerated formats are supported here.
+ */
+ enum AVPixelFormat pix_fmt;
+
+ /**
+ * Hardware accelerated codec capabilities.
+ * see HWACCEL_CODEC_CAP_*
+ */
+ int capabilities;
+
+ /*****************************************************************
+ * No fields below this line are part of the public API. They
+ * may not be used outside of libavcodec and can be changed and
+ * removed at will.
+ * New public fields should be added right above.
+ *****************************************************************
+ */
+ struct AVHWAccel *next;
+
+ /**
+ * Allocate a custom buffer
+ */
+ int (*alloc_frame)(AVCodecContext *avctx, AVFrame *frame);
+
+ /**
+ * Called at the beginning of each frame or field picture.
+ *
+ * Meaningful frame information (codec specific) is guaranteed to
+ * be parsed at this point. This function is mandatory.
+ *
+ * Note that buf can be NULL along with buf_size set to 0.
+ * Otherwise (buf non-NULL), the whole frame is available at this point.
+ *
+ * @param avctx the codec context
+ * @param buf the frame data buffer base
+ * @param buf_size the size of the frame in bytes
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);
+
+ /**
+ * Callback for each slice.
+ *
+ * Meaningful slice information (codec specific) is guaranteed to
+ * be parsed at this point. This function is mandatory.
+ * The only exception is XvMC, which works at the MB level.
+ *
+ * @param avctx the codec context
+ * @param buf the slice data buffer base
+ * @param buf_size the size of the slice in bytes
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);
+
+ /**
+ * Called at the end of each frame or field picture.
+ *
+ * The whole picture is parsed at this point and can now be sent
+ * to the hardware accelerator. This function is mandatory.
+ *
+ * @param avctx the codec context
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*end_frame)(AVCodecContext *avctx);
+
+ /**
+ * Size of per-frame hardware accelerator private data.
+ *
+ * Private data is allocated with av_mallocz() before
+ * AVCodecContext.get_buffer() and deallocated after
+ * AVCodecContext.release_buffer().
+ */
+ int frame_priv_data_size;
+
+ /**
+ * Called for every Macroblock in a slice.
+ *
+ * XvMC uses it to replace ff_mpv_decode_mb().
+ * Instead of decoding to a raw picture, MB parameters are
+ * stored in an array provided by the video driver.
+ *
+ * @param s the mpeg context
+ */
+ void (*decode_mb)(struct MpegEncContext *s);
+
+ /**
+ * Initialize the hwaccel private data.
+ *
+ * This will be called from ff_get_format(), after hwaccel and
+ * hwaccel_context are set and the hwaccel private data in AVCodecInternal
+ * is allocated.
+ */
+ int (*init)(AVCodecContext *avctx);
+
+ /**
+ * Uninitialize the hwaccel private data.
+ *
+ * This will be called from get_format() or avcodec_close(), after hwaccel
+ * and hwaccel_context are already uninitialized.
+ */
+ int (*uninit)(AVCodecContext *avctx);
+
+ /**
+ * Size of the private data to allocate in
+ * AVCodecInternal.hwaccel_priv_data.
+ */
+ int priv_data_size;
+} AVHWAccel;
+
+/**
+ * Hardware acceleration should be used for decoding even if the codec level
+ * used is unknown or higher than the maximum supported level reported by the
+ * hardware driver.
+ *
+ * It's generally a good idea to pass this flag unless you have a specific
+ * reason not to, as hardware tends to under-report supported levels.
+ */
+#define AV_HWACCEL_FLAG_IGNORE_LEVEL (1 << 0)
+
+/**
+ * Hardware acceleration can output YUV pixel formats with a different chroma
+ * sampling than 4:2:0 and/or other than 8 bits per component.
+ */
+#define AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH (1 << 1)
+
+/**
+ * @}
+ */
+
+#if FF_API_AVPICTURE
+/**
+ * @defgroup lavc_picture AVPicture
+ *
+ * Functions for working with AVPicture
+ * @{
+ */
+
+/**
+ * Picture data structure.
+ *
+ * Up to four components can be stored in it; the last component is
+ * alpha.
+ * @deprecated use AVFrame or imgutils functions instead
+ */
+typedef struct AVPicture {
+ attribute_deprecated
+ uint8_t *data[AV_NUM_DATA_POINTERS]; ///< pointers to the image data planes
+ attribute_deprecated
+ int linesize[AV_NUM_DATA_POINTERS]; ///< number of bytes per line
+} AVPicture;
+
+/**
+ * @}
+ */
+#endif
+
+enum AVSubtitleType {
+ SUBTITLE_NONE,
+
+ SUBTITLE_BITMAP, ///< A bitmap, pict will be set
+
+ /**
+ * Plain text, the text field must be set by the decoder and is
+ * authoritative. ass and pict fields may contain approximations.
+ */
+ SUBTITLE_TEXT,
+
+ /**
+ * Formatted text, the ass field must be set by the decoder and is
+ * authoritative. pict and text fields may contain approximations.
+ */
+ SUBTITLE_ASS,
+};
+
+#define AV_SUBTITLE_FLAG_FORCED 0x00000001
+
+typedef struct AVSubtitleRect {
+ int x; ///< top left corner of pict, undefined when pict is not set
+ int y; ///< top left corner of pict, undefined when pict is not set
+ int w; ///< width of pict, undefined when pict is not set
+ int h; ///< height of pict, undefined when pict is not set
+ int nb_colors; ///< number of colors in pict, undefined when pict is not set
+
+#if FF_API_AVPICTURE
+ /**
+ * @deprecated unused
+ */
+ attribute_deprecated
+ AVPicture pict;
+#endif
+ /**
+ * data+linesize for the bitmap of this subtitle.
+ * Can be set for text/ass as well once they are rendered.
+ */
+ uint8_t *data[4];
+ int linesize[4];
+
+ enum AVSubtitleType type;
+
+ char *text; ///< 0 terminated plain UTF-8 text
+
+ /**
+ * 0 terminated ASS/SSA compatible event line.
+ * The presentation of this is unaffected by the other values in this
+ * struct.
+ */
+ char *ass;
+
+ int flags;
+} AVSubtitleRect;
+
+typedef struct AVSubtitle {
+ uint16_t format; /* 0 = graphics */
+ uint32_t start_display_time; /* relative to packet pts, in ms */
+ uint32_t end_display_time; /* relative to packet pts, in ms */
+ unsigned num_rects;
+ AVSubtitleRect **rects;
+ int64_t pts; ///< Same as packet pts, in AV_TIME_BASE
+} AVSubtitle;
+
+/**
+ * If c is NULL, returns the first registered codec;
+ * if c is non-NULL, returns the next registered codec after c,
+ * or NULL if c is the last one.
+ */
+AVCodec *av_codec_next(const AVCodec *c);
+
+/**
+ * Return the LIBAVCODEC_VERSION_INT constant.
+ */
+unsigned avcodec_version(void);
+
+/**
+ * Return the libavcodec build-time configuration.
+ */
+const char *avcodec_configuration(void);
+
+/**
+ * Return the libavcodec license.
+ */
+const char *avcodec_license(void);
+
+/**
+ * Register the codec codec and initialize libavcodec.
+ *
+ * @warning either this function or avcodec_register_all() must be called
+ * before any other libavcodec functions.
+ *
+ * @see avcodec_register_all()
+ */
+void avcodec_register(AVCodec *codec);
+
+/**
+ * Register all the codecs, parsers and bitstream filters which were enabled at
+ * configuration time. If you do not call this function you can select exactly
+ * which formats you want to support, by using the individual registration
+ * functions.
+ *
+ * @see avcodec_register
+ * @see av_register_codec_parser
+ * @see av_register_bitstream_filter
+ */
+void avcodec_register_all(void);
+
+/**
+ * Allocate an AVCodecContext and set its fields to default values. The
+ * resulting struct should be freed with avcodec_free_context().
+ *
+ * @param codec if non-NULL, allocate private data and initialize defaults
+ * for the given codec. It is illegal to then call avcodec_open2()
+ * with a different codec.
+ * If NULL, then the codec-specific defaults won't be initialized,
+ * which may result in suboptimal default settings (this is
+ * important mainly for encoders, e.g. libx264).
+ *
+ * @return An AVCodecContext filled with default values or NULL on failure.
+ * @see avcodec_get_context_defaults
+ */
+AVCodecContext *avcodec_alloc_context3(const AVCodec *codec);
+
+/**
+ * Free the codec context and everything associated with it and write NULL to
+ * the provided pointer.
+ */
+void avcodec_free_context(AVCodecContext **avctx);
+
+/**
+ * Set the fields of the given AVCodecContext to default values corresponding
+ * to the given codec (defaults may be codec-dependent).
+ *
+ * Do not call this function if a non-NULL codec has been passed
+ * to avcodec_alloc_context3() that allocated this AVCodecContext.
+ * If codec is non-NULL, it is illegal to call avcodec_open2() with a
+ * different codec on this AVCodecContext.
+ */
+int avcodec_get_context_defaults3(AVCodecContext *s, const AVCodec *codec);
+
+/**
+ * Get the AVClass for AVCodecContext. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass *avcodec_get_class(void);
+
+/**
+ * Get the AVClass for AVFrame. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass *avcodec_get_frame_class(void);
+
+/**
+ * Get the AVClass for AVSubtitleRect. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass *avcodec_get_subtitle_rect_class(void);
+
+/**
+ * Copy the settings of the source AVCodecContext into the destination
+ * AVCodecContext. The resulting destination codec context will be
+ * unopened, i.e. you are required to call avcodec_open2() before you
+ * can use this AVCodecContext to decode/encode video/audio data.
+ *
+ * @param dest target codec context, should be initialized with
+ * avcodec_alloc_context3(NULL), but otherwise uninitialized
+ * @param src source codec context
+ * @return AVERROR() on error (e.g. memory allocation error), 0 on success
+ */
+int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src);
+
+/**
+ * Initialize the AVCodecContext to use the given AVCodec. Prior to using this
+ * function the context has to be allocated with avcodec_alloc_context3().
+ *
+ * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
+ * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
+ * retrieving a codec.
+ *
+ * @warning This function is not thread safe!
+ *
+ * @note Always call this function before using decoding routines (such as
+ * @ref avcodec_decode_video2()).
+ *
+ * @code
+ * AVDictionary *opts = NULL;
+ *
+ * avcodec_register_all();
+ * av_dict_set(&opts, "b", "2.5M", 0);
+ * codec = avcodec_find_decoder(AV_CODEC_ID_H264);
+ * if (!codec)
+ *     exit(1);
+ *
+ * context = avcodec_alloc_context3(codec);
+ *
+ * if (avcodec_open2(context, codec, &opts) < 0)
+ *     exit(1);
+ * @endcode
+ *
+ * @param avctx The context to initialize.
+ * @param codec The codec to open this context for. If a non-NULL codec has been
+ * previously passed to avcodec_alloc_context3() or
+ * avcodec_get_context_defaults3() for this context, then this
+ * parameter MUST be either NULL or equal to the previously passed
+ * codec.
+ * @param options A dictionary filled with AVCodecContext and codec-private options.
+ * On return this object will be filled with options that were not found.
+ *
+ * @return zero on success, a negative value on error
+ * @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(),
+ * av_dict_set(), av_opt_find().
+ */
+int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options);
+
+/**
+ * Close a given AVCodecContext and free all the data associated with it
+ * (but not the AVCodecContext itself).
+ *
+ * Calling this function on an AVCodecContext that hasn't been opened will free
+ * the codec-specific data allocated in avcodec_alloc_context3() /
+ * avcodec_get_context_defaults3() with a non-NULL codec. Subsequent calls will
+ * do nothing.
+ */
+int avcodec_close(AVCodecContext *avctx);
+
+/**
+ * Free all allocated data in the given subtitle struct.
+ *
+ * @param sub AVSubtitle to free.
+ */
+void avsubtitle_free(AVSubtitle *sub);
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_packet
+ * @{
+ */
+
+/**
+ * Allocate an AVPacket and set its fields to default values. The resulting
+ * struct must be freed using av_packet_free().
+ *
+ * @return An AVPacket filled with default values or NULL on failure.
+ *
+ * @note this only allocates the AVPacket itself, not the data buffers. Those
+ * must be allocated through other means such as av_new_packet.
+ *
+ * @see av_new_packet
+ */
+AVPacket *av_packet_alloc(void);
+
+/**
+ * Create a new packet that references the same data as src.
+ *
+ * This is a shortcut for av_packet_alloc()+av_packet_ref().
+ *
+ * @return newly created AVPacket on success, NULL on error.
+ *
+ * @see av_packet_alloc
+ * @see av_packet_ref
+ */
+AVPacket *av_packet_clone(AVPacket *src);
+
+/**
+ * Free the packet. If the packet is reference counted, it will be
+ * unreferenced first.
+ *
+ * @param pkt packet to be freed. The pointer will be set to NULL.
+ * @note passing NULL is a no-op.
+ */
+void av_packet_free(AVPacket **pkt);
+
+/**
+ * Initialize optional fields of a packet with default values.
+ *
+ * Note, this does not touch the data and size members, which have to be
+ * initialized separately.
+ *
+ * @param pkt packet
+ */
+void av_init_packet(AVPacket *pkt);
+
+/**
+ * Allocate the payload of a packet and initialize its fields with
+ * default values.
+ *
+ * @param pkt packet
+ * @param size wanted payload size
+ * @return 0 if OK, AVERROR_xxx otherwise
+ */
+int av_new_packet(AVPacket *pkt, int size);
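+
+/*
+ * Editorial sketch (not part of the upstream header) of the allocation
+ * pattern described above; the 4096-byte payload size is an arbitrary
+ * example value.
+ *
+ * @code
+ * AVPacket *pkt = av_packet_alloc();          // packet struct only, no payload
+ * if (!pkt || av_new_packet(pkt, 4096) < 0) { // allocate a refcounted payload
+ *     av_packet_free(&pkt);                   // safe on NULL, frees on failure
+ *     // handle the allocation error
+ * } else {
+ *     // ... fill pkt->data, hand the packet to a decoder, ...
+ *     av_packet_free(&pkt);                   // unrefs payload, frees struct
+ * }
+ * @endcode
+ */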
+
+/**
+ * Reduce packet size, correctly zeroing padding
+ *
+ * @param pkt packet
+ * @param size new size
+ */
+void av_shrink_packet(AVPacket *pkt, int size);
+
+/**
+ * Increase packet size, correctly zeroing padding
+ *
+ * @param pkt packet
+ * @param grow_by number of bytes by which to increase the size of the packet
+ */
+int av_grow_packet(AVPacket *pkt, int grow_by);
+
+/**
+ * Initialize a reference-counted packet from av_malloc()ed data.
+ *
+ * @param pkt packet to be initialized. This function will set the data, size,
+ * buf and destruct fields; all others are left untouched.
+ * @param data Data allocated by av_malloc() to be used as packet data. If this
+ * function returns successfully, the data is owned by the underlying AVBuffer.
+ * The caller may not access the data through other means.
+ * @param size size of data in bytes, without the padding. I.e. the full buffer
+ * size is assumed to be size + AV_INPUT_BUFFER_PADDING_SIZE.
+ *
+ * @return 0 on success, a negative AVERROR on error
+ */
+int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size);
+
+#if FF_API_AVPACKET_OLD_API
+/**
+ * @warning This is a hack - the packet memory allocation stuff is broken. The
+ * packet is allocated if it was not really allocated.
+ *
+ * @deprecated Use av_packet_ref
+ */
+attribute_deprecated
+int av_dup_packet(AVPacket *pkt);
+/**
+ * Copy packet, including contents
+ *
+ * @return 0 on success, negative AVERROR on fail
+ */
+int av_copy_packet(AVPacket *dst, const AVPacket *src);
+
+/**
+ * Copy packet side data
+ *
+ * @return 0 on success, negative AVERROR on fail
+ */
+int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src);
+
+/**
+ * Free a packet.
+ *
+ * @deprecated Use av_packet_unref
+ *
+ * @param pkt packet to free
+ */
+attribute_deprecated
+void av_free_packet(AVPacket *pkt);
+#endif
+/**
+ * Allocate new side information for a packet.
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param size side information size
+ * @return pointer to freshly allocated data or NULL otherwise
+ */
+uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ int size);
+
+/**
+ * Wrap an existing array as packet side data.
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param data the side data array. It must be allocated with the av_malloc()
+ * family of functions. The ownership of the data is transferred to
+ * pkt.
+ * @param size side information size
+ * @return a non-negative number on success, a negative AVERROR code on
+ * failure. On failure, the packet is unchanged and the data remains
+ * owned by the caller.
+ */
+int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ uint8_t *data, size_t size);
+
+/**
+ * Shrink the already allocated side data buffer
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param size new side information size
+ * @return 0 on success, < 0 on failure
+ */
+int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ int size);
+
+/**
+ * Get side information from packet.
+ *
+ * @param pkt packet
+ * @param type desired side information type
+ * @param size pointer for side information size to store (optional)
+ * @return pointer to data if present or NULL otherwise
+ */
+uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ int *size);
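+
+/*
+ * Editorial sketch (not upstream documentation): looking up one side data
+ * type on an assumed valid packet pkt. AV_PKT_DATA_PALETTE is just an example
+ * type; the returned buffer stays owned by the packet.
+ *
+ * @code
+ * int side_size = 0;
+ * uint8_t *pal = av_packet_get_side_data(pkt, AV_PKT_DATA_PALETTE, &side_size);
+ * if (pal) {
+ *     // side_size bytes of palette data are available here
+ * }
+ * @endcode
+ */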
+
+int av_packet_merge_side_data(AVPacket *pkt);
+
+int av_packet_split_side_data(AVPacket *pkt);
+
+const char *av_packet_side_data_name(enum AVPacketSideDataType type);
+
+/**
+ * Pack a dictionary for use in side_data.
+ *
+ * @param dict The dictionary to pack.
+ * @param size pointer to store the size of the returned data
+ * @return pointer to data if successful, NULL otherwise
+ */
+uint8_t *av_packet_pack_dictionary(AVDictionary *dict, int *size);
+/**
+ * Unpack a dictionary from side_data.
+ *
+ * @param data data from side_data
+ * @param size size of the data
+ * @param dict the metadata storage dictionary
+ * @return 0 on success, < 0 on failure
+ */
+int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict);
+
+
+/**
+ * Convenience function to free all the side data stored.
+ * All the other fields stay untouched.
+ *
+ * @param pkt packet
+ */
+void av_packet_free_side_data(AVPacket *pkt);
+
+/**
+ * Set up a new reference to the data described by a given packet.
+ *
+ * If src is reference-counted, setup dst as a new reference to the
+ * buffer in src. Otherwise allocate a new buffer in dst and copy the
+ * data from src into it.
+ *
+ * All the other fields are copied from src.
+ *
+ * @see av_packet_unref
+ *
+ * @param dst Destination packet
+ * @param src Source packet
+ *
+ * @return 0 on success, a negative AVERROR on error.
+ */
+int av_packet_ref(AVPacket *dst, const AVPacket *src);
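+
+/*
+ * Editorial sketch (not upstream documentation): keeping an extra reference
+ * to a packet's data beyond the lifetime of the source packet, following the
+ * ownership rules stated above. src is assumed to be a valid source packet.
+ *
+ * @code
+ * AVPacket copy;
+ * av_init_packet(&copy);
+ * if (av_packet_ref(&copy, src) == 0) {  // new reference (or deep copy) of src
+ *     // ... src may be unreferenced or reused here, copy stays valid ...
+ *     av_packet_unref(&copy);            // release the extra reference
+ * }
+ * @endcode
+ */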
+
+/**
+ * Wipe the packet.
+ *
+ * Unreference the buffer referenced by the packet and reset the
+ * remaining packet fields to their default values.
+ *
+ * @param pkt The packet to be unreferenced.
+ */
+void av_packet_unref(AVPacket *pkt);
+
+/**
+ * Move every field in src to dst and reset src.
+ *
+ * @see av_packet_unref
+ *
+ * @param src Source packet, will be reset
+ * @param dst Destination packet
+ */
+void av_packet_move_ref(AVPacket *dst, AVPacket *src);
+
+/**
+ * Copy only "properties" fields from src to dst.
+ *
+ * Properties for the purpose of this function are all the fields
+ * besides those related to the packet data (buf, data, size).
+ *
+ * @param dst Destination packet
+ * @param src Source packet
+ *
+ * @return 0 on success, AVERROR on failure.
+ *
+ */
+int av_packet_copy_props(AVPacket *dst, const AVPacket *src);
+
+/**
+ * Convert valid timing fields (timestamps / durations) in a packet from one
+ * timebase to another. Timestamps with unknown values (AV_NOPTS_VALUE) will be
+ * ignored.
+ *
+ * @param pkt packet on which the conversion will be performed
+ * @param tb_src source timebase, in which the timing fields in pkt are
+ * expressed
+ * @param tb_dst destination timebase, to which the timing fields will be
+ * converted
+ */
+void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst);
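+
+/*
+ * Editorial sketch (not upstream documentation): converting the timing of an
+ * assumed valid packet pkt from a demuxer stream timebase to a codec
+ * timebase; the 1/90000 and 1/25 values are illustrative only.
+ *
+ * @code
+ * AVRational tb_src = { 1, 90000 };           // e.g. an MPEG-TS timebase
+ * AVRational tb_dst = { 1, 25 };              // e.g. the target timebase
+ * av_packet_rescale_ts(pkt, tb_src, tb_dst);  // pts/dts/duration converted
+ * @endcode
+ */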
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_decoding
+ * @{
+ */
+
+/**
+ * Find a registered decoder with a matching codec ID.
+ *
+ * @param id AVCodecID of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_decoder(enum AVCodecID id);
+
+/**
+ * Find a registered decoder with the specified name.
+ *
+ * @param name name of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_decoder_by_name(const char *name);
+
+/**
+ * The default callback for AVCodecContext.get_buffer2(). It is made public so
+ * it can be called by custom get_buffer2() implementations for decoders without
+ * AV_CODEC_CAP_DR1 set.
+ */
+int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags);
+
+#if FF_API_EMU_EDGE
+/**
+ * Return the amount of padding in pixels which the get_buffer callback must
+ * provide around the edge of the image for codecs which do not have the
+ * CODEC_FLAG_EMU_EDGE flag.
+ *
+ * @return Required padding in pixels.
+ *
+ * @deprecated CODEC_FLAG_EMU_EDGE is deprecated, so this function is no longer
+ * needed
+ */
+attribute_deprecated
+unsigned avcodec_get_edge_width(void);
+#endif
+
+/**
+ * Modify width and height values so that they will result in a memory
+ * buffer that is acceptable for the codec if you do not use any horizontal
+ * padding.
+ *
+ * May only be used if a codec with AV_CODEC_CAP_DR1 has been opened.
+ */
+void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);
+
+/**
+ * Modify width and height values so that they will result in a memory
+ * buffer that is acceptable for the codec if you also ensure that all
+ * line sizes are a multiple of the respective linesize_align[i].
+ *
+ * May only be used if a codec with AV_CODEC_CAP_DR1 has been opened.
+ */
+void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
+ int linesize_align[AV_NUM_DATA_POINTERS]);
+
+/**
+ * Converts AVChromaLocation to swscale x/y chroma position.
+ *
+ * The positions represent the chroma (0,0) position in a coordinate system
+ * with luma (0,0) representing the origin and luma (1,1) representing 256,256.
+ *
+ * @param xpos horizontal chroma sample position
+ * @param ypos vertical chroma sample position
+ */
+int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos);
+
+/**
+ * Converts swscale x/y chroma position to AVChromaLocation.
+ *
+ * The positions represent the chroma (0,0) position in a coordinate system
+ * with luma (0,0) representing the origin and luma (1,1) representing 256,256.
+ *
+ * @param xpos horizontal chroma sample position
+ * @param ypos vertical chroma sample position
+ */
+enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos);
+
+/**
+ * Decode the audio frame of size avpkt->size from avpkt->data into frame.
+ *
+ * Some decoders may support multiple frames in a single AVPacket. Such
+ * decoders would then just decode the first frame and the return value would be
+ * less than the packet size. In this case, avcodec_decode_audio4 has to be
+ * called again with an AVPacket containing the remaining data in order to
+ * decode the second frame, etc... Even if no frames are returned, the packet
+ * needs to be fed to the decoder with remaining data until it is completely
+ * consumed or an error occurs.
+ *
+ * Some decoders (those marked with AV_CODEC_CAP_DELAY) have a delay between input
+ * and output. This means that for some packets they will not immediately
+ * produce decoded output and need to be flushed at the end of decoding to get
+ * all the decoded data. Flushing is done by calling this function with packets
+ * with avpkt->data set to NULL and avpkt->size set to 0 until it stops
+ * returning samples. It is safe to flush even those decoders that are not
+ * marked with AV_CODEC_CAP_DELAY, then no samples will be returned.
+ *
+ * @warning The input buffer, avpkt->data must be AV_INPUT_BUFFER_PADDING_SIZE
+ * larger than the actual read bytes because some optimized bitstream
+ * readers read 32 or 64 bits at once and could read over the end.
+ *
+ * @note The AVCodecContext MUST have been opened with @ref avcodec_open2()
+ * before packets may be fed to the decoder.
+ *
+ * @param avctx the codec context
+ * @param[out] frame The AVFrame in which to store decoded audio samples.
+ * The decoder will allocate a buffer for the decoded frame by
+ * calling the AVCodecContext.get_buffer2() callback.
+ * When AVCodecContext.refcounted_frames is set to 1, the frame is
+ * reference counted and the returned reference belongs to the
+ * caller. The caller must release the frame using av_frame_unref()
+ * when the frame is no longer needed. The caller may safely write
+ * to the frame if av_frame_is_writable() returns 1.
+ * When AVCodecContext.refcounted_frames is set to 0, the returned
+ * reference belongs to the decoder and is valid only until the
+ * next call to this function or until closing or flushing the
+ * decoder. The caller may not write to it.
+ * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is
+ * non-zero. Note that this field being set to zero
+ * does not mean that an error has occurred. For
+ * decoders with AV_CODEC_CAP_DELAY set, no given decode
+ * call is guaranteed to produce a frame.
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ * At least avpkt->data and avpkt->size should be set. Some
+ * decoders might also require additional fields to be set.
+ * @return A negative error code is returned if an error occurred during
+ * decoding, otherwise the number of bytes consumed from the input
+ * AVPacket is returned.
+ */
+int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame,
+ int *got_frame_ptr, const AVPacket *avpkt);
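+
+/*
+ * Editorial sketch (not upstream documentation): draining one AVPacket that
+ * may contain several audio frames, as described above. avctx is assumed to
+ * be an opened decoder context, frame an allocated AVFrame, and
+ * handle_samples() a hypothetical consumer; error handling is minimal.
+ *
+ * @code
+ * while (pkt.size > 0) {
+ *     int got_frame = 0;
+ *     int used = avcodec_decode_audio4(avctx, frame, &got_frame, &pkt);
+ *     if (used < 0)
+ *         break;                 // decoding error
+ *     if (got_frame)
+ *         handle_samples(frame); // hypothetical consumer of the decoded frame
+ *     pkt.data += used;          // advance past the consumed bytes
+ *     pkt.size -= used;
+ * }
+ * @endcode
+ */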
+
+/**
+ * Decode the video frame of size avpkt->size from avpkt->data into picture.
+ * Some decoders may support multiple frames in a single AVPacket; such
+ * decoders would then just decode the first frame.
+ *
+ * @warning The input buffer must be AV_INPUT_BUFFER_PADDING_SIZE larger than
+ * the actual read bytes because some optimized bitstream readers read 32 or 64
+ * bits at once and could read over the end.
+ *
+ * @warning The end of the input buffer buf should be set to 0 to ensure that
+ * no overreading happens for damaged MPEG streams.
+ *
+ * @note Codecs which have the AV_CODEC_CAP_DELAY capability set have a delay
+ * between input and output, these need to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to return the remaining frames.
+ *
+ * @note The AVCodecContext MUST have been opened with @ref avcodec_open2()
+ * before packets may be fed to the decoder.
+ *
+ * @param avctx the codec context
+ * @param[out] picture The AVFrame in which the decoded video frame will be stored.
+ * Use av_frame_alloc() to get an AVFrame. The codec will
+ * allocate memory for the actual bitmap by calling the
+ * AVCodecContext.get_buffer2() callback.
+ * When AVCodecContext.refcounted_frames is set to 1, the frame is
+ * reference counted and the returned reference belongs to the
+ * caller. The caller must release the frame using av_frame_unref()
+ * when the frame is no longer needed. The caller may safely write
+ * to the frame if av_frame_is_writable() returns 1.
+ * When AVCodecContext.refcounted_frames is set to 0, the returned
+ * reference belongs to the decoder and is valid only until the
+ * next call to this function or until closing or flushing the
+ * decoder. The caller may not write to it.
+ *
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ * You can create such a packet with av_init_packet() and by then setting
+ * data and size; some decoders might in addition need other fields like
+ * flags&AV_PKT_FLAG_KEY. All decoders are designed to use the fewest
+ * fields possible.
+ * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero.
+ * @return On error a negative value is returned, otherwise the number of bytes
+ * used or zero if no frame could be decompressed.
+ */
+int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
+ int *got_picture_ptr,
+ const AVPacket *avpkt);
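+
+/*
+ * Editorial sketch (not upstream documentation) of the flushing behaviour
+ * described above for AV_CODEC_CAP_DELAY decoders: after the last real
+ * packet, empty packets are fed until no more frames come out. Assumes an
+ * opened avctx and an allocated picture; handle_picture() is a hypothetical
+ * consumer.
+ *
+ * @code
+ * AVPacket flush_pkt;
+ * av_init_packet(&flush_pkt);
+ * flush_pkt.data = NULL;
+ * flush_pkt.size = 0;
+ * for (;;) {
+ *     int got_picture = 0;
+ *     int ret = avcodec_decode_video2(avctx, picture, &got_picture, &flush_pkt);
+ *     if (ret < 0 || !got_picture)
+ *         break;                 // decoder fully drained (or error)
+ *     handle_picture(picture);   // hypothetical consumer of the decoded frame
+ * }
+ * @endcode
+ */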
+
+/**
+ * Decode a subtitle message.
+ * Return a negative value on error, otherwise return the number of bytes used.
+ * If no subtitle could be decompressed, got_sub_ptr is zero.
+ * Otherwise, the subtitle is stored in *sub.
+ * Note that AV_CODEC_CAP_DR1 is not available for subtitle codecs. This is for
+ * simplicity, because the performance difference is expected to be negligible
+ * and reusing a get_buffer written for video codecs would probably perform badly
+ * due to a potentially very different allocation pattern.
+ *
+ * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input
+ * and output. This means that for some packets they will not immediately
+ * produce decoded output and need to be flushed at the end of decoding to get
+ * all the decoded data. Flushing is done by calling this function with packets
+ * with avpkt->data set to NULL and avpkt->size set to 0 until it stops
+ * returning subtitles. It is safe to flush even those decoders that are not
+ * marked with CODEC_CAP_DELAY, then no subtitles will be returned.
+ *
+ * @note The AVCodecContext MUST have been opened with @ref avcodec_open2()
+ * before packets may be fed to the decoder.
+ *
+ * @param avctx the codec context
+ * @param[out] sub The preallocated AVSubtitle in which the decoded subtitle will be stored,
+ * must be freed with avsubtitle_free if *got_sub_ptr is set.
+ * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero.
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ */
+int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
+ int *got_sub_ptr,
+ AVPacket *avpkt);
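+
+/*
+ * Editorial sketch (not upstream documentation), following the freeing rule
+ * stated above: avsubtitle_free() is called only when *got_sub_ptr was set.
+ * Assumes an opened avctx and a valid input packet pkt.
+ *
+ * @code
+ * AVSubtitle sub;
+ * int got_sub = 0;
+ * if (avcodec_decode_subtitle2(avctx, &sub, &got_sub, &pkt) >= 0 && got_sub) {
+ *     // ... use sub.rects[0 .. sub.num_rects-1] ...
+ *     avsubtitle_free(&sub);     // release rects and their data
+ * }
+ * @endcode
+ */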
+
+/**
+ * @defgroup lavc_parsing Frame parsing
+ * @{
+ */
+
+enum AVPictureStructure {
+ AV_PICTURE_STRUCTURE_UNKNOWN, ///< unknown
+ AV_PICTURE_STRUCTURE_TOP_FIELD, ///< coded as top field
+ AV_PICTURE_STRUCTURE_BOTTOM_FIELD, ///< coded as bottom field
+ AV_PICTURE_STRUCTURE_FRAME, ///< coded as frame
+};
+
+typedef struct AVCodecParserContext {
+ void *priv_data;
+ struct AVCodecParser *parser;
+ int64_t frame_offset; /* offset of the current frame */
+ int64_t cur_offset; /* current offset
+ (incremented by each av_parser_parse()) */
+ int64_t next_frame_offset; /* offset of the next frame */
+ /* video info */
+ int pict_type; /* XXX: Put it back in AVCodecContext. */
+ /**
+ * This field is used for proper frame duration computation in lavf.
+ * It signals how much longer the frame duration of the current frame
+ * is compared to normal frame duration.
+ *
+ * frame_duration = (1 + repeat_pict) * time_base
+ *
+ * It is used by codecs like H.264 to display telecined material.
+ */
+ int repeat_pict; /* XXX: Put it back in AVCodecContext. */
+ int64_t pts; /* pts of the current frame */
+ int64_t dts; /* dts of the current frame */
+
+ /* private data */
+ int64_t last_pts;
+ int64_t last_dts;
+ int fetch_timestamp;
+
+#define AV_PARSER_PTS_NB 4
+ int cur_frame_start_index;
+ int64_t cur_frame_offset[AV_PARSER_PTS_NB];
+ int64_t cur_frame_pts[AV_PARSER_PTS_NB];
+ int64_t cur_frame_dts[AV_PARSER_PTS_NB];
+
+ int flags;
+#define PARSER_FLAG_COMPLETE_FRAMES 0x0001
+#define PARSER_FLAG_ONCE 0x0002
+/// Set if the parser has a valid file offset
+#define PARSER_FLAG_FETCHED_OFFSET 0x0004
+#define PARSER_FLAG_USE_CODEC_TS 0x1000
+
+ int64_t offset; ///< byte offset from the start of the starting packet
+ int64_t cur_frame_end[AV_PARSER_PTS_NB];
+
+ /**
+ * Set by parser to 1 for key frames and 0 for non-key frames.
+ * It is initialized to -1, so if the parser doesn't set this flag,
+ * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames
+ * will be used.
+ */
+ int key_frame;
+
+#if FF_API_CONVERGENCE_DURATION
+ /**
+ * @deprecated unused
+ */
+ attribute_deprecated
+ int64_t convergence_duration;
+#endif
+
+ // Timestamp generation support:
+ /**
+ * Synchronization point for start of timestamp generation.
+ *
+ * Set to >0 for sync point, 0 for no sync point and <0 for undefined
+ * (default).
+ *
+ * For example, this corresponds to presence of H.264 buffering period
+ * SEI message.
+ */
+ int dts_sync_point;
+
+ /**
+ * Offset of the current timestamp against last timestamp sync point in
+ * units of AVCodecContext.time_base.
+ *
+ * Set to INT_MIN when dts_sync_point is unused. Otherwise, it must
+ * contain a valid timestamp offset.
+ *
+ * Note that the timestamp of a sync point usually has a nonzero
+ * dts_ref_dts_delta, which refers to the previous sync point. The offset of
+ * the next frame after a timestamp sync point will usually be 1.
+ *
+ * For example, this corresponds to H.264 cpb_removal_delay.
+ */
+ int dts_ref_dts_delta;
+
+ /**
+ * Presentation delay of current frame in units of AVCodecContext.time_base.
+ *
+ * Set to INT_MIN when dts_sync_point is unused. Otherwise, it must
+ * contain valid non-negative timestamp delta (presentation time of a frame
+ * must not lie in the past).
+ *
+ * This delay represents the difference between decoding and presentation
+ * time of the frame.
+ *
+ * For example, this corresponds to H.264 dpb_output_delay.
+ */
+ int pts_dts_delta;
+
+ /**
+ * Position of the packet in file.
+ *
+ * Analogous to cur_frame_pts/dts
+ */
+ int64_t cur_frame_pos[AV_PARSER_PTS_NB];
+
+ /**
+ * Byte position of currently parsed frame in stream.
+ */
+ int64_t pos;
+
+ /**
+ * Previous frame byte position.
+ */
+ int64_t last_pos;
+
+ /**
+ * Duration of the current frame.
+ * For audio, this is in units of 1 / AVCodecContext.sample_rate.
+ * For all other types, this is in units of AVCodecContext.time_base.
+ */
+ int duration;
+
+ enum AVFieldOrder field_order;
+
+ /**
+ * Indicate whether a picture is coded as a frame, top field or bottom field.
+ *
+ * For example, H.264 field_pic_flag equal to 0 corresponds to
+ * AV_PICTURE_STRUCTURE_FRAME. An H.264 picture with field_pic_flag
+ * equal to 1 and bottom_field_flag equal to 0 corresponds to
+ * AV_PICTURE_STRUCTURE_TOP_FIELD.
+ */
+ enum AVPictureStructure picture_structure;
+
+ /**
+ * Picture number incremented in presentation or output order.
+ * This field may be reinitialized at the first picture of a new sequence.
+ *
+ * For example, this corresponds to H.264 PicOrderCnt.
+ */
+ int output_picture_number;
+
+ /**
+ * Dimensions of the decoded video intended for presentation.
+ */
+ int width;
+ int height;
+
+ /**
+ * Dimensions of the coded video.
+ */
+ int coded_width;
+ int coded_height;
+
+ /**
+ * The format of the coded data; corresponds to enum AVPixelFormat for video
+ * and to enum AVSampleFormat for audio.
+ *
+ * Note that a decoder can have considerable freedom in how exactly it
+ * decodes the data, so the format reported here might be different from the
+ * one returned by a decoder.
+ */
+ int format;
+} AVCodecParserContext;
+
+typedef struct AVCodecParser {
+ int codec_ids[5]; /* several codec IDs are permitted */
+ int priv_data_size;
+ int (*parser_init)(AVCodecParserContext *s);
+ /* This callback never returns an error; a negative value means that
+ * the frame start was in a previous packet. */
+ int (*parser_parse)(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ const uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size);
+ void (*parser_close)(AVCodecParserContext *s);
+ int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size);
+ struct AVCodecParser *next;
+} AVCodecParser;
+
+AVCodecParser *av_parser_next(const AVCodecParser *c);
+
+void av_register_codec_parser(AVCodecParser *parser);
+AVCodecParserContext *av_parser_init(int codec_id);
+
+/**
+ * Parse a packet.
+ *
+ * @param s parser context.
+ * @param avctx codec context.
+ * @param poutbuf set to pointer to parsed buffer or NULL if not yet finished.
+ * @param poutbuf_size set to size of parsed buffer or zero if not yet finished.
+ * @param buf input buffer.
+ * @param buf_size input length; to signal EOF, this should be 0 (so that the last frame can be output).
+ * @param pts input presentation timestamp.
+ * @param dts input decoding timestamp.
+ * @param pos input byte position in stream.
+ * @return the number of bytes of the input bitstream used.
+ *
+ * Example (avctx being the codec context in use):
+ * @code
+ * while (in_len) {
+ *     len = av_parser_parse2(myparser, avctx, &data, &size,
+ *                            in_data, in_len,
+ *                            pts, dts, pos);
+ *     in_data += len;
+ *     in_len  -= len;
+ *
+ *     if (size)
+ *         decode_frame(data, size);
+ * }
+ * @endcode
+ */
+int av_parser_parse2(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size,
+ int64_t pts, int64_t dts,
+ int64_t pos);
+
+/**
+ * @return 0 if the output buffer is a subset of the input, 1 if it is allocated and must be freed
+ * @deprecated use AVBitStreamFilter
+ */
+int av_parser_change(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe);
+void av_parser_close(AVCodecParserContext *s);
+
+/**
+ * @}
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_encoding
+ * @{
+ */
+
+/**
+ * Find a registered encoder with a matching codec ID.
+ *
+ * @param id AVCodecID of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_encoder(enum AVCodecID id);
+
+/**
+ * Find a registered encoder with the specified name.
+ *
+ * @param name name of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_encoder_by_name(const char *name);
+
+/**
+ * Encode a frame of audio.
+ *
+ * Takes input samples from frame and writes the next output packet, if
+ * available, to avpkt. The output packet does not necessarily contain data for
+ * the most recent frame, as encoders can delay, split, and combine input frames
+ * internally as needed.
+ *
+ * @param avctx codec context
+ * @param avpkt output AVPacket.
+ * The user can supply an output buffer by setting
+ * avpkt->data and avpkt->size prior to calling the
+ * function, but if the size of the user-provided data is not
+ * large enough, encoding will fail. If avpkt->data and
+ * avpkt->size are set, avpkt->destruct must also be set. All
+ * other AVPacket fields will be reset by the encoder using
+ * av_init_packet(). If avpkt->data is NULL, the encoder will
+ * allocate it. The encoder will set avpkt->size to the size
+ * of the output packet.
+ *
+ * If this function fails or produces no output, avpkt will be
+ * freed using av_packet_unref().
+ * @param[in] frame AVFrame containing the raw audio data to be encoded.
+ * May be NULL when flushing an encoder that has the
+ * AV_CODEC_CAP_DELAY capability set.
+ * If AV_CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame
+ * can have any number of samples.
+ * If it is not set, frame->nb_samples must be equal to
+ * avctx->frame_size for all frames except the last.
+ * The final frame may be smaller than avctx->frame_size.
+ * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
+ * output packet is non-empty, and to 0 if it is
+ * empty. If the function returns an error, the
+ * packet can be assumed to be invalid, and the
+ * value of got_packet_ptr is undefined and should
+ * not be used.
+ * @return 0 on success, negative error code on failure
+ */
+int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr);
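+
+/*
+ * Minimal usage sketch: encode one audio frame and let the encoder allocate
+ * the output buffer. Assumes avctx is an opened audio encoder, frame holds the
+ * input samples, and write_packet() is a hypothetical caller-side output
+ * routine.
+ *
+ * @code
+ * AVPacket pkt;
+ * int got_packet = 0;
+ * av_init_packet(&pkt);
+ * pkt.data = NULL; // the encoder allocates and sizes the payload
+ * pkt.size = 0;
+ * if (avcodec_encode_audio2(avctx, &pkt, frame, &got_packet) == 0 && got_packet) {
+ *     write_packet(&pkt);
+ *     av_packet_unref(&pkt);
+ * }
+ * @endcode
+ */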
+
+/**
+ * Encode a frame of video.
+ *
+ * Takes input raw video data from frame and writes the next output packet, if
+ * available, to avpkt. The output packet does not necessarily contain data for
+ * the most recent frame, as encoders can delay and reorder input frames
+ * internally as needed.
+ *
+ * @param avctx codec context
+ * @param avpkt output AVPacket.
+ * The user can supply an output buffer by setting
+ * avpkt->data and avpkt->size prior to calling the
+ * function, but if the size of the user-provided data is not
+ * large enough, encoding will fail. All other AVPacket fields
+ * will be reset by the encoder using av_init_packet(). If
+ * avpkt->data is NULL, the encoder will allocate it.
+ * The encoder will set avpkt->size to the size of the
+ * output packet. The returned data (if any) belongs to the
+ * caller, who is responsible for freeing it.
+ *
+ * If this function fails or produces no output, avpkt will be
+ * freed using av_packet_unref().
+ * @param[in] frame AVFrame containing the raw video data to be encoded.
+ * May be NULL when flushing an encoder that has the
+ * AV_CODEC_CAP_DELAY capability set.
+ * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
+ * output packet is non-empty, and to 0 if it is
+ * empty. If the function returns an error, the
+ * packet can be assumed to be invalid, and the
+ * value of got_packet_ptr is undefined and should
+ * not be used.
+ * @return 0 on success, negative error code on failure
+ */
+int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr);
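+
+/*
+ * Draining sketch: after the last input frame, an encoder with
+ * AV_CODEC_CAP_DELAY is flushed by passing NULL frames until no packet is
+ * produced. Assumes avctx is an opened video encoder and write_packet() is a
+ * hypothetical caller-side output routine.
+ *
+ * @code
+ * AVPacket pkt;
+ * int got_packet = 1;
+ * while (got_packet) {
+ *     av_init_packet(&pkt);
+ *     pkt.data = NULL;
+ *     pkt.size = 0;
+ *     if (avcodec_encode_video2(avctx, &pkt, NULL, &got_packet) < 0)
+ *         break;
+ *     if (got_packet) {
+ *         write_packet(&pkt);
+ *         av_packet_unref(&pkt);
+ *     }
+ * }
+ * @endcode
+ */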
+
+int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+ const AVSubtitle *sub);
+
+
+/**
+ * @}
+ */
+
+#if FF_API_AVCODEC_RESAMPLE
+/**
+ * @defgroup lavc_resample Audio resampling
+ * @ingroup libavc
+ * @deprecated use libswresample instead
+ *
+ * @{
+ */
+struct ReSampleContext;
+struct AVResampleContext;
+
+typedef struct ReSampleContext ReSampleContext;
+
+/**
+ * Initialize audio resampling context.
+ *
+ * @param output_channels number of output channels
+ * @param input_channels number of input channels
+ * @param output_rate output sample rate
+ * @param input_rate input sample rate
+ * @param sample_fmt_out requested output sample format
+ * @param sample_fmt_in input sample format
+ * @param filter_length length of each FIR filter in the filterbank relative to the cutoff frequency
+ * @param log2_phase_count log2 of the number of entries in the polyphase filterbank
+ * @param linear if 1 then the used FIR filter will be linearly interpolated
+ *                      between the 2 closest, if 0 the closest will be used
+ * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate
+ * @return allocated ReSampleContext, NULL if error occurred
+ */
+attribute_deprecated
+ReSampleContext *av_audio_resample_init(int output_channels, int input_channels,
+ int output_rate, int input_rate,
+ enum AVSampleFormat sample_fmt_out,
+ enum AVSampleFormat sample_fmt_in,
+ int filter_length, int log2_phase_count,
+ int linear, double cutoff);
+
+attribute_deprecated
+int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples);
+
+/**
+ * Free resample context.
+ *
+ * @param s a non-NULL pointer to a resample context previously
+ * created with av_audio_resample_init()
+ */
+attribute_deprecated
+void audio_resample_close(ReSampleContext *s);
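+
+/*
+ * Sketch of the deprecated usage pattern (libswresample is the replacement):
+ * convert 48 kHz stereo S16 to 44.1 kHz stereo S16. The in/out buffers, their
+ * sizes and in_samples are assumed to be provided by the caller; the filter
+ * parameters (16, 10, 0, 0.8) are illustrative.
+ *
+ * @code
+ * ReSampleContext *rs = av_audio_resample_init(2, 2, 44100, 48000,
+ *                                              AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16,
+ *                                              16, 10, 0, 0.8);
+ * int out_samples = audio_resample(rs, out, in, in_samples);
+ * audio_resample_close(rs);
+ * @endcode
+ */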
+
+
+/**
+ * Initialize an audio resampler.
+ * Note, if either rate is not an integer then simply scale both rates up so they are.
+ * @param filter_length length of each FIR filter in the filterbank relative to the cutoff freq
+ * @param log2_phase_count log2 of the number of entries in the polyphase filterbank
+ * @param linear If 1 then the used FIR filter will be linearly interpolated
+ *               between the 2 closest, if 0 the closest will be used
+ * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate
+ */
+attribute_deprecated
+struct AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_length, int log2_phase_count, int linear, double cutoff);
+
+/**
+ * Resample an array of samples using a previously configured context.
+ * @param src an array of unconsumed samples
+ * @param consumed the number of samples of src which have been consumed is returned here
+ * @param src_size the number of unconsumed samples available
+ * @param dst_size the amount of space in samples available in dst
+ * @param update_ctx If this is 0 then the context will not be modified, that way several channels can be resampled with the same context.
+ * @return the number of samples written in dst or -1 if an error occurred
+ */
+attribute_deprecated
+int av_resample(struct AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int dst_size, int update_ctx);
+
+
+/**
+ * Compensate samplerate/timestamp drift. The compensation is done by changing
+ * the resampler parameters, so no audible clicks or similar distortions occur.
+ * @param compensation_distance distance in output samples over which the compensation should be performed
+ * @param sample_delta number of output samples by which the output should be shortened
+ *
+ * example: av_resample_compensate(c, 10, 500)
+ * here instead of 510 samples only 500 samples would be output
+ *
+ * note, due to rounding the actual compensation might be slightly different,
+ * especially if the compensation_distance is large and the in_rate used during init is small
+ */
+attribute_deprecated
+void av_resample_compensate(struct AVResampleContext *c, int sample_delta, int compensation_distance);
+attribute_deprecated
+void av_resample_close(struct AVResampleContext *c);
+
+/**
+ * @}
+ */
+#endif
+
+#if FF_API_AVPICTURE
+/**
+ * @addtogroup lavc_picture
+ * @{
+ */
+
+/**
+ * @deprecated unused
+ */
+attribute_deprecated
+int avpicture_alloc(AVPicture *picture, enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * @deprecated unused
+ */
+attribute_deprecated
+void avpicture_free(AVPicture *picture);
+
+/**
+ * @deprecated use av_image_fill_arrays() instead.
+ */
+attribute_deprecated
+int avpicture_fill(AVPicture *picture, const uint8_t *ptr,
+ enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * @deprecated use av_image_copy_to_buffer() instead.
+ */
+attribute_deprecated
+int avpicture_layout(const AVPicture *src, enum AVPixelFormat pix_fmt,
+ int width, int height,
+ unsigned char *dest, int dest_size);
+
+/**
+ * @deprecated use av_image_get_buffer_size() instead.
+ */
+attribute_deprecated
+int avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * @deprecated av_image_copy() instead.
+ */
+attribute_deprecated
+void av_picture_copy(AVPicture *dst, const AVPicture *src,
+ enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * @deprecated unused
+ */
+attribute_deprecated
+int av_picture_crop(AVPicture *dst, const AVPicture *src,
+ enum AVPixelFormat pix_fmt, int top_band, int left_band);
+
+/**
+ * @deprecated unused
+ */
+attribute_deprecated
+int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum AVPixelFormat pix_fmt,
+ int padtop, int padbottom, int padleft, int padright, int *color);
+
+/**
+ * @}
+ */
+#endif
+
+/**
+ * @defgroup lavc_misc Utility functions
+ * @ingroup libavc
+ *
+ * Miscellaneous utility functions related to both encoding and decoding
+ * (or neither).
+ * @{
+ */
+
+/**
+ * @defgroup lavc_misc_pixfmt Pixel formats
+ *
+ * Functions for working with pixel formats.
+ * @{
+ */
+
+/**
+ * Utility function to access log2_chroma_w and log2_chroma_h from
+ * the AVPixFmtDescriptor of the pixel format.
+ *
+ * This function asserts that pix_fmt is valid. See av_pix_fmt_get_chroma_sub_sample
+ * for one that returns a failure code and continues in case of invalid
+ * pix_fmts.
+ *
+ * @param[in] pix_fmt the pixel format
+ * @param[out] h_shift store log2_chroma_w
+ * @param[out] v_shift store log2_chroma_h
+ *
+ * @see av_pix_fmt_get_chroma_sub_sample
+ */
+
+void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift);
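+
+/*
+ * Example: AV_PIX_FMT_YUV420P subsamples chroma by 2 in both directions, so
+ * both shifts come back as 1.
+ *
+ * @code
+ * int h_shift, v_shift;
+ * avcodec_get_chroma_sub_sample(AV_PIX_FMT_YUV420P, &h_shift, &v_shift);
+ * // h_shift == 1, v_shift == 1
+ * @endcode
+ */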
+
+/**
+ * Return a value representing the fourCC code associated to the
+ * pixel format pix_fmt, or 0 if no associated fourCC code can be
+ * found.
+ */
+unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt);
+
+/**
+ * @deprecated see av_get_pix_fmt_loss()
+ */
+int avcodec_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, enum AVPixelFormat src_pix_fmt,
+ int has_alpha);
+
+/**
+ * Find the best pixel format to convert to given a certain source pixel
+ * format. When converting from one pixel format to another, information loss
+ * may occur. For example, when converting from RGB24 to GRAY, the color
+ * information will be lost. Similarly, other losses occur when converting from
+ * some formats to other formats. avcodec_find_best_pix_fmt_of_list() searches which of
+ * the given pixel formats should be used to suffer the least amount of loss.
+ * The pixel formats from which it chooses one are determined by the
+ * pix_fmt_list parameter.
+ *
+ *
+ * @param[in] pix_fmt_list AV_PIX_FMT_NONE terminated array of pixel formats to choose from
+ * @param[in] src_pix_fmt source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur.
+ * @return The best pixel format to convert to or -1 if none was found.
+ */
+enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *pix_fmt_list,
+ enum AVPixelFormat src_pix_fmt,
+ int has_alpha, int *loss_ptr);
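+
+/*
+ * Sketch: pick the better of two candidate formats for an RGBA source; the
+ * loss flags report what (alpha, chroma resolution, ...) would be discarded.
+ *
+ * @code
+ * const enum AVPixelFormat candidates[] = {
+ *     AV_PIX_FMT_YUV420P, AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE
+ * };
+ * int loss = 0;
+ * enum AVPixelFormat best =
+ *     avcodec_find_best_pix_fmt_of_list(candidates, AV_PIX_FMT_RGBA, 1, &loss);
+ * @endcode
+ */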
+
+/**
+ * @deprecated see av_find_best_pix_fmt_of_2()
+ */
+enum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,
+ enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);
+
+attribute_deprecated
+#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI
+enum AVPixelFormat avcodec_find_best_pix_fmt2(const enum AVPixelFormat *pix_fmt_list,
+ enum AVPixelFormat src_pix_fmt,
+ int has_alpha, int *loss_ptr);
+#else
+enum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,
+ enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);
+#endif
+
+
+enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat * fmt);
+
+/**
+ * @}
+ */
+
+#if FF_API_SET_DIMENSIONS
+/**
+ * @deprecated this function is not supposed to be used from outside of lavc
+ */
+attribute_deprecated
+void avcodec_set_dimensions(AVCodecContext *s, int width, int height);
+#endif
+
+/**
+ * Put a string representing the codec tag codec_tag in buf.
+ *
+ * @param buf buffer to place codec tag in
+ * @param buf_size size in bytes of buf
+ * @param codec_tag codec tag to assign
+ * @return the length of the string that would have been generated if
+ * enough space had been available, excluding the trailing null
+ */
+size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag);
+
+void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode);
+
+/**
+ * Return a name for the specified profile, if available.
+ *
+ * @param codec the codec that is searched for the given profile
+ * @param profile the profile value for which a name is requested
+ * @return A name for the profile if found, NULL otherwise.
+ */
+const char *av_get_profile_name(const AVCodec *codec, int profile);
+
+/**
+ * Return a name for the specified profile, if available.
+ *
+ * @param codec_id the ID of the codec to which the requested profile belongs
+ * @param profile the profile value for which a name is requested
+ * @return A name for the profile if found, NULL otherwise.
+ *
+ * @note unlike av_get_profile_name(), which searches a list of profiles
+ * supported by a specific decoder or encoder implementation, this
+ * function searches the list of profiles from the AVCodecDescriptor
+ */
+const char *avcodec_profile_name(enum AVCodecID codec_id, int profile);
+
+int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size);
+int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count);
+//FIXME func typedef
+
+/**
+ * Fill AVFrame audio data and linesize pointers.
+ *
+ * The buffer buf must be a preallocated buffer with a size big enough
+ * to contain the specified samples amount. The filled AVFrame data
+ * pointers will point to this buffer.
+ *
+ * AVFrame extended_data channel pointers are allocated if necessary for
+ * planar audio.
+ *
+ * @param frame the AVFrame
+ * frame->nb_samples must be set prior to calling the
+ * function. This function fills in frame->data,
+ * frame->extended_data, frame->linesize[0].
+ * @param nb_channels channel count
+ * @param sample_fmt sample format
+ * @param buf buffer to use for frame data
+ * @param buf_size size of buffer
+ * @param align plane size sample alignment (0 = default)
+ * @return >=0 on success, negative error code on failure
+ * @todo return the size in bytes required to store the samples in
+ * case of success, at the next libavutil bump
+ */
+int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
+ enum AVSampleFormat sample_fmt, const uint8_t *buf,
+ int buf_size, int align);
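+
+/*
+ * Sketch: wrap a caller-owned interleaved stereo S16 buffer in an AVFrame,
+ * sizing the buffer with av_samples_get_buffer_size() from libavutil.
+ *
+ * @code
+ * AVFrame *frame = av_frame_alloc();
+ * frame->nb_samples = 1024;
+ * int size = av_samples_get_buffer_size(NULL, 2, 1024, AV_SAMPLE_FMT_S16, 0);
+ * uint8_t *buf = av_malloc(size);
+ * avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_S16, buf, size, 0);
+ * @endcode
+ */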
+
+/**
+ * Reset the internal decoder state / flush internal buffers. Should be called
+ * e.g. when seeking or when switching to a different stream.
+ *
+ * @note when refcounted frames are not used (i.e. avctx->refcounted_frames is 0),
+ * this invalidates the frames previously returned from the decoder. When
+ * refcounted frames are used, the decoder just releases any references it might
+ * keep internally, but the caller's reference remains valid.
+ */
+void avcodec_flush_buffers(AVCodecContext *avctx);
+
+/**
+ * Return codec bits per sample.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_bits_per_sample(enum AVCodecID codec_id);
+
+/**
+ * Return the PCM codec associated with a sample format.
+ * @param be endianness, 0 for little, 1 for big,
+ * -1 (or anything else) for native
+ * @return AV_CODEC_ID_PCM_* or AV_CODEC_ID_NONE
+ */
+enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be);
+
+/**
+ * Return codec bits per sample.
+ * Only return non-zero if the bits per sample is exactly correct, not an
+ * approximation.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_exact_bits_per_sample(enum AVCodecID codec_id);
+
+/**
+ * Return audio frame duration.
+ *
+ * @param avctx codec context
+ * @param frame_bytes size of the frame, or 0 if unknown
+ * @return frame duration, in samples, if known. 0 if not able to
+ * determine.
+ */
+int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes);
+
+
+typedef struct AVBitStreamFilterContext {
+ void *priv_data;
+ struct AVBitStreamFilter *filter;
+ AVCodecParserContext *parser;
+ struct AVBitStreamFilterContext *next;
+ /**
+ * Internal default arguments, used if NULL is passed to av_bitstream_filter_filter().
+ * Not for access by library users.
+ */
+ char *args;
+} AVBitStreamFilterContext;
+
+
+typedef struct AVBitStreamFilter {
+ const char *name;
+ int priv_data_size;
+ int (*filter)(AVBitStreamFilterContext *bsfc,
+ AVCodecContext *avctx, const char *args,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe);
+ void (*close)(AVBitStreamFilterContext *bsfc);
+ struct AVBitStreamFilter *next;
+} AVBitStreamFilter;
+
+/**
+ * Register a bitstream filter.
+ *
+ * The filter will be accessible to the application code through
+ * av_bitstream_filter_next() or can be directly initialized with
+ * av_bitstream_filter_init().
+ *
+ * @see avcodec_register_all()
+ */
+void av_register_bitstream_filter(AVBitStreamFilter *bsf);
+
+/**
+ * Create and initialize a bitstream filter context given a bitstream
+ * filter name.
+ *
+ * The returned context must be freed with av_bitstream_filter_close().
+ *
+ * @param name the name of the bitstream filter
+ * @return a bitstream filter context if a matching filter was found
+ * and successfully initialized, NULL otherwise
+ */
+AVBitStreamFilterContext *av_bitstream_filter_init(const char *name);
+
+/**
+ * Filter bitstream.
+ *
+ * This function filters the buffer buf with size buf_size, and places the
+ * filtered buffer in the buffer pointed to by poutbuf.
+ *
+ * The output buffer must be freed by the caller.
+ *
+ * @param bsfc bitstream filter context created by av_bitstream_filter_init()
+ * @param avctx AVCodecContext accessed by the filter, may be NULL.
+ * If specified, this must point to the encoder context of the
+ * output stream the packet is sent to.
+ * @param args arguments which specify the filter configuration, may be NULL
+ * @param poutbuf pointer which is updated to point to the filtered buffer
+ * @param poutbuf_size pointer which is updated to the filtered buffer size in bytes
+ * @param buf buffer containing the data to filter
+ * @param buf_size size in bytes of buf
+ * @param keyframe set to non-zero if the buffer to filter corresponds to key-frame packet data
+ * @return >= 0 in case of success, or a negative error code in case of failure
+ *
+ * If the return value is positive, an output buffer is allocated and
+ * is available in *poutbuf, and is distinct from the input buffer.
+ *
+ * If the return value is 0, the output buffer is not allocated and
+ * should be considered identical to the input buffer, or in case
+ * *poutbuf was set it points to the input buffer (not necessarily to
+ * its starting address).
+ */
+int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc,
+ AVCodecContext *avctx, const char *args,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe);
+
+/**
+ * Release bitstream filter context.
+ *
+ * @param bsf the bitstream filter context created with
+ * av_bitstream_filter_init(), can be NULL
+ */
+void av_bitstream_filter_close(AVBitStreamFilterContext *bsf);
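+
+/*
+ * Lifecycle sketch: run one packet through a named filter such as
+ * "h264_mp4toannexb". avctx, pkt_data, pkt_size and is_keyframe are assumed to
+ * be provided by the caller. When the return value is > 0 the output buffer is
+ * newly allocated and must be freed; when it is 0 it aliases the input.
+ *
+ * @code
+ * AVBitStreamFilterContext *bsfc = av_bitstream_filter_init("h264_mp4toannexb");
+ * uint8_t *out = NULL;
+ * int out_size = 0;
+ * int ret = av_bitstream_filter_filter(bsfc, avctx, NULL, &out, &out_size,
+ *                                      pkt_data, pkt_size, is_keyframe);
+ * // ... use out / out_size ...
+ * if (ret > 0)
+ *     av_free(out);
+ * av_bitstream_filter_close(bsfc);
+ * @endcode
+ */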
+
+/**
+ * If f is NULL, return the first registered bitstream filter,
+ * if f is non-NULL, return the next registered bitstream filter
+ * after f, or NULL if f is the last one.
+ *
+ * This function can be used to iterate over all registered bitstream
+ * filters.
+ */
+AVBitStreamFilter *av_bitstream_filter_next(const AVBitStreamFilter *f);
+
+/* memory */
+
+/**
+ * Same behaviour as av_fast_malloc() but the buffer has additional
+ * AV_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0.
+ *
+ * In addition the whole buffer will initially and after resizes
+ * be 0-initialized so that no uninitialized data will ever appear.
+ */
+void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Same behaviour as av_fast_padded_malloc() except that the buffer will always
+ * be 0-initialized after the call.
+ */
+void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Encode extradata length to a buffer. Used by xiph codecs.
+ *
+ * @param s buffer to write to; must be at least (v/255+1) bytes long
+ * @param v size of extradata in bytes
+ * @return number of bytes written to the buffer.
+ */
+unsigned int av_xiphlacing(unsigned char *s, unsigned int v);
+
+#if FF_API_MISSING_SAMPLE
+/**
+ * Log a generic warning message about a missing feature. This function is
+ * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.)
+ * only, and would normally not be used by applications.
+ * @param[in] avc a pointer to an arbitrary struct of which the first field is
+ * a pointer to an AVClass struct
+ * @param[in] feature string containing the name of the missing feature
+ * @param[in] want_sample indicates if samples are wanted which exhibit this feature.
+ * If want_sample is non-zero, additional verbiage will be added to the log
+ * message which tells the user how to report samples to the development
+ * mailing list.
+ * @deprecated Use avpriv_report_missing_feature() instead.
+ */
+attribute_deprecated
+void av_log_missing_feature(void *avc, const char *feature, int want_sample);
+
+/**
+ * Log a generic warning message asking for a sample. This function is
+ * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.)
+ * only, and would normally not be used by applications.
+ * @param[in] avc a pointer to an arbitrary struct of which the first field is
+ * a pointer to an AVClass struct
+ * @param[in] msg string containing an optional message, or NULL if no message
+ * @deprecated Use avpriv_request_sample() instead.
+ */
+attribute_deprecated
+void av_log_ask_for_sample(void *avc, const char *msg, ...) av_printf_format(2, 3);
+#endif /* FF_API_MISSING_SAMPLE */
+
+/**
+ * Register the hardware accelerator hwaccel.
+ */
+void av_register_hwaccel(AVHWAccel *hwaccel);
+
+/**
+ * If hwaccel is NULL, returns the first registered hardware accelerator,
+ * if hwaccel is non-NULL, returns the next registered hardware accelerator
+ * after hwaccel, or NULL if hwaccel is the last one.
+ */
+AVHWAccel *av_hwaccel_next(const AVHWAccel *hwaccel);
+
+
+/**
+ * Lock operation used by lockmgr
+ */
+enum AVLockOp {
+ AV_LOCK_CREATE, ///< Create a mutex
+ AV_LOCK_OBTAIN, ///< Lock the mutex
+ AV_LOCK_RELEASE, ///< Unlock the mutex
+ AV_LOCK_DESTROY, ///< Free mutex resources
+};
+
+/**
+ * Register a user provided lock manager supporting the operations
+ * specified by AVLockOp. The "mutex" argument to the function points
+ * to a (void *) where the lockmgr should store/get a pointer to a user
+ * allocated mutex. It is NULL upon AV_LOCK_CREATE and equal to the
+ * value left by the last call for all other ops. If the lock manager is
+ * unable to perform the op then it should leave the mutex in the same
+ * state as when it was called and return a non-zero value. However,
+ * when called with AV_LOCK_DESTROY the mutex will always be assumed to
+ * have been successfully destroyed. If av_lockmgr_register succeeds
+ * it will return a non-negative value; if it fails, it will return a
+ * negative value, destroy all mutexes and unregister all callbacks.
+ * av_lockmgr_register is not thread-safe, it must be called from a
+ * single thread before any calls which make use of locking are used.
+ *
+ * @param cb User defined callback. av_lockmgr_register invokes calls
+ * to this callback and the previously registered callback.
+ * The callback will be used to create more than one mutex
+ * each of which must be backed by its own underlying locking
+ * mechanism (i.e. do not use a single static object to
+ * implement your lock manager). If cb is set to NULL the
+ * lockmgr will be unregistered.
+ */
+int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op));
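+
+/*
+ * Sketch of a pthread-based lock manager callback, assuming <pthread.h> and
+ * <stdlib.h> are available to the application.
+ *
+ * @code
+ * static int lockmgr_cb(void **mutex, enum AVLockOp op)
+ * {
+ *     switch (op) {
+ *     case AV_LOCK_CREATE:
+ *         *mutex = malloc(sizeof(pthread_mutex_t));
+ *         return !*mutex || pthread_mutex_init(*mutex, NULL);
+ *     case AV_LOCK_OBTAIN:
+ *         return !!pthread_mutex_lock(*mutex);
+ *     case AV_LOCK_RELEASE:
+ *         return !!pthread_mutex_unlock(*mutex);
+ *     case AV_LOCK_DESTROY:
+ *         pthread_mutex_destroy(*mutex);
+ *         free(*mutex);
+ *         return 0;
+ *     }
+ *     return 1;
+ * }
+ *
+ * av_lockmgr_register(lockmgr_cb);
+ * @endcode
+ */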
+
+/**
+ * Get the type of the given codec.
+ */
+enum AVMediaType avcodec_get_type(enum AVCodecID codec_id);
+
+/**
+ * Get the name of a codec.
+ * @return a static string identifying the codec; never NULL
+ */
+const char *avcodec_get_name(enum AVCodecID id);
+
+/**
+ * @return a positive value if s is open (i.e. avcodec_open2() was called on it
+ * with no corresponding avcodec_close()), 0 otherwise.
+ */
+int avcodec_is_open(AVCodecContext *s);
+
+/**
+ * @return a non-zero number if codec is an encoder, zero otherwise
+ */
+int av_codec_is_encoder(const AVCodec *codec);
+
+/**
+ * @return a non-zero number if codec is a decoder, zero otherwise
+ */
+int av_codec_is_decoder(const AVCodec *codec);
+
+/**
+ * @return descriptor for given codec ID or NULL if no descriptor exists.
+ */
+const AVCodecDescriptor *avcodec_descriptor_get(enum AVCodecID id);
+
+/**
+ * Iterate over all codec descriptors known to libavcodec.
+ *
+ * @param prev previous descriptor. NULL to get the first descriptor.
+ *
+ * @return next descriptor or NULL after the last descriptor
+ */
+const AVCodecDescriptor *avcodec_descriptor_next(const AVCodecDescriptor *prev);
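+
+/*
+ * Iteration sketch: walk every known codec descriptor and print its name.
+ *
+ * @code
+ * const AVCodecDescriptor *desc = NULL;
+ * while ((desc = avcodec_descriptor_next(desc)))
+ *     printf("%s\n", desc->name);
+ * @endcode
+ */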
+
+/**
+ * @return codec descriptor with the given name or NULL if no such descriptor
+ * exists.
+ */
+const AVCodecDescriptor *avcodec_descriptor_get_by_name(const char *name);
+
+/**
+ * Allocate a CPB properties structure and initialize its fields to default
+ * values.
+ *
+ * @param size if non-NULL, the size of the allocated struct will be written
+ * here. This is useful for embedding it in side data.
+ *
+ * @return the newly allocated struct or NULL on failure
+ */
+AVCPBProperties *av_cpb_properties_alloc(size_t *size);
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_AVCODEC_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/avfft.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/avfft.h
new file mode 100644
index 000000000..0c0f9b8d8
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/avfft.h
@@ -0,0 +1,118 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AVFFT_H
+#define AVCODEC_AVFFT_H
+
+/**
+ * @file
+ * @ingroup lavc_fft
+ * FFT functions
+ */
+
+/**
+ * @defgroup lavc_fft FFT functions
+ * @ingroup lavc_misc
+ *
+ * @{
+ */
+
+typedef float FFTSample;
+
+typedef struct FFTComplex {
+ FFTSample re, im;
+} FFTComplex;
+
+typedef struct FFTContext FFTContext;
+
+/**
+ * Set up a complex FFT.
+ * @param nbits log2 of the length of the input array
+ * @param inverse if 0 perform the forward transform, if 1 perform the inverse
+ */
+FFTContext *av_fft_init(int nbits, int inverse);
+
+/**
+ * Do the permutation needed BEFORE calling av_fft_calc().
+ */
+void av_fft_permute(FFTContext *s, FFTComplex *z);
+
+/**
+ * Do a complex FFT with the parameters defined in av_fft_init(). The
+ * input data must be permuted before (see av_fft_permute()). No 1.0/sqrt(n) normalization is done.
+ */
+void av_fft_calc(FFTContext *s, FFTComplex *z);
+
+void av_fft_end(FFTContext *s);
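+
+/*
+ * Sketch: 512-point forward complex FFT over a caller-filled array z of 512
+ * FFTComplex values.
+ *
+ * @code
+ * FFTContext *fft = av_fft_init(9, 0); // 2^9 = 512 points, forward transform
+ * av_fft_permute(fft, z);              // reorder input as required
+ * av_fft_calc(fft, z);                 // in-place, no normalization
+ * av_fft_end(fft);
+ * @endcode
+ */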
+
+FFTContext *av_mdct_init(int nbits, int inverse, double scale);
+void av_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input);
+void av_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input);
+void av_mdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input);
+void av_mdct_end(FFTContext *s);
+
+/* Real Discrete Fourier Transform */
+
+enum RDFTransformType {
+ DFT_R2C,
+ IDFT_C2R,
+ IDFT_R2C,
+ DFT_C2R,
+};
+
+typedef struct RDFTContext RDFTContext;
+
+/**
+ * Set up a real FFT.
+ * @param nbits log2 of the length of the input array
+ * @param trans the type of transform
+ */
+RDFTContext *av_rdft_init(int nbits, enum RDFTransformType trans);
+void av_rdft_calc(RDFTContext *s, FFTSample *data);
+void av_rdft_end(RDFTContext *s);
+
+/* Discrete Cosine Transform */
+
+typedef struct DCTContext DCTContext;
+
+enum DCTTransformType {
+ DCT_II = 0,
+ DCT_III,
+ DCT_I,
+ DST_I,
+};
+
+/**
+ * Set up DCT.
+ *
+ * @param nbits size of the input array:
+ * (1 << nbits) for DCT-II, DCT-III and DST-I
+ * (1 << nbits) + 1 for DCT-I
+ * @param type the type of transform
+ *
+ * @note the first element of the input of DST-I is ignored
+ */
+DCTContext *av_dct_init(int nbits, enum DCTTransformType type);
+void av_dct_calc(DCTContext *s, FFTSample *data);
+void av_dct_end (DCTContext *s);
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_AVFFT_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/vaapi.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/vaapi.h
new file mode 100644
index 000000000..7a29f6f88
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/vaapi.h
@@ -0,0 +1,189 @@
+/*
+ * Video Acceleration API (shared data between FFmpeg and the video player)
+ * HW decode acceleration for MPEG-2, MPEG-4, H.264 and VC-1
+ *
+ * Copyright (C) 2008-2009 Splitted-Desktop Systems
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VAAPI_H
+#define AVCODEC_VAAPI_H
+
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_vaapi
+ * Public libavcodec VA API header.
+ */
+
+#include <stdint.h>
+#include "libavutil/attributes.h"
+#include "version.h"
+
+/**
+ * @defgroup lavc_codec_hwaccel_vaapi VA API Decoding
+ * @ingroup lavc_codec_hwaccel
+ * @{
+ */
+
+/**
+ * This structure is used to share data between the FFmpeg library and
+ * the client video application.
+ * This shall be zero-allocated and available as
+ * AVCodecContext.hwaccel_context. All user members can be set once
+ * during initialization or through each AVCodecContext.get_buffer()
+ * function call. In any case, they must be valid prior to calling
+ * decoding functions.
+ */
+struct vaapi_context {
+ /**
+ * Window system dependent data
+ *
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ void *display;
+
+ /**
+ * Configuration ID
+ *
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ uint32_t config_id;
+
+ /**
+ * Context ID (video decode pipeline)
+ *
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ uint32_t context_id;
+
+#if FF_API_VAAPI_CONTEXT
+ /**
+ * VAPictureParameterBuffer ID
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ attribute_deprecated
+ uint32_t pic_param_buf_id;
+
+ /**
+ * VAIQMatrixBuffer ID
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ attribute_deprecated
+ uint32_t iq_matrix_buf_id;
+
+ /**
+ * VABitPlaneBuffer ID (for VC-1 decoding)
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ attribute_deprecated
+ uint32_t bitplane_buf_id;
+
+ /**
+ * Slice parameter/data buffer IDs
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ attribute_deprecated
+ uint32_t *slice_buf_ids;
+
+ /**
+ * Number of effective slice buffer IDs to send to the HW
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ attribute_deprecated
+ unsigned int n_slice_buf_ids;
+
+ /**
+ * Size of pre-allocated slice_buf_ids
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ attribute_deprecated
+ unsigned int slice_buf_ids_alloc;
+
+ /**
+ * Pointer to VASliceParameterBuffers
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ attribute_deprecated
+ void *slice_params;
+
+ /**
+ * Size of a VASliceParameterBuffer element
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ attribute_deprecated
+ unsigned int slice_param_size;
+
+ /**
+ * Size of pre-allocated slice_params
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ attribute_deprecated
+ unsigned int slice_params_alloc;
+
+ /**
+ * Number of slices currently filled in
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ attribute_deprecated
+ unsigned int slice_count;
+
+ /**
+ * Pointer to slice data buffer base
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ attribute_deprecated
+ const uint8_t *slice_data;
+
+ /**
+ * Current size of slice data
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ attribute_deprecated
+ uint32_t slice_data_size;
+#endif
+};
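+
+/*
+ * Setup sketch: the application creates the VA display/config/context with
+ * libva and hands them to libavcodec before decoding starts. va_display,
+ * va_config and va_context are assumed to come from that application-side
+ * setup.
+ *
+ * @code
+ * struct vaapi_context *vactx = av_mallocz(sizeof(*vactx));
+ * vactx->display    = va_display;
+ * vactx->config_id  = va_config;
+ * vactx->context_id = va_context;
+ * avctx->hwaccel_context = vactx;
+ * @endcode
+ */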
+
+/* @} */
+
+#endif /* AVCODEC_VAAPI_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/vdpau.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/vdpau.h
new file mode 100644
index 000000000..e85e4d9e9
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/vdpau.h
@@ -0,0 +1,253 @@
+/*
+ * The Video Decode and Presentation API for UNIX (VDPAU) is used for
+ * hardware-accelerated decoding of MPEG-1/2, H.264 and VC-1.
+ *
+ * Copyright (C) 2008 NVIDIA
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VDPAU_H
+#define AVCODEC_VDPAU_H
+
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_vdpau
+ * Public libavcodec VDPAU header.
+ */
+
+
+/**
+ * @defgroup lavc_codec_hwaccel_vdpau VDPAU Decoder and Renderer
+ * @ingroup lavc_codec_hwaccel
+ *
+ * VDPAU hardware acceleration has two modules
+ * - VDPAU decoding
+ * - VDPAU presentation
+ *
+ * The VDPAU decoding module parses all headers using FFmpeg
+ * parsing mechanisms and uses VDPAU for the actual decoding.
+ *
+ * As per the current implementation, the actual decoding
+ * and rendering (API calls) are done as part of the VDPAU
+ * presentation (vo_vdpau.c) module.
+ *
+ * @{
+ */
+
+#include <vdpau/vdpau.h>
+#include <vdpau/vdpau_x11.h>
+#include "libavutil/avconfig.h"
+#include "libavutil/attributes.h"
+
+#include "avcodec.h"
+#include "version.h"
+
+#if FF_API_BUFS_VDPAU
+union AVVDPAUPictureInfo {
+ VdpPictureInfoH264 h264;
+ VdpPictureInfoMPEG1Or2 mpeg;
+ VdpPictureInfoVC1 vc1;
+ VdpPictureInfoMPEG4Part2 mpeg4;
+};
+#endif
+
+struct AVCodecContext;
+struct AVFrame;
+
+typedef int (*AVVDPAU_Render2)(struct AVCodecContext *, struct AVFrame *,
+ const VdpPictureInfo *, uint32_t,
+ const VdpBitstreamBuffer *);
+
+/**
+ * This structure is used to share data between the libavcodec library and
+ * the client video application.
+ * The user shall allocate the structure via the av_alloc_vdpau_hwaccel
+ * function and make it available as
+ * AVCodecContext.hwaccel_context. Members can be set by the user once
+ * during initialization or through each AVCodecContext.get_buffer()
+ * function call. In any case, they must be valid prior to calling
+ * decoding functions.
+ *
+ * The size of this structure is not a part of the public ABI and must not
+ * be used outside of libavcodec. Use av_vdpau_alloc_context() to allocate an
+ * AVVDPAUContext.
+ */
+typedef struct AVVDPAUContext {
+ /**
+ * VDPAU decoder handle
+ *
+ * Set by user.
+ */
+ VdpDecoder decoder;
+
+ /**
+ * VDPAU decoder render callback
+ *
+ * Set by the user.
+ */
+ VdpDecoderRender *render;
+
+#if FF_API_BUFS_VDPAU
+ /**
+ * VDPAU picture information
+ *
+ * Set by libavcodec.
+ */
+ attribute_deprecated
+ union AVVDPAUPictureInfo info;
+
+ /**
+ * Allocated size of the bitstream_buffers table.
+ *
+ * Set by libavcodec.
+ */
+ attribute_deprecated
+ int bitstream_buffers_allocated;
+
+ /**
+ * Useful bitstream buffers in the bitstream buffers table.
+ *
+ * Set by libavcodec.
+ */
+ attribute_deprecated
+ int bitstream_buffers_used;
+
+ /**
+ * Table of bitstream buffers.
+ * The user is responsible for freeing this buffer using av_freep().
+ *
+ * Set by libavcodec.
+ */
+ attribute_deprecated
+ VdpBitstreamBuffer *bitstream_buffers;
+#endif
+ AVVDPAU_Render2 render2;
+} AVVDPAUContext;
+
+/**
+ * @brief allocation function for AVVDPAUContext
+ *
+ * Allows extending the struct without breaking API/ABI
+ */
+AVVDPAUContext *av_alloc_vdpaucontext(void);
+
+AVVDPAU_Render2 av_vdpau_hwaccel_get_render2(const AVVDPAUContext *);
+void av_vdpau_hwaccel_set_render2(AVVDPAUContext *, AVVDPAU_Render2);
+
+/**
+ * Associate a VDPAU device with a codec context for hardware acceleration.
+ * This function is meant to be called from the get_format() codec callback,
+ * or earlier. It can also be called after avcodec_flush_buffers() to change
+ * the underlying VDPAU device mid-stream (e.g. to recover from non-transparent
+ * display preemption).
+ *
+ * @note get_format() must return AV_PIX_FMT_VDPAU if this function completes
+ * successfully.
+ *
+ * @param avctx decoding context whose get_format() callback is invoked
+ * @param device VDPAU device handle to use for hardware acceleration
+ * @param get_proc_address VDPAU device driver
+ * @param flags zero or more OR'd AV_HWACCEL_FLAG_* flags
+ *
+ * @return 0 on success, an AVERROR code on failure.
+ */
+int av_vdpau_bind_context(AVCodecContext *avctx, VdpDevice device,
+ VdpGetProcAddress *get_proc_address, unsigned flags);
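+
+/*
+ * Sketch of a get_format() callback that binds VDPAU; device and
+ * get_proc_address are assumed to come from the application's VDPAU setup.
+ *
+ * @code
+ * static enum AVPixelFormat get_hw_format(AVCodecContext *avctx,
+ *                                         const enum AVPixelFormat *fmt)
+ * {
+ *     const enum AVPixelFormat *p;
+ *     for (p = fmt; *p != AV_PIX_FMT_NONE; p++) {
+ *         if (*p == AV_PIX_FMT_VDPAU &&
+ *             av_vdpau_bind_context(avctx, device, get_proc_address, 0) == 0)
+ *             return AV_PIX_FMT_VDPAU;
+ *     }
+ *     return avcodec_default_get_format(avctx, fmt);
+ * }
+ * @endcode
+ */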
+
+/**
+ * Gets the parameters to create an adequate VDPAU video surface for the codec
+ * context using VDPAU hardware decoding acceleration.
+ *
+ * @note Behavior is undefined if the context was not successfully bound to a
+ * VDPAU device using av_vdpau_bind_context().
+ *
+ * @param avctx the codec context being used for decoding the stream
+ * @param type storage space for the VDPAU video surface chroma type
+ * (or NULL to ignore)
+ * @param width storage space for the VDPAU video surface pixel width
+ * (or NULL to ignore)
+ * @param height storage space for the VDPAU video surface pixel height
+ * (or NULL to ignore)
+ *
+ * @return 0 on success, a negative AVERROR code on failure.
+ */
+int av_vdpau_get_surface_parameters(AVCodecContext *avctx, VdpChromaType *type,
+ uint32_t *width, uint32_t *height);
+
+/**
+ * Allocate an AVVDPAUContext.
+ *
+ * @return Newly-allocated AVVDPAUContext or NULL on failure.
+ */
+AVVDPAUContext *av_vdpau_alloc_context(void);
+
+#if FF_API_VDPAU_PROFILE
+/**
+ * Get a decoder profile that should be used for initializing a VDPAU decoder.
+ * Should be called from the AVCodecContext.get_format() callback.
+ *
+ * @deprecated Use av_vdpau_bind_context() instead.
+ *
+ * @param avctx the codec context being used for decoding the stream
+ * @param profile a pointer into which the result will be written on success.
+ * The contents of profile are undefined if this function returns
+ * an error.
+ *
+ * @return 0 on success (non-negative), a negative AVERROR on failure.
+ */
+attribute_deprecated
+int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile);
+#endif
+
+#if FF_API_CAP_VDPAU
+/** @brief The videoSurface is used for rendering. */
+#define FF_VDPAU_STATE_USED_FOR_RENDER 1
+
+/**
+ * @brief The videoSurface is needed for reference/prediction.
+ * The codec manipulates this.
+ */
+#define FF_VDPAU_STATE_USED_FOR_REFERENCE 2
+
+/**
+ * @brief This structure is used as a callback between the FFmpeg
+ * decoder (vd_) and presentation (vo_) module.
+ * This is used for defining a video frame containing surface,
+ * picture parameter, bitstream information etc which are passed
+ * between the FFmpeg decoder and its clients.
+ */
+struct vdpau_render_state {
+ VdpVideoSurface surface; ///< Used as rendered surface, never changed.
+
+ int state; ///< Holds FF_VDPAU_STATE_* values.
+
+ /** picture parameter information for all supported codecs */
+ union AVVDPAUPictureInfo info;
+
+ /** Describe size/location of the compressed video data.
+ Set to 0 when freeing bitstream_buffers. */
+ int bitstream_buffers_allocated;
+ int bitstream_buffers_used;
+ /** The user is responsible for freeing this buffer using av_freep(). */
+ VdpBitstreamBuffer *bitstream_buffers;
+};
+#endif
+
+/* @}*/
+
+#endif /* AVCODEC_VDPAU_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/version.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/version.h
new file mode 100644
index 000000000..2b01e6f91
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/version.h
@@ -0,0 +1,210 @@
+/*
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VERSION_H
+#define AVCODEC_VERSION_H
+
+/**
+ * @file
+ * @ingroup libavc
+ * Libavcodec version macros.
+ */
+
+#include "libavutil/version.h"
+
+#define LIBAVCODEC_VERSION_MAJOR 57
+#define LIBAVCODEC_VERSION_MINOR 22
+#define LIBAVCODEC_VERSION_MICRO 100
+
+#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
+ LIBAVCODEC_VERSION_MINOR, \
+ LIBAVCODEC_VERSION_MICRO)
+#define LIBAVCODEC_VERSION AV_VERSION(LIBAVCODEC_VERSION_MAJOR, \
+ LIBAVCODEC_VERSION_MINOR, \
+ LIBAVCODEC_VERSION_MICRO)
+#define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT
+
+#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION)
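+
+/*
+ * Example compile-time guard built from these macros (thresholds are
+ * illustrative):
+ *
+ * @code
+ * #if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 22, 100)
+ * // code relying on API available in this release
+ * #endif
+ * @endcode
+ */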
+
+/**
+ * FF_API_* defines may be placed below to indicate public API that will be
+ * dropped at a future version bump. The defines themselves are not part of
+ * the public API and may change, break or disappear at any time.
+ *
+ * @note, when bumping the major version it is recommended to manually
+ * disable each FF_API_* in its own commit instead of disabling them all
+ * at once through the bump. This improves the git bisect-ability of the change.
+ */
+
+#ifndef FF_API_VIMA_DECODER
+#define FF_API_VIMA_DECODER (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_AUDIO_CONVERT
+#define FF_API_AUDIO_CONVERT (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_AVCODEC_RESAMPLE
+#define FF_API_AVCODEC_RESAMPLE FF_API_AUDIO_CONVERT
+#endif
+#ifndef FF_API_MISSING_SAMPLE
+#define FF_API_MISSING_SAMPLE (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_LOWRES
+#define FF_API_LOWRES (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_CAP_VDPAU
+#define FF_API_CAP_VDPAU (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_BUFS_VDPAU
+#define FF_API_BUFS_VDPAU (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_VOXWARE
+#define FF_API_VOXWARE (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_SET_DIMENSIONS
+#define FF_API_SET_DIMENSIONS (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_DEBUG_MV
+#define FF_API_DEBUG_MV (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_AC_VLC
+#define FF_API_AC_VLC (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_OLD_MSMPEG4
+#define FF_API_OLD_MSMPEG4 (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_ASPECT_EXTENDED
+#define FF_API_ASPECT_EXTENDED (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_ARCH_ALPHA
+#define FF_API_ARCH_ALPHA (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_XVMC
+#define FF_API_XVMC (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_ERROR_RATE
+#define FF_API_ERROR_RATE (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_QSCALE_TYPE
+#define FF_API_QSCALE_TYPE (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_MB_TYPE
+#define FF_API_MB_TYPE (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_MAX_BFRAMES
+#define FF_API_MAX_BFRAMES (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_NEG_LINESIZES
+#define FF_API_NEG_LINESIZES (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_EMU_EDGE
+#define FF_API_EMU_EDGE (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_ARCH_SH4
+#define FF_API_ARCH_SH4 (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_ARCH_SPARC
+#define FF_API_ARCH_SPARC (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_UNUSED_MEMBERS
+#define FF_API_UNUSED_MEMBERS (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_IDCT_XVIDMMX
+#define FF_API_IDCT_XVIDMMX (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_INPUT_PRESERVED
+#define FF_API_INPUT_PRESERVED (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_NORMALIZE_AQP
+#define FF_API_NORMALIZE_AQP (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_GMC
+#define FF_API_GMC (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_MV0
+#define FF_API_MV0 (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_CODEC_NAME
+#define FF_API_CODEC_NAME (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_AFD
+#define FF_API_AFD (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_VISMV
+/* XXX: don't forget to drop the -vismv documentation */
+#define FF_API_VISMV (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_AUDIOENC_DELAY
+#define FF_API_AUDIOENC_DELAY (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_VAAPI_CONTEXT
+#define FF_API_VAAPI_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_AVCTX_TIMEBASE
+#define FF_API_AVCTX_TIMEBASE (LIBAVCODEC_VERSION_MAJOR < 59)
+#endif
+#ifndef FF_API_MPV_OPT
+#define FF_API_MPV_OPT (LIBAVCODEC_VERSION_MAJOR < 59)
+#endif
+#ifndef FF_API_STREAM_CODEC_TAG
+#define FF_API_STREAM_CODEC_TAG (LIBAVCODEC_VERSION_MAJOR < 59)
+#endif
+#ifndef FF_API_QUANT_BIAS
+#define FF_API_QUANT_BIAS (LIBAVCODEC_VERSION_MAJOR < 59)
+#endif
+#ifndef FF_API_RC_STRATEGY
+#define FF_API_RC_STRATEGY (LIBAVCODEC_VERSION_MAJOR < 59)
+#endif
+#ifndef FF_API_CODED_FRAME
+#define FF_API_CODED_FRAME (LIBAVCODEC_VERSION_MAJOR < 59)
+#endif
+#ifndef FF_API_MOTION_EST
+#define FF_API_MOTION_EST (LIBAVCODEC_VERSION_MAJOR < 59)
+#endif
+#ifndef FF_API_WITHOUT_PREFIX
+#define FF_API_WITHOUT_PREFIX (LIBAVCODEC_VERSION_MAJOR < 59)
+#endif
+#ifndef FF_API_SIDEDATA_ONLY_PKT
+#define FF_API_SIDEDATA_ONLY_PKT (LIBAVCODEC_VERSION_MAJOR < 59)
+#endif
+#ifndef FF_API_VDPAU_PROFILE
+#define FF_API_VDPAU_PROFILE (LIBAVCODEC_VERSION_MAJOR < 59)
+#endif
+#ifndef FF_API_CONVERGENCE_DURATION
+#define FF_API_CONVERGENCE_DURATION (LIBAVCODEC_VERSION_MAJOR < 59)
+#endif
+#ifndef FF_API_AVPICTURE
+#define FF_API_AVPICTURE (LIBAVCODEC_VERSION_MAJOR < 59)
+#endif
+#ifndef FF_API_AVPACKET_OLD_API
+#define FF_API_AVPACKET_OLD_API (LIBAVCODEC_VERSION_MAJOR < 59)
+#endif
+#ifndef FF_API_RTP_CALLBACK
+#define FF_API_RTP_CALLBACK (LIBAVCODEC_VERSION_MAJOR < 59)
+#endif
+#ifndef FF_API_VBV_DELAY
+#define FF_API_VBV_DELAY (LIBAVCODEC_VERSION_MAJOR < 59)
+#endif
+#ifndef FF_API_CODER_TYPE
+#define FF_API_CODER_TYPE (LIBAVCODEC_VERSION_MAJOR < 59)
+#endif
+#ifndef FF_API_STAT_BITS
+#define FF_API_STAT_BITS (LIBAVCODEC_VERSION_MAJOR < 59)
+#endif
+
+#endif /* AVCODEC_VERSION_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/attributes.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/attributes.h
new file mode 100644
index 000000000..5c6b9deec
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/attributes.h
@@ -0,0 +1,168 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Macro definitions for various function/variable attributes
+ */
+
+#ifndef AVUTIL_ATTRIBUTES_H
+#define AVUTIL_ATTRIBUTES_H
+
+#ifdef __GNUC__
+# define AV_GCC_VERSION_AT_LEAST(x,y) (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y))
+# define AV_GCC_VERSION_AT_MOST(x,y) (__GNUC__ < (x) || __GNUC__ == (x) && __GNUC_MINOR__ <= (y))
+#else
+# define AV_GCC_VERSION_AT_LEAST(x,y) 0
+# define AV_GCC_VERSION_AT_MOST(x,y) 0
+#endif
+
+#ifndef av_always_inline
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_always_inline __attribute__((always_inline)) inline
+#elif defined(_MSC_VER)
+# define av_always_inline __forceinline
+#else
+# define av_always_inline inline
+#endif
+#endif
+
+#ifndef av_extern_inline
+#if defined(__ICL) && __ICL >= 1210 || defined(__GNUC_STDC_INLINE__)
+# define av_extern_inline extern inline
+#else
+# define av_extern_inline inline
+#endif
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,4)
+# define av_warn_unused_result __attribute__((warn_unused_result))
+#else
+# define av_warn_unused_result
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_noinline __attribute__((noinline))
+#elif defined(_MSC_VER)
+# define av_noinline __declspec(noinline)
+#else
+# define av_noinline
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_pure __attribute__((pure))
+#else
+# define av_pure
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(2,6)
+# define av_const __attribute__((const))
+#else
+# define av_const
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4,3)
+# define av_cold __attribute__((cold))
+#else
+# define av_cold
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4,1) && !defined(__llvm__)
+# define av_flatten __attribute__((flatten))
+#else
+# define av_flatten
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define attribute_deprecated __attribute__((deprecated))
+#elif defined(_MSC_VER)
+# define attribute_deprecated __declspec(deprecated)
+#else
+# define attribute_deprecated
+#endif
+
+/**
+ * Disable warnings about deprecated features
+ * This is useful for sections of code kept for backward compatibility and
+ * scheduled for removal.
+ */
+#ifndef AV_NOWARN_DEPRECATED
+#if AV_GCC_VERSION_AT_LEAST(4,6)
+# define AV_NOWARN_DEPRECATED(code) \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \
+ code \
+ _Pragma("GCC diagnostic pop")
+#elif defined(_MSC_VER)
+# define AV_NOWARN_DEPRECATED(code) \
+ __pragma(warning(push)) \
+ __pragma(warning(disable : 4996)) \
+ code; \
+ __pragma(warning(pop))
+#else
+# define AV_NOWARN_DEPRECATED(code) code
+#endif
+#endif
+
+
+#if defined(__GNUC__)
+# define av_unused __attribute__((unused))
+#else
+# define av_unused
+#endif
+
+/**
+ * Mark a variable as used and prevent the compiler from optimizing it
+ * away. This is useful for variables accessed only from inline
+ * assembler without the compiler being aware.
+ */
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_used __attribute__((used))
+#else
+# define av_used
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,3)
+# define av_alias __attribute__((may_alias))
+#else
+# define av_alias
+#endif
+
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__)
+# define av_uninit(x) x=x
+#else
+# define av_uninit(x) x
+#endif
+
+#ifdef __GNUC__
+# define av_builtin_constant_p __builtin_constant_p
+# define av_printf_format(fmtpos, attrpos) __attribute__((__format__(__printf__, fmtpos, attrpos)))
+#else
+# define av_builtin_constant_p(x) 0
+# define av_printf_format(fmtpos, attrpos)
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(2,5)
+# define av_noreturn __attribute__((noreturn))
+#else
+# define av_noreturn
+#endif
+
+#endif /* AVUTIL_ATTRIBUTES_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/avconfig.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/avconfig.h
new file mode 100644
index 000000000..36a8cd14d
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/avconfig.h
@@ -0,0 +1,7 @@
+/* Generated by ffconf */
+#ifndef AVUTIL_AVCONFIG_H
+#define AVUTIL_AVCONFIG_H
+#define AV_HAVE_BIGENDIAN 0
+#define AV_HAVE_FAST_UNALIGNED 1
+#define AV_HAVE_INCOMPATIBLE_LIBAV_ABI 0
+#endif /* AVUTIL_AVCONFIG_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/avutil.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/avutil.h
new file mode 100644
index 000000000..9bcf67412
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/avutil.h
@@ -0,0 +1,343 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AVUTIL_H
+#define AVUTIL_AVUTIL_H
+
+/**
+ * @file
+ * external API header
+ */
+
+/**
+ * @mainpage
+ *
+ * @section ffmpeg_intro Introduction
+ *
+ * This document describes the usage of the different libraries
+ * provided by FFmpeg.
+ *
+ * @li @ref libavc "libavcodec" encoding/decoding library
+ * @li @ref lavfi "libavfilter" graph-based frame editing library
+ * @li @ref libavf "libavformat" I/O and muxing/demuxing library
+ * @li @ref lavd "libavdevice" special devices muxing/demuxing library
+ * @li @ref lavu "libavutil" common utility library
+ * @li @ref lswr "libswresample" audio resampling, format conversion and mixing
+ * @li @ref lpp "libpostproc" post processing library
+ * @li @ref libsws "libswscale" color conversion and scaling library
+ *
+ * @section ffmpeg_versioning Versioning and compatibility
+ *
+ * Each of the FFmpeg libraries contains a version.h header, which defines a
+ * major, minor and micro version number with the
+ * <em>LIBRARYNAME_VERSION_{MAJOR,MINOR,MICRO}</em> macros. The major version
+ * number is incremented with backward incompatible changes - e.g. removing
+ * parts of the public API, reordering public struct members, etc. The minor
+ * version number is incremented for backward compatible API changes or major
+ * new features - e.g. adding a new public function or a new decoder. The micro
+ * version number is incremented for smaller changes that a calling program
+ * might still want to check for - e.g. changing behavior in a previously
+ * unspecified situation.
+ *
+ * FFmpeg guarantees backward API and ABI compatibility for each library as long
+ * as its major version number is unchanged. This means that no public symbols
+ * will be removed or renamed. Types and names of the public struct members and
+ * values of public macros and enums will remain the same (unless they were
+ * explicitly declared as not part of the public API). Documented behavior will
+ * not change.
+ *
+ * In other words, any correct program that works with a given FFmpeg snapshot
+ * should work just as well without any changes with any later snapshot with the
+ * same major versions. This applies to both rebuilding the program against new
+ * FFmpeg versions or to replacing the dynamic FFmpeg libraries that a program
+ * links against.
+ *
+ * However, new public symbols may be added and new members may be appended to
+ * public structs whose size is not part of public ABI (most public structs in
+ * FFmpeg). New macros and enum values may be added. Behavior in undocumented
+ * situations may change slightly (and be documented). All those are accompanied
+ * by an entry in doc/APIchanges and incrementing either the minor or micro
+ * version number.
+ */
+
+/**
+ * @defgroup lavu Common utility functions
+ *
+ * @brief
+ * libavutil contains the code shared across all the other FFmpeg
+ * libraries
+ *
+ * @note In order to use the functions provided by avutil you must include
+ * the specific header.
+ *
+ * @{
+ *
+ * @defgroup lavu_crypto Crypto and Hashing
+ *
+ * @{
+ * @}
+ *
+ * @defgroup lavu_math Maths
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_string String Manipulation
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_mem Memory Management
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_data Data Structures
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_audio Audio related
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_error Error Codes
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_log Logging Facility
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_misc Other
+ *
+ * @{
+ *
+ * @defgroup preproc_misc Preprocessor String Macros
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup version_utils Library Version Macros
+ *
+ * @{
+ *
+ * @}
+ */
+
+
+/**
+ * @addtogroup lavu_ver
+ * @{
+ */
+
+/**
+ * Return the LIBAVUTIL_VERSION_INT constant.
+ */
+unsigned avutil_version(void);
+
+/**
+ * Return an informative version string. This usually is the actual release
+ * version number or a git commit description. This string has no fixed format
+ * and can change any time. It should never be parsed by code.
+ */
+const char *av_version_info(void);
+
+/**
+ * Return the libavutil build-time configuration.
+ */
+const char *avutil_configuration(void);
+
+/**
+ * Return the libavutil license.
+ */
+const char *avutil_license(void);
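+
+/*
+ * A minimal usage sketch (illustrative only, assumes <stdio.h>): reporting the
+ * runtime libavutil version and build configuration with the helpers above.
+ *
+ *     printf("libavutil %u (%s)\n", avutil_version(), av_version_info());
+ *     printf("configuration: %s\n", avutil_configuration());
+ */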
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavu_media Media Type
+ * @brief Media Type
+ */
+
+enum AVMediaType {
+ AVMEDIA_TYPE_UNKNOWN = -1, ///< Usually treated as AVMEDIA_TYPE_DATA
+ AVMEDIA_TYPE_VIDEO,
+ AVMEDIA_TYPE_AUDIO,
+ AVMEDIA_TYPE_DATA, ///< Opaque data information usually continuous
+ AVMEDIA_TYPE_SUBTITLE,
+ AVMEDIA_TYPE_ATTACHMENT, ///< Opaque data information usually sparse
+ AVMEDIA_TYPE_NB
+};
+
+/**
+ * Return a string describing the media_type enum, NULL if media_type
+ * is unknown.
+ */
+const char *av_get_media_type_string(enum AVMediaType media_type);
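+
+/*
+ * A minimal usage sketch (illustrative only, assumes <stdio.h>): printing a
+ * readable name for a media type, falling back when the helper returns NULL.
+ *
+ *     enum AVMediaType type = AVMEDIA_TYPE_AUDIO;
+ *     const char *name = av_get_media_type_string(type);
+ *     printf("media type: %s\n", name ? name : "unknown");
+ */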
+
+/**
+ * @defgroup lavu_const Constants
+ * @{
+ *
+ * @defgroup lavu_enc Encoding specific
+ *
+ * @note those definitions should move to avcodec
+ * @{
+ */
+
+#define FF_LAMBDA_SHIFT 7
+#define FF_LAMBDA_SCALE (1<<FF_LAMBDA_SHIFT)
+#define FF_QP2LAMBDA 118 ///< factor to convert from H.263 QP to lambda
+#define FF_LAMBDA_MAX (256*128-1)
+
+#define FF_QUALITY_SCALE FF_LAMBDA_SCALE //FIXME maybe remove
+
+/**
+ * @}
+ * @defgroup lavu_time Timestamp specific
+ *
+ * FFmpeg internal timebase and timestamp definitions
+ *
+ * @{
+ */
+
+/**
+ * @brief Undefined timestamp value
+ *
+ * Usually reported by demuxers that work on containers that do not provide
+ * either pts or dts.
+ */
+
+#define AV_NOPTS_VALUE ((int64_t)UINT64_C(0x8000000000000000))
+
+/**
+ * Internal time base represented as integer
+ */
+
+#define AV_TIME_BASE 1000000
+
+/**
+ * Internal time base represented as fractional value
+ */
+
+#define AV_TIME_BASE_Q (AVRational){1, AV_TIME_BASE}
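+
+/*
+ * A minimal usage sketch (illustrative only): converting a timestamp expressed
+ * in AV_TIME_BASE units to seconds, treating AV_NOPTS_VALUE as "unknown".
+ *
+ *     int64_t pts = 2500000;                  // 2.5 seconds in AV_TIME_BASE units
+ *     if (pts != AV_NOPTS_VALUE) {
+ *         double seconds = pts / (double)AV_TIME_BASE;
+ *     }
+ */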
+
+/**
+ * @}
+ * @}
+ * @defgroup lavu_picture Image related
+ *
+ * AVPicture types, pixel formats and basic image planes manipulation.
+ *
+ * @{
+ */
+
+enum AVPictureType {
+ AV_PICTURE_TYPE_NONE = 0, ///< Undefined
+ AV_PICTURE_TYPE_I, ///< Intra
+ AV_PICTURE_TYPE_P, ///< Predicted
+ AV_PICTURE_TYPE_B, ///< Bi-dir predicted
+ AV_PICTURE_TYPE_S, ///< S(GMC)-VOP MPEG4
+ AV_PICTURE_TYPE_SI, ///< Switching Intra
+ AV_PICTURE_TYPE_SP, ///< Switching Predicted
+ AV_PICTURE_TYPE_BI, ///< BI type
+};
+
+/**
+ * Return a single letter to describe the given picture type
+ * pict_type.
+ *
+ * @param[in] pict_type the picture type
+ * @return a single character representing the picture type,
+ *         '?' if pict_type is unknown
+ */
+char av_get_picture_type_char(enum AVPictureType pict_type);
+
+/**
+ * @}
+ */
+
+#include "common.h"
+#include "error.h"
+#include "rational.h"
+#include "version.h"
+#include "macros.h"
+#include "mathematics.h"
+#include "log.h"
+#include "pixfmt.h"
+
+/**
+ * Return the default pointer x in case p is NULL.
+ */
+static inline void *av_x_if_null(const void *p, const void *x)
+{
+ return (void *)(intptr_t)(p ? p : x);
+}
+
+/**
+ * Compute the length of an integer list.
+ *
+ * @param elsize size in bytes of each list element (only 1, 2, 4 or 8)
+ * @param term list terminator (usually 0 or -1)
+ * @param list pointer to the list
+ * @return length of the list, in elements, not counting the terminator
+ */
+unsigned av_int_list_length_for_size(unsigned elsize,
+ const void *list, uint64_t term) av_pure;
+
+/**
+ * Compute the length of an integer list.
+ *
+ * @param term list terminator (usually 0 or -1)
+ * @param list pointer to the list
+ * @return length of the list, in elements, not counting the terminator
+ */
+#define av_int_list_length(list, term) \
+ av_int_list_length_for_size(sizeof(*(list)), list, term)
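+
+/*
+ * A minimal usage sketch (illustrative only): counting the entries of a
+ * zero-terminated int array with av_int_list_length().
+ *
+ *     static const int list[] = { 3, 1, 4, 1, 5, 0 };
+ *     unsigned n = av_int_list_length(list, 0);   // n == 5, terminator excluded
+ */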
+
+/**
+ * Open a file using a UTF-8 filename.
+ * The API of this function matches POSIX fopen(), errors are returned through
+ * errno.
+ */
+FILE *av_fopen_utf8(const char *path, const char *mode);
+
+/**
+ * Return the fractional representation of the internal time base.
+ */
+AVRational av_get_time_base_q(void);
+
+/**
+ * @}
+ * @}
+ */
+
+#endif /* AVUTIL_AVUTIL_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/buffer.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/buffer.h
new file mode 100644
index 000000000..b4399fd39
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/buffer.h
@@ -0,0 +1,274 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_buffer
+ * refcounted data buffer API
+ */
+
+#ifndef AVUTIL_BUFFER_H
+#define AVUTIL_BUFFER_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_buffer AVBuffer
+ * @ingroup lavu_data
+ *
+ * @{
+ * AVBuffer is an API for reference-counted data buffers.
+ *
+ * There are two core objects in this API -- AVBuffer and AVBufferRef. AVBuffer
+ * represents the data buffer itself; it is opaque and not meant to be accessed
+ * by the caller directly, but only through AVBufferRef. However, the caller may
+ * e.g. compare two AVBuffer pointers to check whether two different references
+ * are describing the same data buffer. AVBufferRef represents a single
+ * reference to an AVBuffer and it is the object that may be manipulated by the
+ * caller directly.
+ *
+ * There are two functions provided for creating a new AVBuffer with a single
+ * reference -- av_buffer_alloc() to just allocate a new buffer, and
+ * av_buffer_create() to wrap an existing array in an AVBuffer. From an existing
+ * reference, additional references may be created with av_buffer_ref().
+ * Use av_buffer_unref() to free a reference (this will automatically free the
+ * data once all the references are freed).
+ *
+ * The convention throughout this API and the rest of FFmpeg is such that the
+ * buffer is considered writable if there exists only one reference to it (and
+ * it has not been marked as read-only). The av_buffer_is_writable() function is
+ * provided to check whether this is true and av_buffer_make_writable() will
+ * automatically create a new writable buffer when necessary.
+ * Of course nothing prevents the calling code from violating this convention;
+ * however, that is safe only when all the existing references are under its
+ * control.
+ *
+ * @note Referencing and unreferencing the buffers is thread-safe and thus
+ * may be done from multiple threads simultaneously without any need for
+ * additional locking.
+ *
+ * @note Two different references to the same buffer can point to different
+ * parts of the buffer (i.e. their AVBufferRef.data will not be equal).
+ */
+
+/**
+ * A reference counted buffer type. It is opaque and is meant to be used through
+ * references (AVBufferRef).
+ */
+typedef struct AVBuffer AVBuffer;
+
+/**
+ * A reference to a data buffer.
+ *
+ * The size of this struct is not a part of the public ABI and it is not meant
+ * to be allocated directly.
+ */
+typedef struct AVBufferRef {
+ AVBuffer *buffer;
+
+ /**
+ * The data buffer. It is considered writable if and only if
+ * this is the only reference to the buffer, in which case
+ * av_buffer_is_writable() returns 1.
+ */
+ uint8_t *data;
+ /**
+ * Size of data in bytes.
+ */
+ int size;
+} AVBufferRef;
+
+/**
+ * Allocate an AVBuffer of the given size using av_malloc().
+ *
+ * @return an AVBufferRef of given size or NULL when out of memory
+ */
+AVBufferRef *av_buffer_alloc(int size);
+
+/**
+ * Same as av_buffer_alloc(), except the returned buffer will be initialized
+ * to zero.
+ */
+AVBufferRef *av_buffer_allocz(int size);
+
+/**
+ * Always treat the buffer as read-only, even when it has only one
+ * reference.
+ */
+#define AV_BUFFER_FLAG_READONLY (1 << 0)
+
+/**
+ * Create an AVBuffer from an existing array.
+ *
+ * If this function is successful, data is owned by the AVBuffer. The caller may
+ * only access data through the returned AVBufferRef and references derived from
+ * it.
+ * If this function fails, data is left untouched.
+ * @param data data array
+ * @param size size of data in bytes
+ * @param free a callback for freeing this buffer's data
+ * @param opaque parameter to be passed to free, retrievable later with av_buffer_get_opaque()
+ * @param flags a combination of AV_BUFFER_FLAG_*
+ *
+ * @return an AVBufferRef referring to data on success, NULL on failure.
+ */
+AVBufferRef *av_buffer_create(uint8_t *data, int size,
+ void (*free)(void *opaque, uint8_t *data),
+ void *opaque, int flags);
+
+/**
+ * Default free callback, which calls av_free() on the buffer data.
+ * This function is meant to be passed to av_buffer_create(), not called
+ * directly.
+ */
+void av_buffer_default_free(void *opaque, uint8_t *data);
+
+/**
+ * Create a new reference to an AVBuffer.
+ *
+ * @return a new AVBufferRef referring to the same AVBuffer as buf or NULL on
+ * failure.
+ */
+AVBufferRef *av_buffer_ref(AVBufferRef *buf);
+
+/**
+ * Free a given reference and automatically free the buffer if there are no more
+ * references to it.
+ *
+ * @param buf the reference to be freed. The pointer is set to NULL on return.
+ */
+void av_buffer_unref(AVBufferRef **buf);
+
+/**
+ * @return 1 if the caller may write to the data referred to by buf (which is
+ * true if and only if buf is the only reference to the underlying AVBuffer).
+ * Return 0 otherwise.
+ * A positive answer is valid until av_buffer_ref() is called on buf.
+ */
+int av_buffer_is_writable(const AVBufferRef *buf);
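+
+/*
+ * A minimal usage sketch (illustrative only) of the reference-counting rules
+ * described above: a buffer is writable only while it holds a single reference.
+ * Error handling (NULL returns) is omitted for brevity.
+ *
+ *     AVBufferRef *a = av_buffer_alloc(4096);   // one reference -> writable
+ *     AVBufferRef *b = av_buffer_ref(a);        // two references -> not writable
+ *     int w = av_buffer_is_writable(a);         // w == 0
+ *     av_buffer_unref(&b);                      // back to a single reference
+ *     av_buffer_unref(&a);                      // last reference, data is freed
+ */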
+
+/**
+ * @return the opaque parameter set by av_buffer_create.
+ */
+void *av_buffer_get_opaque(const AVBufferRef *buf);
+
+/**
+ * @return the number of existing references to the underlying AVBuffer.
+ */
+int av_buffer_get_ref_count(const AVBufferRef *buf);
+
+/**
+ * Create a writable reference from a given buffer reference, avoiding data copy
+ * if possible.
+ *
+ * @param buf buffer reference to make writable. On success, buf is either left
+ * untouched, or it is unreferenced and a new writable AVBufferRef is
+ * written in its place. On failure, buf is left untouched.
+ * @return 0 on success, a negative AVERROR on failure.
+ */
+int av_buffer_make_writable(AVBufferRef **buf);
+
+/**
+ * Reallocate a given buffer.
+ *
+ * @param buf a buffer reference to reallocate. On success, buf will be
+ * unreferenced and a new reference with the required size will be
+ * written in its place. On failure buf will be left untouched. *buf
+ * may be NULL, then a new buffer is allocated.
+ * @param size required new buffer size.
+ * @return 0 on success, a negative AVERROR on failure.
+ *
+ * @note the buffer is actually reallocated with av_realloc() only if it was
+ * initially allocated through av_buffer_realloc(NULL) and there is only one
+ * reference to it (i.e. the one passed to this function). In all other cases
+ * a new buffer is allocated and the data is copied.
+ */
+int av_buffer_realloc(AVBufferRef **buf, int size);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavu_bufferpool AVBufferPool
+ * @ingroup lavu_data
+ *
+ * @{
+ * AVBufferPool is an API for a lock-free thread-safe pool of AVBuffers.
+ *
+ * Frequently allocating and freeing large buffers may be slow. AVBufferPool is
+ * meant to solve this in cases when the caller needs a set of buffers of the
+ * same size (the most obvious use case being buffers for raw video or audio
+ * frames).
+ *
+ * At the beginning, the user must call av_buffer_pool_init() to create the
+ * buffer pool. Then whenever a buffer is needed, call av_buffer_pool_get() to
+ * get a reference to a new buffer, similar to av_buffer_alloc(). This new
+ * reference works in all aspects the same way as the one created by
+ * av_buffer_alloc(). However, when the last reference to this buffer is
+ * unreferenced, it is returned to the pool instead of being freed and will be
+ * reused for subsequent av_buffer_pool_get() calls.
+ *
+ * When the caller is done with the pool and no longer needs to allocate any new
+ * buffers, av_buffer_pool_uninit() must be called to mark the pool as freeable.
+ * Once all the buffers are released, it will automatically be freed.
+ *
+ * Allocating and releasing buffers with this API is thread-safe as long as
+ * either the default alloc callback is used, or the user-supplied one is
+ * thread-safe.
+ */
+
+/**
+ * The buffer pool. This structure is opaque and not meant to be accessed
+ * directly. It is allocated with av_buffer_pool_init() and freed with
+ * av_buffer_pool_uninit().
+ */
+typedef struct AVBufferPool AVBufferPool;
+
+/**
+ * Allocate and initialize a buffer pool.
+ *
+ * @param size size of each buffer in this pool
+ * @param alloc a function that will be used to allocate new buffers when the
+ * pool is empty. May be NULL, then the default allocator will be used
+ * (av_buffer_alloc()).
+ * @return newly created buffer pool on success, NULL on error.
+ */
+AVBufferPool *av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size));
+
+/**
+ * Mark the pool as being available for freeing. It will actually be freed only
+ * once all the allocated buffers associated with the pool are released. Thus it
+ * is safe to call this function while some of the allocated buffers are still
+ * in use.
+ *
+ * @param pool pointer to the pool to be freed. It will be set to NULL.
+ * @see av_buffer_pool_can_uninit()
+ */
+void av_buffer_pool_uninit(AVBufferPool **pool);
+
+/**
+ * Allocate a new AVBuffer, reusing an old buffer from the pool when available.
+ * This function may be called simultaneously from multiple threads.
+ *
+ * @return a reference to the new buffer on success, NULL on error.
+ */
+AVBufferRef *av_buffer_pool_get(AVBufferPool *pool);
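+
+/*
+ * A minimal usage sketch (illustrative only) of the pool lifecycle described
+ * above, using the default allocator:
+ *
+ *     AVBufferPool *pool = av_buffer_pool_init(4096, NULL);
+ *     AVBufferRef *buf = av_buffer_pool_get(pool);   // reused on later calls
+ *     av_buffer_unref(&buf);                         // returns the buffer to the pool
+ *     av_buffer_pool_uninit(&pool);                  // freed once all buffers are released
+ */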
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_BUFFER_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/channel_layout.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/channel_layout.h
new file mode 100644
index 000000000..ec7effead
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/channel_layout.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2008 Peter Ross
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CHANNEL_LAYOUT_H
+#define AVUTIL_CHANNEL_LAYOUT_H
+
+#include <stdint.h>
+
+/**
+ * @file
+ * audio channel layout utility functions
+ */
+
+/**
+ * @addtogroup lavu_audio
+ * @{
+ */
+
+/**
+ * @defgroup channel_masks Audio channel masks
+ *
+ * A channel layout is a 64-bit integer with a bit set for every channel.
+ * The number of bits set must be equal to the number of channels.
+ * The value 0 means that the channel layout is not known.
+ * @note this data structure is not powerful enough to handle channel
+ * combinations that have the same channel multiple times, such as
+ * dual-mono.
+ *
+ * @{
+ */
+#define AV_CH_FRONT_LEFT 0x00000001
+#define AV_CH_FRONT_RIGHT 0x00000002
+#define AV_CH_FRONT_CENTER 0x00000004
+#define AV_CH_LOW_FREQUENCY 0x00000008
+#define AV_CH_BACK_LEFT 0x00000010
+#define AV_CH_BACK_RIGHT 0x00000020
+#define AV_CH_FRONT_LEFT_OF_CENTER 0x00000040
+#define AV_CH_FRONT_RIGHT_OF_CENTER 0x00000080
+#define AV_CH_BACK_CENTER 0x00000100
+#define AV_CH_SIDE_LEFT 0x00000200
+#define AV_CH_SIDE_RIGHT 0x00000400
+#define AV_CH_TOP_CENTER 0x00000800
+#define AV_CH_TOP_FRONT_LEFT 0x00001000
+#define AV_CH_TOP_FRONT_CENTER 0x00002000
+#define AV_CH_TOP_FRONT_RIGHT 0x00004000
+#define AV_CH_TOP_BACK_LEFT 0x00008000
+#define AV_CH_TOP_BACK_CENTER 0x00010000
+#define AV_CH_TOP_BACK_RIGHT 0x00020000
+#define AV_CH_STEREO_LEFT 0x20000000 ///< Stereo downmix.
+#define AV_CH_STEREO_RIGHT 0x40000000 ///< See AV_CH_STEREO_LEFT.
+#define AV_CH_WIDE_LEFT 0x0000000080000000ULL
+#define AV_CH_WIDE_RIGHT 0x0000000100000000ULL
+#define AV_CH_SURROUND_DIRECT_LEFT 0x0000000200000000ULL
+#define AV_CH_SURROUND_DIRECT_RIGHT 0x0000000400000000ULL
+#define AV_CH_LOW_FREQUENCY_2 0x0000000800000000ULL
+
+/** Channel mask value used for AVCodecContext.request_channel_layout
+ to indicate that the user requests the channel order of the decoder output
+ to be the native codec channel order. */
+#define AV_CH_LAYOUT_NATIVE 0x8000000000000000ULL
+
+/**
+ * @}
+ * @defgroup channel_mask_c Audio channel layouts
+ * @{
+ */
+#define AV_CH_LAYOUT_MONO (AV_CH_FRONT_CENTER)
+#define AV_CH_LAYOUT_STEREO (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT)
+#define AV_CH_LAYOUT_2POINT1 (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_2_1 (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_SURROUND (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER)
+#define AV_CH_LAYOUT_3POINT1 (AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_4POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_4POINT1 (AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_2_2 (AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)
+#define AV_CH_LAYOUT_QUAD (AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_5POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)
+#define AV_CH_LAYOUT_5POINT1 (AV_CH_LAYOUT_5POINT0|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_5POINT0_BACK (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_5POINT1_BACK (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_6POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT0_FRONT (AV_CH_LAYOUT_2_2|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_HEXAGONAL (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1_FRONT (AV_CH_LAYOUT_6POINT0_FRONT|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_7POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_7POINT0_FRONT (AV_CH_LAYOUT_5POINT0|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_7POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_7POINT1_WIDE (AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_OCTAGONAL (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_HEXADECAGONAL (AV_CH_LAYOUT_OCTAGONAL|AV_CH_WIDE_LEFT|AV_CH_WIDE_RIGHT|AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_RIGHT|AV_CH_TOP_BACK_CENTER|AV_CH_TOP_FRONT_CENTER|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT)
+#define AV_CH_LAYOUT_STEREO_DOWNMIX (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT)
+
+enum AVMatrixEncoding {
+ AV_MATRIX_ENCODING_NONE,
+ AV_MATRIX_ENCODING_DOLBY,
+ AV_MATRIX_ENCODING_DPLII,
+ AV_MATRIX_ENCODING_DPLIIX,
+ AV_MATRIX_ENCODING_DPLIIZ,
+ AV_MATRIX_ENCODING_DOLBYEX,
+ AV_MATRIX_ENCODING_DOLBYHEADPHONE,
+ AV_MATRIX_ENCODING_NB
+};
+
+/**
+ * Return a channel layout id that matches name, or 0 if no match is found.
+ *
+ * name can be one or several of the following notations,
+ * separated by '+' or '|':
+ * - the name of a usual channel layout (mono, stereo, 4.0, quad, 5.0,
+ * 5.0(side), 5.1, 5.1(side), 7.1, 7.1(wide), downmix);
+ * - the name of a single channel (FL, FR, FC, LFE, BL, BR, FLC, FRC, BC,
+ * SL, SR, TC, TFL, TFC, TFR, TBL, TBC, TBR, DL, DR);
+ * - a number of channels, in decimal, optionally followed by 'c', yielding
+ * the default channel layout for that number of channels (@see
+ * av_get_default_channel_layout);
+ * - a channel layout mask, in hexadecimal starting with "0x" (see the
+ * AV_CH_* macros).
+ *
+ * @warning Starting from the next major bump the trailing character
+ * 'c' to specify a number of channels will be required, while a
+ * channel layout mask could also be specified as a decimal number
+ * (if and only if not followed by "c").
+ *
+ * Example: "stereo+FC" = "2c+FC" = "2c+1c" = "0x7"
+ */
+uint64_t av_get_channel_layout(const char *name);
+
+/**
+ * Return a description of a channel layout.
+ * If nb_channels is <= 0, it is guessed from the channel_layout.
+ *
+ * @param buf buffer into which the string describing the channel layout is written
+ * @param buf_size size in bytes of the buffer
+ */
+void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout);
+
+struct AVBPrint;
+/**
+ * Append a description of a channel layout to a bprint buffer.
+ */
+void av_bprint_channel_layout(struct AVBPrint *bp, int nb_channels, uint64_t channel_layout);
+
+/**
+ * Return the number of channels in the channel layout.
+ */
+int av_get_channel_layout_nb_channels(uint64_t channel_layout);
+
+/**
+ * Return default channel layout for a given number of channels.
+ */
+int64_t av_get_default_channel_layout(int nb_channels);
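+
+/*
+ * A minimal usage sketch (illustrative only): resolving a layout by name and
+ * querying it with the helpers declared above.
+ *
+ *     uint64_t layout = av_get_channel_layout("5.1");
+ *     int channels = av_get_channel_layout_nb_channels(layout);   // 6
+ *     char buf[128];
+ *     av_get_channel_layout_string(buf, sizeof(buf), channels, layout);
+ */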
+
+/**
+ * Get the index of a channel in channel_layout.
+ *
+ * @param channel a channel layout describing exactly one channel which must be
+ * present in channel_layout.
+ *
+ * @return index of channel in channel_layout on success, a negative AVERROR
+ * on error.
+ */
+int av_get_channel_layout_channel_index(uint64_t channel_layout,
+ uint64_t channel);
+
+/**
+ * Get the channel with the given index in channel_layout.
+ */
+uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index);
+
+/**
+ * Get the name of a given channel.
+ *
+ * @return channel name on success, NULL on error.
+ */
+const char *av_get_channel_name(uint64_t channel);
+
+/**
+ * Get the description of a given channel.
+ *
+ * @param channel a channel layout with a single channel
+ * @return channel description on success, NULL on error
+ */
+const char *av_get_channel_description(uint64_t channel);
+
+/**
+ * Get the value and name of a standard channel layout.
+ *
+ * @param[in] index index in an internal list, starting at 0
+ * @param[out] layout channel layout mask
+ * @param[out] name name of the layout
+ * @return 0 if the layout exists,
+ * <0 if index is beyond the limits
+ */
+int av_get_standard_channel_layout(unsigned index, uint64_t *layout,
+ const char **name);
+
+/**
+ * @}
+ * @}
+ */
+
+#endif /* AVUTIL_CHANNEL_LAYOUT_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/common.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/common.h
new file mode 100644
index 000000000..ea636431c
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/common.h
@@ -0,0 +1,519 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * common internal and external API header
+ */
+
+#ifndef AVUTIL_COMMON_H
+#define AVUTIL_COMMON_H
+
+#if defined(__cplusplus) && !defined(__STDC_CONSTANT_MACROS) && !defined(UINT64_C)
+#error missing -D__STDC_CONSTANT_MACROS / #define __STDC_CONSTANT_MACROS
+#endif
+
+#include <errno.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "attributes.h"
+#include "macros.h"
+#include "version.h"
+#include "libavutil/avconfig.h"
+
+#if AV_HAVE_BIGENDIAN
+# define AV_NE(be, le) (be)
+#else
+# define AV_NE(be, le) (le)
+#endif
+
+//rounded division & shift
+#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b))
+/* assume b>0 */
+#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))
+/* assume a>0 and b>0 */
+#define FF_CEIL_RSHIFT(a,b) (!av_builtin_constant_p(b) ? -((-(a)) >> (b)) \
+ : ((a) + (1<<(b)) - 1) >> (b))
+#define FFUDIV(a,b) (((a)>0 ?(a):(a)-(b)+1) / (b))
+#define FFUMOD(a,b) ((a)-(b)*FFUDIV(a,b))
+
+/**
+ * Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior, as they
+ * are not representable as absolute values of their type. This is the same
+ * as with *abs().
+ * @see FFNABS()
+ */
+#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))
+#define FFSIGN(a) ((a) > 0 ? 1 : -1)
+
+/**
+ * Negative absolute value.
+ * This works for all integers of all types.
+ * As with many macros, this evaluates its argument twice; it thus must not have
+ * a side effect, that is, FFNABS(x++) has undefined behavior.
+ */
+#define FFNABS(a) ((a) <= 0 ? (a) : (-(a)))
+
+/**
+ * Comparator.
+ * For two numerical expressions x and y, gives 1 if x > y, -1 if x < y, and 0
+ * if x == y. This is useful for instance in a qsort comparator callback.
+ * Furthermore, compilers are able to optimize this to branchless code, and
+ * there is no risk of overflow with signed types.
+ * As with many macros, this evaluates its arguments multiple times; they thus
+ * must not have side effects.
+ */
+#define FFDIFFSIGN(x,y) (((x)>(y)) - ((x)<(y)))
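+
+/*
+ * A minimal usage sketch (illustrative only, assumes <stdlib.h>): using
+ * FFDIFFSIGN as the overflow-safe qsort() comparator mentioned above.
+ *
+ *     static int cmp_int(const void *pa, const void *pb)
+ *     {
+ *         int a = *(const int *)pa, b = *(const int *)pb;
+ *         return FFDIFFSIGN(a, b);   // -1, 0 or 1; no "a - b" overflow
+ *     }
+ *     // qsort(values, nb_values, sizeof(*values), cmp_int);
+ */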
+
+#define FFMAX(a,b) ((a) > (b) ? (a) : (b))
+#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c)
+#define FFMIN(a,b) ((a) > (b) ? (b) : (a))
+#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c)
+
+#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0)
+#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))
+
+/* misc math functions */
+
+#ifdef HAVE_AV_CONFIG_H
+# include "config.h"
+# include "intmath.h"
+#endif
+
+/* Pull in unguarded fallback defines at the end of this file. */
+#include "common.h"
+
+#ifndef av_log2
+av_const int av_log2(unsigned v);
+#endif
+
+#ifndef av_log2_16bit
+av_const int av_log2_16bit(unsigned v);
+#endif
+
+/**
+ * Clip a signed integer value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const int av_clip_c(int a, int amin, int amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ if (a < amin) return amin;
+ else if (a > amax) return amax;
+ else return a;
+}
+
+/**
+ * Clip a signed 64bit integer value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const int64_t av_clip64_c(int64_t a, int64_t amin, int64_t amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ if (a < amin) return amin;
+ else if (a > amax) return amax;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the 0-255 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const uint8_t av_clip_uint8_c(int a)
+{
+ if (a&(~0xFF)) return (-a)>>31;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the -128,127 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int8_t av_clip_int8_c(int a)
+{
+ if ((a+0x80U) & ~0xFF) return (a>>31) ^ 0x7F;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the 0-65535 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const uint16_t av_clip_uint16_c(int a)
+{
+ if (a&(~0xFFFF)) return (-a)>>31;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the -32768,32767 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int16_t av_clip_int16_c(int a)
+{
+ if ((a+0x8000U) & ~0xFFFF) return (a>>31) ^ 0x7FFF;
+ else return a;
+}
+
+/**
+ * Clip a signed 64-bit integer value into the -2147483648,2147483647 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int32_t av_clipl_int32_c(int64_t a)
+{
+ if ((a+0x80000000u) & ~UINT64_C(0xFFFFFFFF)) return (int32_t)((a>>63) ^ 0x7FFFFFFF);
+ else return (int32_t)a;
+}
+
+/**
+ * Clip a signed integer into the -(2^p),(2^p-1) range.
+ * @param a value to clip
+ * @param p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const int av_clip_intp2_c(int a, int p)
+{
+ if ((a + (1 << p)) & ~((2 << p) - 1))
+ return (a >> 31) ^ ((1 << p) - 1);
+ else
+ return a;
+}
+
+/**
+ * Clip a signed integer to an unsigned power of two range.
+ * @param a value to clip
+ * @param p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
+{
+ if (a & ~((1<<p) - 1)) return -a >> 31 & ((1<<p) - 1);
+ else return a;
+}
+
+/**
+ * Clear high bits from an unsigned integer, starting at the specified bit position.
+ * @param a value to clip
+ * @param p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const unsigned av_mod_uintp2_c(unsigned a, unsigned p)
+{
+ return a & ((1 << p) - 1);
+}
+
+/**
+ * Add two signed 32-bit values with saturation.
+ *
+ * @param a one value
+ * @param b another value
+ * @return sum with signed saturation
+ */
+static av_always_inline int av_sat_add32_c(int a, int b)
+{
+ return av_clipl_int32((int64_t)a + b);
+}
+
+/**
+ * Add a doubled value to another value with saturation at both stages.
+ *
+ * @param a first value
+ * @param b value doubled and added to a
+ * @return sum with signed saturation
+ */
+static av_always_inline int av_sat_dadd32_c(int a, int b)
+{
+ return av_sat_add32(a, av_sat_add32(b, b));
+}
+
+/**
+ * Clip a float value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const float av_clipf_c(float a, float amin, float amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ if (a < amin) return amin;
+ else if (a > amax) return amax;
+ else return a;
+}
+
+/**
+ * Clip a double value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const double av_clipd_c(double a, double amin, double amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ if (a < amin) return amin;
+ else if (a > amax) return amax;
+ else return a;
+}
+
+/** Compute ceil(log2(x)).
+ * @param x value used to compute ceil(log2(x))
+ * @return computed ceiling of log2(x)
+ */
+static av_always_inline av_const int av_ceil_log2_c(int x)
+{
+ return av_log2((x - 1) << 1);
+}
+
+/**
+ * Count number of bits set to one in x
+ * @param x value to count bits of
+ * @return the number of bits set to one in x
+ */
+static av_always_inline av_const int av_popcount_c(uint32_t x)
+{
+ x -= (x >> 1) & 0x55555555;
+ x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+ x = (x + (x >> 4)) & 0x0F0F0F0F;
+ x += x >> 8;
+ return (x + (x >> 16)) & 0x3F;
+}
+
+/**
+ * Count number of bits set to one in x
+ * @param x value to count bits of
+ * @return the number of bits set to one in x
+ */
+static av_always_inline av_const int av_popcount64_c(uint64_t x)
+{
+ return av_popcount((uint32_t)x) + av_popcount((uint32_t)(x >> 32));
+}
+
+#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))
+#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24))
+
+/**
+ * Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.
+ *
+ * @param val Output value, must be an lvalue of type uint32_t.
+ * @param GET_BYTE Expression reading one byte from the input.
+ * Evaluated up to 7 times (4 for the currently
+ * assigned Unicode range). With a memory buffer
+ * input, this could be *ptr++.
+ * @param ERROR Expression to be evaluated on invalid input,
+ * typically a goto statement.
+ *
+ * @warning ERROR should not contain a loop control statement which
+ * could interact with the internal while loop, and should force an
+ * exit from the macro code (e.g. through a goto or a return) in order
+ * to prevent undefined results.
+ */
+#define GET_UTF8(val, GET_BYTE, ERROR)\
+ val= GET_BYTE;\
+ {\
+ uint32_t top = (val & 128) >> 1;\
+ if ((val & 0xc0) == 0x80 || val >= 0xFE)\
+ ERROR\
+ while (val & top) {\
+ int tmp= GET_BYTE - 128;\
+ if(tmp>>6)\
+ ERROR\
+ val= (val<<6) + tmp;\
+ top <<= 5;\
+ }\
+ val &= (top << 1) - 1;\
+ }
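+
+/*
+ * A minimal usage sketch (illustrative only): decoding one code point from a
+ * memory buffer with GET_UTF8; "p" is a hypothetical uint8_t pointer into the
+ * input, and the ERROR expression jumps to a local error label.
+ *
+ *     uint32_t cp;
+ *     GET_UTF8(cp, *p++, goto invalid_utf8;)
+ *     // ... use cp ...
+ * invalid_utf8:
+ *     // handle malformed input
+ */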
+
+/**
+ * Convert a UTF-16 character (2 or 4 bytes) to its 32-bit UCS-4 encoded form.
+ *
+ * @param val Output value, must be an lvalue of type uint32_t.
+ * @param GET_16BIT Expression returning two bytes of UTF-16 data converted
+ * to native byte order. Evaluated one or two times.
+ * @param ERROR Expression to be evaluated on invalid input,
+ * typically a goto statement.
+ */
+#define GET_UTF16(val, GET_16BIT, ERROR)\
+ val = GET_16BIT;\
+ {\
+ unsigned int hi = val - 0xD800;\
+ if (hi < 0x800) {\
+ val = GET_16BIT - 0xDC00;\
+ if (val > 0x3FFU || hi > 0x3FFU)\
+ ERROR\
+ val += (hi<<10) + 0x10000;\
+ }\
+ }\
+
+/**
+ * @def PUT_UTF8(val, tmp, PUT_BYTE)
+ * Convert a 32-bit Unicode character to its UTF-8 encoded form (up to 4 bytes long).
+ * @param val is an input-only argument and should be of type uint32_t. It holds
+ * a UCS-4 encoded Unicode character that is to be converted to UTF-8. If
+ * val is given as a function it is executed only once.
+ * @param tmp is a temporary variable and should be of type uint8_t. It
+ * represents an intermediate value during conversion that is to be
+ * output by PUT_BYTE.
+ * @param PUT_BYTE writes the converted UTF-8 bytes to any proper destination.
+ * It could be a function or a statement, and uses tmp as the input byte.
+ * For example, PUT_BYTE could be "*output++ = tmp;" PUT_BYTE will be
+ * executed up to 4 times for values in the valid UTF-8 range and up to
+ * 7 times in the general case, depending on the length of the converted
+ * Unicode character.
+ */
+#define PUT_UTF8(val, tmp, PUT_BYTE)\
+ {\
+ int bytes, shift;\
+ uint32_t in = val;\
+ if (in < 0x80) {\
+ tmp = in;\
+ PUT_BYTE\
+ } else {\
+ bytes = (av_log2(in) + 4) / 5;\
+ shift = (bytes - 1) * 6;\
+ tmp = (256 - (256 >> bytes)) | (in >> shift);\
+ PUT_BYTE\
+ while (shift >= 6) {\
+ shift -= 6;\
+ tmp = 0x80 | ((in >> shift) & 0x3f);\
+ PUT_BYTE\
+ }\
+ }\
+ }
+
+/**
+ * @def PUT_UTF16(val, tmp, PUT_16BIT)
+ * Convert a 32-bit Unicode character to its UTF-16 encoded form (2 or 4 bytes).
+ * @param val is an input-only argument and should be of type uint32_t. It holds
+ * a UCS-4 encoded Unicode character that is to be converted to UTF-16. If
+ * val is given as a function it is executed only once.
+ * @param tmp is a temporary variable and should be of type uint16_t. It
+ * represents an intermediate value during conversion that is to be
+ * output by PUT_16BIT.
+ * @param PUT_16BIT writes the converted UTF-16 data to any proper destination
+ * in desired endianness. It could be a function or a statement, and uses tmp
+ * as the input value. For example, PUT_16BIT could be "*output++ = tmp;"
+ * PUT_16BIT will be executed 1 or 2 times depending on the input character.
+ */
+#define PUT_UTF16(val, tmp, PUT_16BIT)\
+ {\
+ uint32_t in = val;\
+ if (in < 0x10000) {\
+ tmp = in;\
+ PUT_16BIT\
+ } else {\
+ tmp = 0xD800 | ((in - 0x10000) >> 10);\
+ PUT_16BIT\
+ tmp = 0xDC00 | ((in - 0x10000) & 0x3FF);\
+ PUT_16BIT\
+ }\
+ }\
+
+
+
+#include "mem.h"
+
+#ifdef HAVE_AV_CONFIG_H
+# include "internal.h"
+#endif /* HAVE_AV_CONFIG_H */
+
+#endif /* AVUTIL_COMMON_H */
+
+/*
+ * The following definitions are outside the multiple inclusion guard
+ * to ensure they are immediately available in intmath.h.
+ */
+
+#ifndef av_ceil_log2
+# define av_ceil_log2 av_ceil_log2_c
+#endif
+#ifndef av_clip
+# define av_clip av_clip_c
+#endif
+#ifndef av_clip64
+# define av_clip64 av_clip64_c
+#endif
+#ifndef av_clip_uint8
+# define av_clip_uint8 av_clip_uint8_c
+#endif
+#ifndef av_clip_int8
+# define av_clip_int8 av_clip_int8_c
+#endif
+#ifndef av_clip_uint16
+# define av_clip_uint16 av_clip_uint16_c
+#endif
+#ifndef av_clip_int16
+# define av_clip_int16 av_clip_int16_c
+#endif
+#ifndef av_clipl_int32
+# define av_clipl_int32 av_clipl_int32_c
+#endif
+#ifndef av_clip_intp2
+# define av_clip_intp2 av_clip_intp2_c
+#endif
+#ifndef av_clip_uintp2
+# define av_clip_uintp2 av_clip_uintp2_c
+#endif
+#ifndef av_mod_uintp2
+# define av_mod_uintp2 av_mod_uintp2_c
+#endif
+#ifndef av_sat_add32
+# define av_sat_add32 av_sat_add32_c
+#endif
+#ifndef av_sat_dadd32
+# define av_sat_dadd32 av_sat_dadd32_c
+#endif
+#ifndef av_clipf
+# define av_clipf av_clipf_c
+#endif
+#ifndef av_clipd
+# define av_clipd av_clipd_c
+#endif
+#ifndef av_popcount
+# define av_popcount av_popcount_c
+#endif
+#ifndef av_popcount64
+# define av_popcount64 av_popcount64_c
+#endif
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/cpu.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/cpu.h
new file mode 100644
index 000000000..cc4e30c4c
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/cpu.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CPU_H
+#define AVUTIL_CPU_H
+
+#include "attributes.h"
+
+#define AV_CPU_FLAG_FORCE 0x80000000 /* force usage of selected flags (OR) */
+
+ /* lower 16 bits - CPU features */
+#define AV_CPU_FLAG_MMX 0x0001 ///< standard MMX
+#define AV_CPU_FLAG_MMXEXT 0x0002 ///< SSE integer functions or AMD MMX ext
+#define AV_CPU_FLAG_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext
+#define AV_CPU_FLAG_3DNOW 0x0004 ///< AMD 3DNOW
+#define AV_CPU_FLAG_SSE 0x0008 ///< SSE functions
+#define AV_CPU_FLAG_SSE2 0x0010 ///< PIV SSE2 functions
+#define AV_CPU_FLAG_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster
+ ///< than regular MMX/SSE (e.g. Core1)
+#define AV_CPU_FLAG_3DNOWEXT 0x0020 ///< AMD 3DNowExt
+#define AV_CPU_FLAG_SSE3 0x0040 ///< Prescott SSE3 functions
+#define AV_CPU_FLAG_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster
+ ///< than regular MMX/SSE (e.g. Core1)
+#define AV_CPU_FLAG_SSSE3 0x0080 ///< Conroe SSSE3 functions
+#define AV_CPU_FLAG_ATOM 0x10000000 ///< Atom processor, some SSSE3 instructions are slower
+#define AV_CPU_FLAG_SSE4 0x0100 ///< Penryn SSE4.1 functions
+#define AV_CPU_FLAG_SSE42 0x0200 ///< Nehalem SSE4.2 functions
+#define AV_CPU_FLAG_AESNI 0x80000 ///< Advanced Encryption Standard functions
+#define AV_CPU_FLAG_AVX 0x4000 ///< AVX functions: requires OS support even if YMM registers aren't used
+#define AV_CPU_FLAG_AVXSLOW 0x8000000 ///< AVX supported, but slow when using YMM registers (e.g. Bulldozer)
+#define AV_CPU_FLAG_XOP 0x0400 ///< Bulldozer XOP functions
+#define AV_CPU_FLAG_FMA4 0x0800 ///< Bulldozer FMA4 functions
+#define AV_CPU_FLAG_CMOV 0x1000 ///< supports cmov instruction
+#define AV_CPU_FLAG_AVX2 0x8000 ///< AVX2 functions: requires OS support even if YMM registers aren't used
+#define AV_CPU_FLAG_FMA3 0x10000 ///< Haswell FMA3 functions
+#define AV_CPU_FLAG_BMI1 0x20000 ///< Bit Manipulation Instruction Set 1
+#define AV_CPU_FLAG_BMI2 0x40000 ///< Bit Manipulation Instruction Set 2
+
+#define AV_CPU_FLAG_ALTIVEC 0x0001 ///< standard
+#define AV_CPU_FLAG_VSX 0x0002 ///< ISA 2.06
+#define AV_CPU_FLAG_POWER8 0x0004 ///< ISA 2.07
+
+#define AV_CPU_FLAG_ARMV5TE (1 << 0)
+#define AV_CPU_FLAG_ARMV6 (1 << 1)
+#define AV_CPU_FLAG_ARMV6T2 (1 << 2)
+#define AV_CPU_FLAG_VFP (1 << 3)
+#define AV_CPU_FLAG_VFPV3 (1 << 4)
+#define AV_CPU_FLAG_NEON (1 << 5)
+#define AV_CPU_FLAG_ARMV8 (1 << 6)
+#define AV_CPU_FLAG_VFP_VM (1 << 7) ///< VFPv2 vector mode, deprecated in ARMv7-A and unavailable in various CPU implementations
+#define AV_CPU_FLAG_SETEND (1 <<16)
+
+/**
+ * Return the flags which specify extensions supported by the CPU.
+ * The returned value is affected by av_force_cpu_flags() if that was used
+ * before, so av_get_cpu_flags() can easily be used in an application to
+ * detect the enabled CPU flags.
+ */
+int av_get_cpu_flags(void);
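+
+/*
+ * A minimal usage sketch (illustrative only): checking at runtime whether the
+ * host CPU reports SSE2 before selecting an optimized code path.
+ *
+ *     int flags = av_get_cpu_flags();
+ *     if (flags & AV_CPU_FLAG_SSE2) {
+ *         // take the SSE2-optimized path
+ *     }
+ */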
+
+/**
+ * Disable CPU detection and force the specified flags.
+ * -1 is a special case that disables forcing of specific flags.
+ */
+void av_force_cpu_flags(int flags);
+
+/**
+ * Set a mask on flags returned by av_get_cpu_flags().
+ * This function is mainly useful for testing.
+ * Please use av_force_cpu_flags() and av_get_cpu_flags() instead, which are more flexible.
+ *
+ * @warning this function is not thread safe.
+ */
+attribute_deprecated void av_set_cpu_flags_mask(int mask);
+
+/**
+ * Parse CPU flags from a string.
+ *
+ * The returned flags contain the specified flags as well as related unspecified flags.
+ *
+ * This function exists only for compatibility with libav.
+ * Please use av_parse_cpu_caps() when possible.
+ * @return a combination of AV_CPU_* flags, negative on error.
+ */
+attribute_deprecated
+int av_parse_cpu_flags(const char *s);
+
+/**
+ * Parse CPU caps from a string and update the given AV_CPU_* flags based on that.
+ *
+ * @return negative on error.
+ */
+int av_parse_cpu_caps(unsigned *flags, const char *s);
+
+/**
+ * @return the number of logical CPU cores present.
+ */
+int av_cpu_count(void);
+
+#endif /* AVUTIL_CPU_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/dict.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/dict.h
new file mode 100644
index 000000000..5b8d00339
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/dict.h
@@ -0,0 +1,198 @@
+/*
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Public dictionary API.
+ * @deprecated
+ * AVDictionary is provided for compatibility with libav. It is inefficient
+ * in both its implementation and its API. It does not scale and is
+ * extremely slow with large dictionaries.
+ * It is recommended that new code use our tree container from tree.c/h
+ * where applicable, which uses AVL trees to achieve O(log n) performance.
+ */
+
+#ifndef AVUTIL_DICT_H
+#define AVUTIL_DICT_H
+
+#include <stdint.h>
+
+#include "version.h"
+
+/**
+ * @addtogroup lavu_dict AVDictionary
+ * @ingroup lavu_data
+ *
+ * @brief Simple key:value store
+ *
+ * @{
+ * Dictionaries are used for storing key:value pairs. To create
+ * an AVDictionary, simply pass an address of a NULL pointer to
+ * av_dict_set(). NULL can be used as an empty dictionary wherever
+ * a pointer to an AVDictionary is required.
+ * Use av_dict_get() to retrieve an entry or iterate over all
+ * entries and finally av_dict_free() to free the dictionary
+ * and all its contents.
+ *
+ @code
+ AVDictionary *d = NULL; // "create" an empty dictionary
+ AVDictionaryEntry *t = NULL;
+
+ av_dict_set(&d, "foo", "bar", 0); // add an entry
+
+ char *k = av_strdup("key"); // if your strings are already allocated,
+ char *v = av_strdup("value"); // you can avoid copying them like this
+ av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
+
+ while (t = av_dict_get(d, "", t, AV_DICT_IGNORE_SUFFIX)) {
+ <....> // iterate over all entries in d
+ }
+ av_dict_free(&d);
+ @endcode
+ *
+ */
+
+#define AV_DICT_MATCH_CASE 1 /**< Only get an entry with exact-case key match. Only relevant in av_dict_get(). */
+#define AV_DICT_IGNORE_SUFFIX 2 /**< Return first entry in a dictionary whose first part corresponds to the search key,
+ ignoring the suffix of the found key string. Only relevant in av_dict_get(). */
+#define AV_DICT_DONT_STRDUP_KEY 4 /**< Take ownership of a key that's been
+ allocated with av_malloc() or another memory allocation function. */
+#define AV_DICT_DONT_STRDUP_VAL 8 /**< Take ownership of a value that's been
+ allocated with av_malloc() or another memory allocation function. */
+#define AV_DICT_DONT_OVERWRITE 16 ///< Don't overwrite existing entries.
+#define AV_DICT_APPEND 32 /**< If the entry already exists, append to it. Note that no
+ delimiter is added, the strings are simply concatenated. */
+
+typedef struct AVDictionaryEntry {
+ char *key;
+ char *value;
+} AVDictionaryEntry;
+
+typedef struct AVDictionary AVDictionary;
+
+/**
+ * Get a dictionary entry with matching key.
+ *
+ * The returned entry key or value must not be changed, or it will
+ * cause undefined behavior.
+ *
+ * To iterate through all the dictionary entries, you can set the matching key
+ * to the null string "" and set the AV_DICT_IGNORE_SUFFIX flag.
+ *
+ * @param prev Set to the previous matching element to find the next.
+ * If set to NULL the first matching element is returned.
+ * @param key matching key
+ * @param flags a collection of AV_DICT_* flags controlling how the entry is retrieved
+ * @return found entry or NULL in case no matching entry was found in the dictionary
+ */
+AVDictionaryEntry *av_dict_get(const AVDictionary *m, const char *key,
+ const AVDictionaryEntry *prev, int flags);
+
+/**
+ * Get number of entries in dictionary.
+ *
+ * @param m dictionary
+ * @return number of entries in dictionary
+ */
+int av_dict_count(const AVDictionary *m);
+
+/**
+ * Set the given entry in *pm, overwriting an existing entry.
+ *
+ * Note: If AV_DICT_DONT_STRDUP_KEY or AV_DICT_DONT_STRDUP_VAL is set,
+ * these arguments will be freed on error.
+ *
+ * @param pm pointer to a pointer to a dictionary struct. If *pm is NULL
+ * a dictionary struct is allocated and put in *pm.
+ * @param key entry key to add to *pm (will be av_strduped depending on flags)
+ * @param value entry value to add to *pm (will be av_strduped depending on flags).
+ * Passing a NULL value will cause an existing entry to be deleted.
+ * @return >= 0 on success otherwise an error code <0
+ */
+int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags);
+
+/**
+ * Convenience wrapper for av_dict_set that converts the value to a string
+ * and stores it.
+ *
+ * Note: If AV_DICT_DONT_STRDUP_KEY is set, key will be freed on error.
+ */
+int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags);
+
+/**
+ * Parse the key/value pairs list and add the parsed entries to a dictionary.
+ *
+ * In case of failure, all the successfully set entries are stored in
+ * *pm. You may need to manually free the created dictionary.
+ *
+ * @param key_val_sep a 0-terminated list of characters used to separate
+ * key from value
+ * @param pairs_sep a 0-terminated list of characters used to separate
+ * two pairs from each other
+ * @param flags flags to use when adding to dictionary.
+ * AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL
+ * are ignored since the key/value tokens will always
+ * be duplicated.
+ * @return 0 on success, negative AVERROR code on failure
+ */
+int av_dict_parse_string(AVDictionary **pm, const char *str,
+ const char *key_val_sep, const char *pairs_sep,
+ int flags);
+
+/**
+ * Copy entries from one AVDictionary struct into another.
+ * @param dst pointer to a pointer to an AVDictionary struct. If *dst is NULL,
+ * this function will allocate a struct for you and put it in *dst
+ * @param src pointer to source AVDictionary struct
+ * @param flags flags to use when setting entries in *dst
+ * @note metadata is read using the AV_DICT_IGNORE_SUFFIX flag
+ * @return 0 on success, negative AVERROR code on failure. If dst was allocated
+ * by this function, callers should free the associated memory.
+ */
+int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags);
+
+/**
+ * Free all the memory allocated for an AVDictionary struct
+ * and all keys and values.
+ */
+void av_dict_free(AVDictionary **m);
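+
+/*
+ * A minimal usage sketch (illustrative only): building a dictionary from an
+ * option string with av_dict_parse_string() and releasing it afterwards. The
+ * key names are arbitrary placeholders.
+ *
+ *     AVDictionary *opts = NULL;
+ *     if (av_dict_parse_string(&opts, "threads=4:foo=bar", "=", ":", 0) < 0) {
+ *         // entries set before the failure are still stored in opts
+ *     }
+ *     av_dict_free(&opts);
+ */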
+
+/**
+ * Get dictionary entries as a string.
+ *
+ * Create a string containing the dictionary's entries.
+ * Such a string may be passed back to av_dict_parse_string().
+ * @note String is escaped with backslashes ('\').
+ *
+ * @param[in] m dictionary
+ * @param[out] buffer Pointer to buffer that will be allocated with string containing entries.
+ *                    Buffer must be freed by the caller when it is no longer needed.
+ * @param[in] key_val_sep character used to separate key from value
+ * @param[in] pairs_sep character used to separate two pairs from each other
+ * @return >= 0 on success, negative on error
+ * @warning Separators cannot be neither '\\' nor '\0'. They also cannot be the same.
+ */
+int av_dict_get_string(const AVDictionary *m, char **buffer,
+ const char key_val_sep, const char pairs_sep);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_DICT_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/error.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/error.h
new file mode 100644
index 000000000..71df4da35
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/error.h
@@ -0,0 +1,126 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * error code definitions
+ */
+
+#ifndef AVUTIL_ERROR_H
+#define AVUTIL_ERROR_H
+
+#include <errno.h>
+#include <stddef.h>
+
+/**
+ * @addtogroup lavu_error
+ *
+ * @{
+ */
+
+
+/* error handling */
+#if EDOM > 0
+#define AVERROR(e) (-(e)) ///< Returns a negative error code from a POSIX error code, to return from library functions.
+#define AVUNERROR(e) (-(e)) ///< Returns a POSIX error code from a library function error return value.
+#else
+/* Some platforms have E* and errno already negated. */
+#define AVERROR(e) (e)
+#define AVUNERROR(e) (e)
+#endif
+
+#define FFERRTAG(a, b, c, d) (-(int)MKTAG(a, b, c, d))
+
+#define AVERROR_BSF_NOT_FOUND FFERRTAG(0xF8,'B','S','F') ///< Bitstream filter not found
+#define AVERROR_BUG FFERRTAG( 'B','U','G','!') ///< Internal bug, also see AVERROR_BUG2
+#define AVERROR_BUFFER_TOO_SMALL FFERRTAG( 'B','U','F','S') ///< Buffer too small
+#define AVERROR_DECODER_NOT_FOUND FFERRTAG(0xF8,'D','E','C') ///< Decoder not found
+#define AVERROR_DEMUXER_NOT_FOUND FFERRTAG(0xF8,'D','E','M') ///< Demuxer not found
+#define AVERROR_ENCODER_NOT_FOUND FFERRTAG(0xF8,'E','N','C') ///< Encoder not found
+#define AVERROR_EOF FFERRTAG( 'E','O','F',' ') ///< End of file
+#define AVERROR_EXIT FFERRTAG( 'E','X','I','T') ///< Immediate exit was requested; the called function should not be restarted
+#define AVERROR_EXTERNAL FFERRTAG( 'E','X','T',' ') ///< Generic error in an external library
+#define AVERROR_FILTER_NOT_FOUND FFERRTAG(0xF8,'F','I','L') ///< Filter not found
+#define AVERROR_INVALIDDATA FFERRTAG( 'I','N','D','A') ///< Invalid data found when processing input
+#define AVERROR_MUXER_NOT_FOUND FFERRTAG(0xF8,'M','U','X') ///< Muxer not found
+#define AVERROR_OPTION_NOT_FOUND FFERRTAG(0xF8,'O','P','T') ///< Option not found
+#define AVERROR_PATCHWELCOME FFERRTAG( 'P','A','W','E') ///< Not yet implemented in FFmpeg, patches welcome
+#define AVERROR_PROTOCOL_NOT_FOUND FFERRTAG(0xF8,'P','R','O') ///< Protocol not found
+
+#define AVERROR_STREAM_NOT_FOUND FFERRTAG(0xF8,'S','T','R') ///< Stream not found
+/**
+ * This is semantically identical to AVERROR_BUG;
+ * it was introduced in Libav after our AVERROR_BUG and with a modified value.
+ */
+#define AVERROR_BUG2 FFERRTAG( 'B','U','G',' ')
+#define AVERROR_UNKNOWN FFERRTAG( 'U','N','K','N') ///< Unknown error, typically from an external library
+#define AVERROR_EXPERIMENTAL (-0x2bb2afa8) ///< Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it.
+#define AVERROR_INPUT_CHANGED (-0x636e6701) ///< Input changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_OUTPUT_CHANGED)
+#define AVERROR_OUTPUT_CHANGED (-0x636e6702) ///< Output changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_INPUT_CHANGED)
+/* HTTP & RTSP errors */
+#define AVERROR_HTTP_BAD_REQUEST FFERRTAG(0xF8,'4','0','0')
+#define AVERROR_HTTP_UNAUTHORIZED FFERRTAG(0xF8,'4','0','1')
+#define AVERROR_HTTP_FORBIDDEN FFERRTAG(0xF8,'4','0','3')
+#define AVERROR_HTTP_NOT_FOUND FFERRTAG(0xF8,'4','0','4')
+#define AVERROR_HTTP_OTHER_4XX FFERRTAG(0xF8,'4','X','X')
+#define AVERROR_HTTP_SERVER_ERROR FFERRTAG(0xF8,'5','X','X')
+
+#define AV_ERROR_MAX_STRING_SIZE 64
+
+/**
+ * Put a description of the AVERROR code errnum in errbuf.
+ * In case of failure the global variable errno is set to indicate the
+ * error. Even in case of failure av_strerror() will print a generic
+ * error message indicating the errnum provided to errbuf.
+ *
+ * @param errnum error code to describe
+ * @param errbuf buffer to which description is written
+ * @param errbuf_size the size in bytes of errbuf
+ * @return 0 on success, a negative value if a description for errnum
+ * cannot be found
+ */
+int av_strerror(int errnum, char *errbuf, size_t errbuf_size);
+
+/**
+ * Fill the provided buffer with a string containing an error string
+ * corresponding to the AVERROR code errnum.
+ *
+ * @param errbuf a buffer
+ * @param errbuf_size size in bytes of errbuf
+ * @param errnum error code to describe
+ * @return the buffer in input, filled with the error description
+ * @see av_strerror()
+ */
+static inline char *av_make_error_string(char *errbuf, size_t errbuf_size, int errnum)
+{
+ av_strerror(errnum, errbuf, errbuf_size);
+ return errbuf;
+}
+
+/**
+ * Convenience macro; the return value should be used only directly in
+ * function arguments, never stand-alone.
+ */
+#define av_err2str(errnum) \
+ av_make_error_string((char[AV_ERROR_MAX_STRING_SIZE]){0}, AV_ERROR_MAX_STRING_SIZE, errnum)
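A sketch of how av_strerror()/av_err2str() are typically used; note that av_err2str() expands to a compound literal, so the returned pointer is only valid inside the enclosing block.

#include <stdio.h>
#include <libavutil/error.h>

static void report_error(int err)
{
    if (err < 0)
        fprintf(stderr, "ffmpeg error %d: %s\n", err, av_err2str(err));
}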
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_ERROR_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/frame.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/frame.h
new file mode 100644
index 000000000..9c6061a11
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/frame.h
@@ -0,0 +1,713 @@
+/*
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_frame
+ * reference-counted frame API
+ */
+
+#ifndef AVUTIL_FRAME_H
+#define AVUTIL_FRAME_H
+
+#include <stdint.h>
+
+#include "avutil.h"
+#include "buffer.h"
+#include "dict.h"
+#include "rational.h"
+#include "samplefmt.h"
+#include "pixfmt.h"
+#include "version.h"
+
+
+/**
+ * @defgroup lavu_frame AVFrame
+ * @ingroup lavu_data
+ *
+ * @{
+ * AVFrame is an abstraction for reference-counted raw multimedia data.
+ */
+
+enum AVFrameSideDataType {
+ /**
+ * The data is the AVPanScan struct defined in libavcodec.
+ */
+ AV_FRAME_DATA_PANSCAN,
+ /**
+ * ATSC A53 Part 4 Closed Captions.
+ * A53 CC bitstream is stored as uint8_t in AVFrameSideData.data.
+ * The number of bytes of CC data is AVFrameSideData.size.
+ */
+ AV_FRAME_DATA_A53_CC,
+ /**
+ * Stereoscopic 3d metadata.
+ * The data is the AVStereo3D struct defined in libavutil/stereo3d.h.
+ */
+ AV_FRAME_DATA_STEREO3D,
+ /**
+ * The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h.
+ */
+ AV_FRAME_DATA_MATRIXENCODING,
+ /**
+ * Metadata relevant to a downmix procedure.
+ * The data is the AVDownmixInfo struct defined in libavutil/downmix_info.h.
+ */
+ AV_FRAME_DATA_DOWNMIX_INFO,
+ /**
+ * ReplayGain information in the form of the AVReplayGain struct.
+ */
+ AV_FRAME_DATA_REPLAYGAIN,
+ /**
+ * This side data contains a 3x3 transformation matrix describing an affine
+ * transformation that needs to be applied to the frame for correct
+ * presentation.
+ *
+ * See libavutil/display.h for a detailed description of the data.
+ */
+ AV_FRAME_DATA_DISPLAYMATRIX,
+ /**
+ * Active Format Description data consisting of a single byte as specified
+ * in ETSI TS 101 154 using AVActiveFormatDescription enum.
+ */
+ AV_FRAME_DATA_AFD,
+ /**
+ * Motion vectors exported by some codecs (on demand through the export_mvs
+ * flag set in the libavcodec AVCodecContext flags2 option).
+ * The data is the AVMotionVector struct defined in
+ * libavutil/motion_vector.h.
+ */
+ AV_FRAME_DATA_MOTION_VECTORS,
+ /**
+ * Recommends skipping the specified number of samples. This is exported
+ * only if the "skip_manual" AVOption is set in libavcodec.
+ * This has the same format as AV_PKT_DATA_SKIP_SAMPLES.
+ * @code
+ * u32le number of samples to skip from start of this packet
+ * u32le number of samples to skip from end of this packet
+ * u8 reason for start skip
+ * u8 reason for end skip (0=padding silence, 1=convergence)
+ * @endcode
+ */
+ AV_FRAME_DATA_SKIP_SAMPLES,
+
+ /**
+ * This side data must be associated with an audio frame and corresponds to
+ * enum AVAudioServiceType defined in avcodec.h.
+ */
+ AV_FRAME_DATA_AUDIO_SERVICE_TYPE,
+};
+
+enum AVActiveFormatDescription {
+ AV_AFD_SAME = 8,
+ AV_AFD_4_3 = 9,
+ AV_AFD_16_9 = 10,
+ AV_AFD_14_9 = 11,
+ AV_AFD_4_3_SP_14_9 = 13,
+ AV_AFD_16_9_SP_14_9 = 14,
+ AV_AFD_SP_4_3 = 15,
+};
+
+
+/**
+ * Structure to hold side data for an AVFrame.
+ *
+ * sizeof(AVFrameSideData) is not a part of the public ABI, so new fields may be added
+ * to the end with a minor bump.
+ */
+typedef struct AVFrameSideData {
+ enum AVFrameSideDataType type;
+ uint8_t *data;
+ int size;
+ AVDictionary *metadata;
+ AVBufferRef *buf;
+} AVFrameSideData;
+
+/**
+ * This structure describes decoded (raw) audio or video data.
+ *
+ * AVFrame must be allocated using av_frame_alloc(). Note that this only
+ * allocates the AVFrame itself, the buffers for the data must be managed
+ * through other means (see below).
+ * AVFrame must be freed with av_frame_free().
+ *
+ * AVFrame is typically allocated once and then reused multiple times to hold
+ * different data (e.g. a single AVFrame to hold frames received from a
+ * decoder). In such a case, av_frame_unref() will free any references held by
+ * the frame and reset it to its original clean state before it
+ * is reused again.
+ *
+ * The data described by an AVFrame is usually reference counted through the
+ * AVBuffer API. The underlying buffer references are stored in AVFrame.buf /
+ * AVFrame.extended_buf. An AVFrame is considered to be reference counted if at
+ * least one reference is set, i.e. if AVFrame.buf[0] != NULL. In such a case,
+ * every single data plane must be contained in one of the buffers in
+ * AVFrame.buf or AVFrame.extended_buf.
+ * There may be a single buffer for all the data, or one separate buffer for
+ * each plane, or anything in between.
+ *
+ * sizeof(AVFrame) is not a part of the public ABI, so new fields may be added
+ * to the end with a minor bump.
+ * Similarly, fields that are marked as only to be accessed by
+ * av_opt_ptr() can be reordered. This allows two forks to add fields
+ * without breaking compatibility with each other.
+ */
+typedef struct AVFrame {
+#define AV_NUM_DATA_POINTERS 8
+ /**
+ * pointer to the picture/channel planes.
+ * This might be different from the first allocated byte
+ *
+ * Some decoders access areas outside 0,0 - width,height, please
+ * see avcodec_align_dimensions2(). Some filters and swscale can read
+ * up to 16 bytes beyond the planes, if these filters are to be used,
+ * then 16 extra bytes must be allocated.
+ */
+ uint8_t *data[AV_NUM_DATA_POINTERS];
+
+ /**
+ * For video, size in bytes of each picture line.
+ * For audio, size in bytes of each plane.
+ *
+ * For audio, only linesize[0] may be set. For planar audio, each channel
+ * plane must be the same size.
+ *
+ * For video, the linesizes should be multiples of the CPU's alignment
+ * preference; this is 16 or 32 for modern desktop CPUs.
+ * Some code requires such alignment, other code can be slower without
+ * correct alignment, and for yet other code it makes no difference.
+ *
+ * @note The linesize may be larger than the size of usable data -- there
+ * may be extra padding present for performance reasons.
+ */
+ int linesize[AV_NUM_DATA_POINTERS];
+
+ /**
+ * pointers to the data planes/channels.
+ *
+ * For video, this should simply point to data[].
+ *
+ * For planar audio, each channel has a separate data pointer, and
+ * linesize[0] contains the size of each channel buffer.
+ * For packed audio, there is just one data pointer, and linesize[0]
+ * contains the total size of the buffer for all channels.
+ *
+ * Note: Both data and extended_data should always be set in a valid frame,
+ * but for planar audio with more channels than can fit in data,
+ * extended_data must be used in order to access all channels.
+ */
+ uint8_t **extended_data;
+
+ /**
+ * width and height of the video frame
+ */
+ int width, height;
+
+ /**
+ * number of audio samples (per channel) described by this frame
+ */
+ int nb_samples;
+
+ /**
+ * format of the frame, -1 if unknown or unset.
+ * Values correspond to enum AVPixelFormat for video frames,
+ * enum AVSampleFormat for audio frames.
+ */
+ int format;
+
+ /**
+ * 1 -> keyframe, 0 -> not
+ */
+ int key_frame;
+
+ /**
+ * Picture type of the frame.
+ */
+ enum AVPictureType pict_type;
+
+ /**
+ * Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
+ */
+ AVRational sample_aspect_ratio;
+
+ /**
+ * Presentation timestamp in time_base units (time when frame should be shown to user).
+ */
+ int64_t pts;
+
+ /**
+ * PTS copied from the AVPacket that was decoded to produce this frame.
+ */
+ int64_t pkt_pts;
+
+ /**
+ * DTS copied from the AVPacket that triggered returning this frame (if frame threading isn't used).
+ * This is also the presentation time of this AVFrame, calculated from
+ * only AVPacket.dts values without pts values.
+ */
+ int64_t pkt_dts;
+
+ /**
+ * picture number in bitstream order
+ */
+ int coded_picture_number;
+ /**
+ * picture number in display order
+ */
+ int display_picture_number;
+
+ /**
+ * quality (between 1 (good) and FF_LAMBDA_MAX (bad))
+ */
+ int quality;
+
+ /**
+ * for some private data of the user
+ */
+ void *opaque;
+
+#if FF_API_ERROR_FRAME
+ /**
+ * @deprecated unused
+ */
+ attribute_deprecated
+ uint64_t error[AV_NUM_DATA_POINTERS];
+#endif
+
+ /**
+ * When decoding, this signals how much the picture must be delayed.
+ * extra_delay = repeat_pict / (2*fps)
+ */
+ int repeat_pict;
+
+ /**
+ * The content of the picture is interlaced.
+ */
+ int interlaced_frame;
+
+ /**
+ * If the content is interlaced, whether the top field is displayed first.
+ */
+ int top_field_first;
+
+ /**
+ * Tell user application that palette has changed from previous frame.
+ */
+ int palette_has_changed;
+
+ /**
+ * reordered opaque 64-bit value (generally an integer or a double precision float
+ * PTS but can be anything).
+ * The user sets AVCodecContext.reordered_opaque to represent the input at
+ * that time;
+ * the decoder reorders values as needed and sets AVFrame.reordered_opaque
+ * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque.
+ * @deprecated in favor of pkt_pts
+ */
+ int64_t reordered_opaque;
+
+ /**
+ * Sample rate of the audio data.
+ */
+ int sample_rate;
+
+ /**
+ * Channel layout of the audio data.
+ */
+ uint64_t channel_layout;
+
+ /**
+ * AVBuffer references backing the data for this frame. If all elements of
+ * this array are NULL, then this frame is not reference counted. This array
+ * must be filled contiguously -- if buf[i] is non-NULL then buf[j] must
+ * also be non-NULL for all j < i.
+ *
+ * There may be at most one AVBuffer per data plane, so for video this array
+ * always contains all the references. For planar audio with more than
+ * AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in
+ * this array. Then the extra AVBufferRef pointers are stored in the
+ * extended_buf array.
+ */
+ AVBufferRef *buf[AV_NUM_DATA_POINTERS];
+
+ /**
+ * For planar audio which requires more than AV_NUM_DATA_POINTERS
+ * AVBufferRef pointers, this array will hold all the references which
+ * cannot fit into AVFrame.buf.
+ *
+ * Note that this is different from AVFrame.extended_data, which always
+ * contains all the pointers. This array only contains the extra pointers,
+ * which cannot fit into AVFrame.buf.
+ *
+ * This array is always allocated using av_malloc() by whoever constructs
+ * the frame. It is freed in av_frame_unref().
+ */
+ AVBufferRef **extended_buf;
+ /**
+ * Number of elements in extended_buf.
+ */
+ int nb_extended_buf;
+
+ AVFrameSideData **side_data;
+ int nb_side_data;
+
+/**
+ * @defgroup lavu_frame_flags AV_FRAME_FLAGS
+ * Flags describing additional frame properties.
+ *
+ * @{
+ */
+
+/**
+ * The frame data may be corrupted, e.g. due to decoding errors.
+ */
+#define AV_FRAME_FLAG_CORRUPT (1 << 0)
+/**
+ * @}
+ */
+
+ /**
+ * Frame flags, a combination of @ref lavu_frame_flags
+ */
+ int flags;
+
+ /**
+ * MPEG vs JPEG YUV range.
+ * It must be accessed using av_frame_get_color_range() and
+ * av_frame_set_color_range().
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorRange color_range;
+
+ enum AVColorPrimaries color_primaries;
+
+ enum AVColorTransferCharacteristic color_trc;
+
+ /**
+ * YUV colorspace type.
+ * It must be accessed using av_frame_get_colorspace() and
+ * av_frame_set_colorspace().
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorSpace colorspace;
+
+ enum AVChromaLocation chroma_location;
+
+ /**
+ * frame timestamp estimated using various heuristics, in stream time base
+ * Code outside libavutil should access this field using:
+ * av_frame_get_best_effort_timestamp(frame)
+ * - encoding: unused
+ * - decoding: set by libavcodec, read by user.
+ */
+ int64_t best_effort_timestamp;
+
+ /**
+ * reordered pos from the last AVPacket that has been input into the decoder
+ * Code outside libavutil should access this field using:
+ * av_frame_get_pkt_pos(frame)
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ int64_t pkt_pos;
+
+ /**
+ * duration of the corresponding packet, expressed in
+ * AVStream->time_base units, 0 if unknown.
+ * Code outside libavutil should access this field using:
+ * av_frame_get_pkt_duration(frame)
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ int64_t pkt_duration;
+
+ /**
+ * metadata.
+ * Code outside libavutil should access this field using:
+ * av_frame_get_metadata(frame)
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ AVDictionary *metadata;
+
+ /**
+ * decode error flags of the frame, set to a combination of
+ * FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there
+ * were errors during the decoding.
+ * Code outside libavutil should access this field using:
+ * av_frame_get_decode_error_flags(frame)
+ * - encoding: unused
+ * - decoding: set by libavcodec, read by user.
+ */
+ int decode_error_flags;
+#define FF_DECODE_ERROR_INVALID_BITSTREAM 1
+#define FF_DECODE_ERROR_MISSING_REFERENCE 2
+
+ /**
+ * number of audio channels, only used for audio.
+ * Code outside libavutil should access this field using:
+ * av_frame_get_channels(frame)
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ int channels;
+
+ /**
+ * size of the corresponding packet containing the compressed
+ * frame. It must be accessed using av_frame_get_pkt_size() and
+ * av_frame_set_pkt_size().
+ * It is set to a negative value if unknown.
+ * - encoding: unused
+ * - decoding: set by libavcodec, read by user.
+ */
+ int pkt_size;
+
+#if FF_API_FRAME_QP
+ /**
+ * QP table
+ * Not to be accessed directly from outside libavutil
+ */
+ attribute_deprecated
+ int8_t *qscale_table;
+ /**
+ * QP store stride
+ * Not to be accessed directly from outside libavutil
+ */
+ attribute_deprecated
+ int qstride;
+
+ attribute_deprecated
+ int qscale_type;
+
+ /**
+ * Not to be accessed directly from outside libavutil
+ */
+ AVBufferRef *qp_table_buf;
+#endif
+} AVFrame;
+
+/**
+ * Accessors for some AVFrame fields.
+ * The position of these fields in the structure is not part of the ABI;
+ * they should not be accessed directly outside libavutil.
+ */
+int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame);
+void av_frame_set_best_effort_timestamp(AVFrame *frame, int64_t val);
+int64_t av_frame_get_pkt_duration (const AVFrame *frame);
+void av_frame_set_pkt_duration (AVFrame *frame, int64_t val);
+int64_t av_frame_get_pkt_pos (const AVFrame *frame);
+void av_frame_set_pkt_pos (AVFrame *frame, int64_t val);
+int64_t av_frame_get_channel_layout (const AVFrame *frame);
+void av_frame_set_channel_layout (AVFrame *frame, int64_t val);
+int av_frame_get_channels (const AVFrame *frame);
+void av_frame_set_channels (AVFrame *frame, int val);
+int av_frame_get_sample_rate (const AVFrame *frame);
+void av_frame_set_sample_rate (AVFrame *frame, int val);
+AVDictionary *av_frame_get_metadata (const AVFrame *frame);
+void av_frame_set_metadata (AVFrame *frame, AVDictionary *val);
+int av_frame_get_decode_error_flags (const AVFrame *frame);
+void av_frame_set_decode_error_flags (AVFrame *frame, int val);
+int av_frame_get_pkt_size(const AVFrame *frame);
+void av_frame_set_pkt_size(AVFrame *frame, int val);
+AVDictionary **avpriv_frame_get_metadatap(AVFrame *frame);
+#if FF_API_FRAME_QP
+int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type);
+int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int type);
+#endif
+enum AVColorSpace av_frame_get_colorspace(const AVFrame *frame);
+void av_frame_set_colorspace(AVFrame *frame, enum AVColorSpace val);
+enum AVColorRange av_frame_get_color_range(const AVFrame *frame);
+void av_frame_set_color_range(AVFrame *frame, enum AVColorRange val);
+
+/**
+ * Get the name of a colorspace.
+ * @return a static string identifying the colorspace; can be NULL.
+ */
+const char *av_get_colorspace_name(enum AVColorSpace val);
+
+/**
+ * Allocate an AVFrame and set its fields to default values. The resulting
+ * struct must be freed using av_frame_free().
+ *
+ * @return An AVFrame filled with default values or NULL on failure.
+ *
+ * @note this only allocates the AVFrame itself, not the data buffers. Those
+ * must be allocated through other means, e.g. with av_frame_get_buffer() or
+ * manually.
+ */
+AVFrame *av_frame_alloc(void);
+
+/**
+ * Free the frame and any dynamically allocated objects in it,
+ * e.g. extended_data. If the frame is reference counted, it will be
+ * unreferenced first.
+ *
+ * @param frame frame to be freed. The pointer will be set to NULL.
+ */
+void av_frame_free(AVFrame **frame);
+
+/**
+ * Set up a new reference to the data described by the source frame.
+ *
+ * Copy frame properties from src to dst and create a new reference for each
+ * AVBufferRef from src.
+ *
+ * If src is not reference counted, new buffers are allocated and the data is
+ * copied.
+ *
+ * @return 0 on success, a negative AVERROR on error
+ */
+int av_frame_ref(AVFrame *dst, const AVFrame *src);
+
+/**
+ * Create a new frame that references the same data as src.
+ *
+ * This is a shortcut for av_frame_alloc()+av_frame_ref().
+ *
+ * @return newly created AVFrame on success, NULL on error.
+ */
+AVFrame *av_frame_clone(const AVFrame *src);
+
+/**
+ * Unreference all the buffers referenced by frame and reset the frame fields.
+ */
+void av_frame_unref(AVFrame *frame);
+
+/**
+ * Move everything contained in src to dst and reset src.
+ */
+void av_frame_move_ref(AVFrame *dst, AVFrame *src);
+
+/**
+ * Allocate new buffer(s) for audio or video data.
+ *
+ * The following fields must be set on frame before calling this function:
+ * - format (pixel format for video, sample format for audio)
+ * - width and height for video
+ * - nb_samples and channel_layout for audio
+ *
+ * This function will fill AVFrame.data and AVFrame.buf arrays and, if
+ * necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf.
+ * For planar formats, one buffer will be allocated for each plane.
+ *
+ * @param frame frame in which to store the new buffers.
+ * @param align required buffer size alignment
+ *
+ * @return 0 on success, a negative AVERROR on error.
+ */
+int av_frame_get_buffer(AVFrame *frame, int align);
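Putting the allocation functions above together, a sketch of obtaining a writable video frame (the helper name and the 32-byte alignment are illustrative choices; error handling is shortened):

#include <libavutil/frame.h>

static AVFrame *alloc_video_frame(enum AVPixelFormat fmt, int w, int h)
{
    AVFrame *f = av_frame_alloc();
    if (!f)
        return NULL;
    f->format = fmt;
    f->width  = w;
    f->height = h;
    if (av_frame_get_buffer(f, 32) < 0) {  /* fills data[]/buf[] planes */
        av_frame_free(&f);
        return NULL;
    }
    return f;
}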
+
+/**
+ * Check if the frame data is writable.
+ *
+ * @return A positive value if the frame data is writable (which is true if and
+ * only if each of the underlying buffers has only one reference, namely the one
+ * stored in this frame). Return 0 otherwise.
+ *
+ * If 1 is returned the answer is valid until av_buffer_ref() is called on any
+ * of the underlying AVBufferRefs (e.g. through av_frame_ref() or directly).
+ *
+ * @see av_frame_make_writable(), av_buffer_is_writable()
+ */
+int av_frame_is_writable(AVFrame *frame);
+
+/**
+ * Ensure that the frame data is writable, avoiding data copy if possible.
+ *
+ * Do nothing if the frame is writable, allocate new buffers and copy the data
+ * if it is not.
+ *
+ * @return 0 on success, a negative AVERROR on error.
+ *
+ * @see av_frame_is_writable(), av_buffer_is_writable(),
+ * av_buffer_make_writable()
+ */
+int av_frame_make_writable(AVFrame *frame);
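The copy-on-write pattern implied by the two functions above, as a minimal sketch (the helper name is illustrative):

#include <libavutil/frame.h>

static int prepare_for_inplace_edit(AVFrame *frame)
{
    /* Reallocates and copies the data only if another reference
     * shares the underlying buffers; otherwise it is a no-op. */
    int ret = av_frame_make_writable(frame);
    if (ret < 0)
        return ret;
    /* frame->data[] may now be modified safely. */
    return 0;
}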
+
+/**
+ * Copy the frame data from src to dst.
+ *
+ * This function does not allocate anything, dst must be already initialized and
+ * allocated with the same parameters as src.
+ *
+ * This function only copies the frame data (i.e. the contents of the data /
+ * extended data arrays), not any other properties.
+ *
+ * @return >= 0 on success, a negative AVERROR on error.
+ */
+int av_frame_copy(AVFrame *dst, const AVFrame *src);
+
+/**
+ * Copy only "metadata" fields from src to dst.
+ *
+ * Metadata for the purpose of this function are those fields that do not affect
+ * the data layout in the buffers. E.g. pts, sample rate (for audio) or sample
+ * aspect ratio (for video), but not width/height or channel layout.
+ * Side data is also copied.
+ */
+int av_frame_copy_props(AVFrame *dst, const AVFrame *src);
+
+/**
+ * Get the buffer reference a given data plane is stored in.
+ *
+ * @param plane index of the data plane of interest in frame->extended_data.
+ *
+ * @return the buffer reference that contains the plane or NULL if the input
+ * frame is not valid.
+ */
+AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane);
+
+/**
+ * Add a new side data to a frame.
+ *
+ * @param frame a frame to which the side data should be added
+ * @param type type of the added side data
+ * @param size size of the side data
+ *
+ * @return newly added side data on success, NULL on error
+ */
+AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
+ enum AVFrameSideDataType type,
+ int size);
+
+/**
+ * @return a pointer to the side data of a given type on success, NULL if there
+ * is no side data with such type in this frame.
+ */
+AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
+ enum AVFrameSideDataType type);
+
+/**
+ * If side data of the supplied type exists in the frame, free it and remove it
+ * from the frame.
+ */
+void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type);
+
+/**
+ * @return a string identifying the side data type
+ */
+const char *av_frame_side_data_name(enum AVFrameSideDataType type);
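A short sketch of attaching and querying side data; AV_FRAME_DATA_A53_CC is used here purely as an example type, and the helper names are illustrative.

#include <stdint.h>
#include <string.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>

static int attach_cc(AVFrame *frame, const uint8_t *payload, int size)
{
    AVFrameSideData *sd = av_frame_new_side_data(frame, AV_FRAME_DATA_A53_CC, size);
    if (!sd)
        return AVERROR(ENOMEM);
    memcpy(sd->data, payload, size);
    return 0;
}

static const AVFrameSideData *find_cc(const AVFrame *frame)
{
    return av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
}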
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_FRAME_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/intfloat.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/intfloat.h
new file mode 100644
index 000000000..fe3d7ec4a
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/intfloat.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2011 Mans Rullgard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_INTFLOAT_H
+#define AVUTIL_INTFLOAT_H
+
+#include <stdint.h>
+#include "attributes.h"
+
+union av_intfloat32 {
+ uint32_t i;
+ float f;
+};
+
+union av_intfloat64 {
+ uint64_t i;
+ double f;
+};
+
+/**
+ * Reinterpret a 32-bit integer as a float.
+ */
+static av_always_inline float av_int2float(uint32_t i)
+{
+ union av_intfloat32 v;
+ v.i = i;
+ return v.f;
+}
+
+/**
+ * Reinterpret a float as a 32-bit integer.
+ */
+static av_always_inline uint32_t av_float2int(float f)
+{
+ union av_intfloat32 v;
+ v.f = f;
+ return v.i;
+}
+
+/**
+ * Reinterpret a 64-bit integer as a double.
+ */
+static av_always_inline double av_int2double(uint64_t i)
+{
+ union av_intfloat64 v;
+ v.i = i;
+ return v.f;
+}
+
+/**
+ * Reinterpret a double as a 64-bit integer.
+ */
+static av_always_inline uint64_t av_double2int(double f)
+{
+ union av_intfloat64 v;
+ v.f = f;
+ return v.i;
+}
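These helpers reinterpret bit patterns rather than convert values, so a round trip is exact; a minimal sketch:

#include <assert.h>
#include <libavutil/intfloat.h>

static void intfloat_roundtrip(void)
{
    uint32_t bits = 0x3f800000;          /* IEEE-754 encoding of 1.0f */
    float    f    = av_int2float(bits);  /* reinterpretation, not a cast */
    assert(f == 1.0f);
    assert(av_float2int(f) == bits);     /* the bit pattern survives intact */
}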
+
+#endif /* AVUTIL_INTFLOAT_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/log.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/log.h
new file mode 100644
index 000000000..321748cd8
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/log.h
@@ -0,0 +1,359 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_LOG_H
+#define AVUTIL_LOG_H
+
+#include <stdarg.h>
+#include "avutil.h"
+#include "attributes.h"
+#include "version.h"
+
+typedef enum {
+ AV_CLASS_CATEGORY_NA = 0,
+ AV_CLASS_CATEGORY_INPUT,
+ AV_CLASS_CATEGORY_OUTPUT,
+ AV_CLASS_CATEGORY_MUXER,
+ AV_CLASS_CATEGORY_DEMUXER,
+ AV_CLASS_CATEGORY_ENCODER,
+ AV_CLASS_CATEGORY_DECODER,
+ AV_CLASS_CATEGORY_FILTER,
+ AV_CLASS_CATEGORY_BITSTREAM_FILTER,
+ AV_CLASS_CATEGORY_SWSCALER,
+ AV_CLASS_CATEGORY_SWRESAMPLER,
+ AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT = 40,
+ AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
+ AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
+ AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
+ AV_CLASS_CATEGORY_DEVICE_OUTPUT,
+ AV_CLASS_CATEGORY_DEVICE_INPUT,
+ AV_CLASS_CATEGORY_NB, ///< not part of ABI/API
+}AVClassCategory;
+
+#define AV_IS_INPUT_DEVICE(category) \
+ (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT) || \
+ ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT) || \
+ ((category) == AV_CLASS_CATEGORY_DEVICE_INPUT))
+
+#define AV_IS_OUTPUT_DEVICE(category) \
+ (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT) || \
+ ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT) || \
+ ((category) == AV_CLASS_CATEGORY_DEVICE_OUTPUT))
+
+struct AVOptionRanges;
+
+/**
+ * Describe the class of an AVClass context structure. That is an
+ * arbitrary struct of which the first field is a pointer to an
+ * AVClass struct (e.g. AVCodecContext, AVFormatContext etc.).
+ */
+typedef struct AVClass {
+ /**
+ * The name of the class; usually it is the same name as the
+ * context structure type to which the AVClass is associated.
+ */
+ const char* class_name;
+
+ /**
+ * A pointer to a function which returns the name of a context
+ * instance ctx associated with the class.
+ */
+ const char* (*item_name)(void* ctx);
+
+ /**
+ * a pointer to the first option specified in the class if any or NULL
+ *
+ * @see av_set_default_options()
+ */
+ const struct AVOption *option;
+
+ /**
+ * LIBAVUTIL_VERSION with which this structure was created.
+ * This is used to allow fields to be added without requiring major
+ * version bumps everywhere.
+ */
+
+ int version;
+
+ /**
+ * Offset in the structure where log_level_offset is stored.
+ * 0 means there is no such variable
+ */
+ int log_level_offset_offset;
+
+ /**
+ * Offset in the structure where a pointer to the parent context for
+ * logging is stored. For example a decoder could pass its AVCodecContext
+ * to eval as such a parent context, which an av_log() implementation
+ * could then leverage to display the parent context.
+ * The offset can be NULL.
+ */
+ int parent_log_context_offset;
+
+ /**
+ * Return next AVOptions-enabled child or NULL
+ */
+ void* (*child_next)(void *obj, void *prev);
+
+ /**
+ * Return an AVClass corresponding to the next potential
+ * AVOptions-enabled child.
+ *
+ * The difference between child_next and this is that
+ * child_next iterates over _already existing_ objects, while
+ * child_class_next iterates over _all possible_ children.
+ */
+ const struct AVClass* (*child_class_next)(const struct AVClass *prev);
+
+ /**
+ * Category used for visualization (like color)
+ * This is only set if the category is equal for all objects using this class.
+ * available since version (51 << 16 | 56 << 8 | 100)
+ */
+ AVClassCategory category;
+
+ /**
+ * Callback to return the category.
+ * available since version (51 << 16 | 59 << 8 | 100)
+ */
+ AVClassCategory (*get_category)(void* ctx);
+
+ /**
+ * Callback to return the supported/allowed ranges.
+ * available since version (52.12)
+ */
+ int (*query_ranges)(struct AVOptionRanges **, void *obj, const char *key, int flags);
+} AVClass;
+
+/**
+ * @addtogroup lavu_log
+ *
+ * @{
+ *
+ * @defgroup lavu_log_constants Logging Constants
+ *
+ * @{
+ */
+
+/**
+ * Print no output.
+ */
+#define AV_LOG_QUIET -8
+
+/**
+ * Something went really wrong and we will crash now.
+ */
+#define AV_LOG_PANIC 0
+
+/**
+ * Something went wrong and recovery is not possible.
+ * For example, no header was found for a format which depends
+ * on headers or an illegal combination of parameters is used.
+ */
+#define AV_LOG_FATAL 8
+
+/**
+ * Something went wrong and cannot losslessly be recovered.
+ * However, not all future data is affected.
+ */
+#define AV_LOG_ERROR 16
+
+/**
+ * Something somehow does not look correct. This may or may not
+ * lead to problems. An example would be the use of '-vstrict -2'.
+ */
+#define AV_LOG_WARNING 24
+
+/**
+ * Standard information.
+ */
+#define AV_LOG_INFO 32
+
+/**
+ * Detailed information.
+ */
+#define AV_LOG_VERBOSE 40
+
+/**
+ * Stuff which is only useful for libav* developers.
+ */
+#define AV_LOG_DEBUG 48
+
+/**
+ * Extremely verbose debugging, useful for libav* development.
+ */
+#define AV_LOG_TRACE 56
+
+#define AV_LOG_MAX_OFFSET (AV_LOG_TRACE - AV_LOG_QUIET)
+
+/**
+ * @}
+ */
+
+/**
+ * Sets additional colors for extended debugging sessions.
+ * @code
+ av_log(ctx, AV_LOG_DEBUG|AV_LOG_C(134), "Message in purple\n");
+ @endcode
+ * Requires 256-color terminal support. Use outside debugging is not
+ * recommended.
+ */
+#define AV_LOG_C(x) ((x) << 8)
+
+/**
+ * Send the specified message to the log if the level is less than or equal
+ * to the current av_log_level. By default, all logging messages are sent to
+ * stderr. This behavior can be altered by setting a different logging callback
+ * function.
+ * @see av_log_set_callback
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct or NULL if general log.
+ * @param level The importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant".
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ */
+void av_log(void *avcl, int level, const char *fmt, ...) av_printf_format(3, 4);
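A minimal sketch of general (context-less) logging with the constants defined above; passing NULL as avcl is the documented way to log without an AVClass prefix (the function name is illustrative).

#include <libavutil/log.h>

static void log_progress(int frame_no)
{
    av_log(NULL, AV_LOG_INFO,  "decoded frame %d\n", frame_no);
    av_log(NULL, AV_LOG_DEBUG, "only visible once av_log_set_level() raises the level\n");
}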
+
+
+/**
+ * Send the specified message to the log if the level is less than or equal
+ * to the current av_log_level. By default, all logging messages are sent to
+ * stderr. This behavior can be altered by setting a different logging callback
+ * function.
+ * @see av_log_set_callback
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct.
+ * @param level The importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant".
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ * @param vl The arguments referenced by the format string.
+ */
+void av_vlog(void *avcl, int level, const char *fmt, va_list vl);
+
+/**
+ * Get the current log level
+ *
+ * @see lavu_log_constants
+ *
+ * @return Current log level
+ */
+int av_log_get_level(void);
+
+/**
+ * Set the log level
+ *
+ * @see lavu_log_constants
+ *
+ * @param level Logging level
+ */
+void av_log_set_level(int level);
+
+/**
+ * Set the logging callback
+ *
+ * @note The callback must be thread safe, even if the application does not use
+ * threads itself as some codecs are multithreaded.
+ *
+ * @see av_log_default_callback
+ *
+ * @param callback A logging function with a compatible signature.
+ */
+void av_log_set_callback(void (*callback)(void*, int, const char*, va_list));
+
+/**
+ * Default logging callback
+ *
+ * It prints the message to stderr, optionally colorizing it.
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct.
+ * @param level The importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant".
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ * @param vl The arguments referenced by the format string.
+ */
+void av_log_default_callback(void *avcl, int level, const char *fmt,
+ va_list vl);
+
+/**
+ * Return the context name
+ *
+ * @param ctx The AVClass context
+ *
+ * @return The AVClass class_name
+ */
+const char* av_default_item_name(void* ctx);
+AVClassCategory av_default_get_category(void *ptr);
+
+/**
+ * Format a line of log the same way as the default callback.
+ * @param line buffer to receive the formatted line
+ * @param line_size size of the buffer
+ * @param print_prefix used to store whether the prefix must be printed;
+ * must point to a persistent integer initially set to 1
+ */
+void av_log_format_line(void *ptr, int level, const char *fmt, va_list vl,
+ char *line, int line_size, int *print_prefix);
+
+#if FF_API_DLOG
+/**
+ * av_dlog macros
+ * @deprecated unused
+ * Useful to print debug messages that shouldn't get compiled in normally.
+ */
+
+#ifdef DEBUG
+# define av_dlog(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__)
+#else
+# define av_dlog(pctx, ...) do { if (0) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__); } while (0)
+#endif
+#endif /* FF_API_DLOG */
+
+/**
+ * Skip repeated messages. This requires the user app to use av_log() instead of
+ * (f)printf, as the two would otherwise interfere and, with some bad luck,
+ * lead to "Last message repeated x times" messages below (f)printf messages.
+ * Also, to receive the last "last repeated" line, if any, the user app must
+ * call av_log(NULL, AV_LOG_QUIET, "%s", ""); at the end.
+ */
+#define AV_LOG_SKIP_REPEATED 1
+
+/**
+ * Include the log severity in messages originating from codecs.
+ *
+ * Results in messages such as:
+ * [rawvideo @ 0xDEADBEEF] [error] encode did not produce valid pts
+ */
+#define AV_LOG_PRINT_LEVEL 2
+
+void av_log_set_flags(int arg);
+int av_log_get_flags(void);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_LOG_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/macros.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/macros.h
new file mode 100644
index 000000000..2007ee561
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/macros.h
@@ -0,0 +1,50 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu
+ * Utility Preprocessor macros
+ */
+
+#ifndef AVUTIL_MACROS_H
+#define AVUTIL_MACROS_H
+
+/**
+ * @addtogroup preproc_misc Preprocessor String Macros
+ *
+ * String manipulation macros
+ *
+ * @{
+ */
+
+#define AV_STRINGIFY(s) AV_TOSTRING(s)
+#define AV_TOSTRING(s) #s
+
+#define AV_GLUE(a, b) a ## b
+#define AV_JOIN(a, b) AV_GLUE(a, b)
+
+/**
+ * @}
+ */
+
+#define AV_PRAGMA(s) _Pragma(#s)
+
+#define FFALIGN(x, a) (((x)+(a)-1)&~((a)-1))
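For illustration, the two most frequently used macros here expand as follows (FFALIGN assumes a power-of-two alignment; the example names are made up):

#include <libavutil/macros.h>

#define EXAMPLE_WIDTH 1918

static const char *width_str = AV_STRINGIFY(EXAMPLE_WIDTH); /* "1918" */
static const int   stride    = FFALIGN(EXAMPLE_WIDTH, 32);  /* 1920, rounded up to a multiple of 32 */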
+
+#endif /* AVUTIL_MACROS_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/mathematics.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/mathematics.h
new file mode 100644
index 000000000..57c44f845
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/mathematics.h
@@ -0,0 +1,165 @@
+/*
+ * copyright (c) 2005-2012 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_MATHEMATICS_H
+#define AVUTIL_MATHEMATICS_H
+
+#include <stdint.h>
+#include <math.h>
+#include "attributes.h"
+#include "rational.h"
+#include "intfloat.h"
+
+#ifndef M_E
+#define M_E 2.7182818284590452354 /* e */
+#endif
+#ifndef M_LN2
+#define M_LN2 0.69314718055994530942 /* log_e 2 */
+#endif
+#ifndef M_LN10
+#define M_LN10 2.30258509299404568402 /* log_e 10 */
+#endif
+#ifndef M_LOG2_10
+#define M_LOG2_10 3.32192809488736234787 /* log_2 10 */
+#endif
+#ifndef M_PHI
+#define M_PHI 1.61803398874989484820 /* phi / golden ratio */
+#endif
+#ifndef M_PI
+#define M_PI 3.14159265358979323846 /* pi */
+#endif
+#ifndef M_PI_2
+#define M_PI_2 1.57079632679489661923 /* pi/2 */
+#endif
+#ifndef M_SQRT1_2
+#define M_SQRT1_2 0.70710678118654752440 /* 1/sqrt(2) */
+#endif
+#ifndef M_SQRT2
+#define M_SQRT2 1.41421356237309504880 /* sqrt(2) */
+#endif
+#ifndef NAN
+#define NAN av_int2float(0x7fc00000)
+#endif
+#ifndef INFINITY
+#define INFINITY av_int2float(0x7f800000)
+#endif
+
+/**
+ * @addtogroup lavu_math
+ * @{
+ */
+
+
+enum AVRounding {
+ AV_ROUND_ZERO = 0, ///< Round toward zero.
+ AV_ROUND_INF = 1, ///< Round away from zero.
+ AV_ROUND_DOWN = 2, ///< Round toward -infinity.
+ AV_ROUND_UP = 3, ///< Round toward +infinity.
+ AV_ROUND_NEAR_INF = 5, ///< Round to nearest and halfway cases away from zero.
+ AV_ROUND_PASS_MINMAX = 8192, ///< Flag to pass INT64_MIN/MAX through instead of rescaling, this avoids special cases for AV_NOPTS_VALUE
+};
+
+/**
+ * Compute the greatest common divisor of a and b.
+ *
+ * @return gcd of a and b up to sign; if a >= 0 and b >= 0, return value is >= 0;
+ * if a == 0 and b == 0, returns 0.
+ */
+int64_t av_const av_gcd(int64_t a, int64_t b);
+
+/**
+ * Rescale a 64-bit integer with rounding to nearest.
+ * A simple a*b/c isn't possible as it can overflow.
+ */
+int64_t av_rescale(int64_t a, int64_t b, int64_t c) av_const;
+
+/**
+ * Rescale a 64-bit integer with specified rounding.
+ * A simple a*b/c isn't possible as it can overflow.
+ *
+ * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is
+ * INT64_MIN or INT64_MAX then a is passed through unchanged.
+ */
+int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding) av_const;
+
+/**
+ * Rescale a 64-bit integer by 2 rational numbers.
+ */
+int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const;
+
+/**
+ * Rescale a 64-bit integer by 2 rational numbers with specified rounding.
+ *
+ * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is
+ * INT64_MIN or INT64_MAX then a is passed through unchanged.
+ */
+int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq,
+ enum AVRounding) av_const;
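The most common use of these rescaling helpers is converting a timestamp between time bases, e.g. from a 90 kHz stream time base to microseconds; a sketch (the function name is illustrative):

#include <stdint.h>
#include <libavutil/mathematics.h>

static int64_t pts_to_microseconds(int64_t pts)
{
    AVRational stream_tb = { 1, 90000 };   /* 90 kHz MPEG-TS time base */
    AVRational micro_tb  = { 1, 1000000 }; /* microseconds */
    return av_rescale_q(pts, stream_tb, micro_tb);
}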
+
+/**
+ * Compare 2 timestamps each in its own timebases.
+ * The result of the function is undefined if one of the timestamps
+ * is outside the int64_t range when represented in the others timebase.
+ * @return -1 if ts_a is before ts_b, 1 if ts_a is after ts_b or 0 if they represent the same position
+ */
+int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b);
+
+/**
+ * Compare 2 integers modulo mod.
+ * That is, we compare integers a and b for which only the least
+ * significant log2(mod) bits are known.
+ *
+ * @param mod must be a power of 2
+ * @return a negative value if a is smaller than b
+ * a positive value if a is greater than b
+ * 0 if a equals b
+ */
+int64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod);
+
+/**
+ * Rescale a timestamp while preserving known durations.
+ *
+ * @param in_ts Input timestamp
+ * @param in_tb Input timebase
+ * @param fs_tb Duration and *last timebase
+ * @param duration duration till the next call
+ * @param out_tb Output timebase
+ */
+int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb);
+
+/**
+ * Add a value to a timestamp.
+ *
+ * This function guarantees that when the same value is repeatedly added,
+ * no accumulation of rounding errors occurs.
+ *
+ * @param ts Input timestamp
+ * @param ts_tb Input timestamp timebase
+ * @param inc value to add to ts
+ * @param inc_tb inc timebase
+ */
+int64_t av_add_stable(AVRational ts_tb, int64_t ts, AVRational inc_tb, int64_t inc);
+
+
+ /**
+ * @}
+ */
+
+#endif /* AVUTIL_MATHEMATICS_H */
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/mem.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/mem.h
new file mode 100644
index 000000000..d25b3229b
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/mem.h
@@ -0,0 +1,406 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * memory handling functions
+ */
+
+#ifndef AVUTIL_MEM_H
+#define AVUTIL_MEM_H
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "attributes.h"
+#include "error.h"
+#include "avutil.h"
+
+/**
+ * @addtogroup lavu_mem
+ * @{
+ */
+
+
+#if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C)
+ #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_CONST(n,t,v) const t __attribute__ ((aligned (n))) v
+#elif defined(__TI_COMPILER_VERSION__)
+ #define DECLARE_ALIGNED(n,t,v) \
+ AV_PRAGMA(DATA_ALIGN(v,n)) \
+ t __attribute__((aligned(n))) v
+ #define DECLARE_ASM_CONST(n,t,v) \
+ AV_PRAGMA(DATA_ALIGN(v,n)) \
+ static const t __attribute__((aligned(n))) v
+#elif defined(__GNUC__)
+ #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (n))) v
+#elif defined(_MSC_VER)
+ #define DECLARE_ALIGNED(n,t,v) __declspec(align(n)) t v
+ #define DECLARE_ASM_CONST(n,t,v) __declspec(align(n)) static const t v
+#else
+ #define DECLARE_ALIGNED(n,t,v) t v
+ #define DECLARE_ASM_CONST(n,t,v) static const t v
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+ #define av_malloc_attrib __attribute__((__malloc__))
+#else
+ #define av_malloc_attrib
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4,3)
+ #define av_alloc_size(...) __attribute__((alloc_size(__VA_ARGS__)))
+#else
+ #define av_alloc_size(...)
+#endif
+
+/**
+ * Allocate a block of size bytes with alignment suitable for all
+ * memory accesses (including vectors if available on the CPU).
+ * @param size Size in bytes for the memory block to be allocated.
+ * @return Pointer to the allocated block, NULL if the block cannot
+ * be allocated.
+ * @see av_mallocz()
+ */
+void *av_malloc(size_t size) av_malloc_attrib av_alloc_size(1);
+
+/**
+ * Allocate a block of size * nmemb bytes with av_malloc().
+ * @param nmemb Number of elements
+ * @param size Size of the single element
+ * @return Pointer to the allocated block, NULL if the block cannot
+ * be allocated.
+ * @see av_malloc()
+ */
+av_alloc_size(1, 2) static inline void *av_malloc_array(size_t nmemb, size_t size)
+{
+ if (!size || nmemb >= INT_MAX / size)
+ return NULL;
+ return av_malloc(nmemb * size);
+}
+
+/**
+ * Allocate or reallocate a block of memory.
+ * If ptr is NULL and size > 0, allocate a new block. If
+ * size is zero, free the memory block pointed to by ptr.
+ * @param ptr Pointer to a memory block already allocated with
+ * av_realloc() or NULL.
+ * @param size Size in bytes of the memory block to be allocated or
+ * reallocated.
+ * @return Pointer to a newly-reallocated block or NULL if the block
+ * cannot be reallocated or the function is used to free the memory block.
+ * @warning Pointers originating from the av_malloc() family of functions must
+ * not be passed to av_realloc(). The former can be implemented using
+ * memalign() (or other functions), and there is no guarantee that
+ * pointers from such functions can be passed to realloc() at all.
+ * The situation is undefined according to POSIX and may crash with
+ * some libc implementations.
+ * @see av_fast_realloc()
+ */
+void *av_realloc(void *ptr, size_t size) av_alloc_size(2);
+
+/**
+ * Allocate or reallocate a block of memory.
+ * This function does the same thing as av_realloc, except:
+ * - It takes two arguments and checks the result of the multiplication for
+ * integer overflow.
+ * - It frees the input block in case of failure, thus avoiding the memory
+ * leak with the classic "buf = realloc(buf); if (!buf) return -1;".
+ */
+void *av_realloc_f(void *ptr, size_t nelem, size_t elsize);
+
+/**
+ * Allocate or reallocate a block of memory.
+ * If *ptr is NULL and size > 0, allocate a new block. If
+ * size is zero, free the memory block pointed to by ptr.
+ * @param ptr Pointer to a pointer to a memory block already allocated
+ * with av_realloc(), or pointer to a pointer to NULL.
+ * The pointer is updated on success, or freed on failure.
+ * @param size Size in bytes for the memory block to be allocated or
+ * reallocated
+ * @return Zero on success, an AVERROR error code on failure.
+ * @warning Pointers originating from the av_malloc() family of functions must
+ * not be passed to av_reallocp(). The former can be implemented using
+ * memalign() (or other functions), and there is no guarantee that
+ * pointers from such functions can be passed to realloc() at all.
+ * The situation is undefined according to POSIX and may crash with
+ * some libc implementations.
+ */
+av_warn_unused_result
+int av_reallocp(void *ptr, size_t size);
+
+/**
+ * Allocate or reallocate an array.
+ * If ptr is NULL and nmemb > 0, allocate a new block. If
+ * nmemb is zero, free the memory block pointed to by ptr.
+ * @param ptr Pointer to a memory block already allocated with
+ * av_realloc() or NULL.
+ * @param nmemb Number of elements
+ * @param size Size of the single element
+ * @return Pointer to a newly-reallocated block or NULL if the block
+ * cannot be reallocated or the function is used to free the memory block.
+ * @warning Pointers originating from the av_malloc() family of functions must
+ * not be passed to av_realloc(). The former can be implemented using
+ * memalign() (or other functions), and there is no guarantee that
+ * pointers from such functions can be passed to realloc() at all.
+ * The situation is undefined according to POSIX and may crash with
+ * some libc implementations.
+ */
+av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size);
+
+/**
+ * Allocate or reallocate an array through a pointer to a pointer.
+ * If *ptr is NULL and nmemb > 0, allocate a new block. If
+ * nmemb is zero, free the memory block pointed to by ptr.
+ * @param ptr Pointer to a pointer to a memory block already allocated
+ * with av_realloc(), or pointer to a pointer to NULL.
+ * The pointer is updated on success, or freed on failure.
+ * @param nmemb Number of elements
+ * @param size Size of the single element
+ * @return Zero on success, an AVERROR error code on failure.
+ * @warning Pointers originating from the av_malloc() family of functions must
+ * not be passed to av_realloc(). The former can be implemented using
+ * memalign() (or other functions), and there is no guarantee that
+ * pointers from such functions can be passed to realloc() at all.
+ * The situation is undefined according to POSIX and may crash with
+ * some libc implementations.
+ */
+av_alloc_size(2, 3) int av_reallocp_array(void *ptr, size_t nmemb, size_t size);
+
+/**
+ * Free a memory block which has been allocated with av_malloc(z)() or
+ * av_realloc().
+ * @param ptr Pointer to the memory block which should be freed.
+ * @note ptr = NULL is explicitly allowed.
+ * @note It is recommended that you use av_freep() instead.
+ * @see av_freep()
+ */
+void av_free(void *ptr);
+
+/**
+ * Allocate a block of size bytes with alignment suitable for all
+ * memory accesses (including vectors if available on the CPU) and
+ * zero all the bytes of the block.
+ * @param size Size in bytes for the memory block to be allocated.
+ * @return Pointer to the allocated block, NULL if it cannot be allocated.
+ * @see av_malloc()
+ */
+void *av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1);
+
+/**
+ * Allocate a block of nmemb * size bytes with alignment suitable for all
+ * memory accesses (including vectors if available on the CPU) and
+ * zero all the bytes of the block.
+ * The allocation will fail if nmemb * size is greater than or equal
+ * to INT_MAX.
+ * @param nmemb
+ * @param size
+ * @return Pointer to the allocated block, NULL if it cannot be allocated.
+ */
+void *av_calloc(size_t nmemb, size_t size) av_malloc_attrib;
+
+/**
+ * Allocate a block of size * nmemb bytes with av_mallocz().
+ * @param nmemb Number of elements
+ * @param size Size of the single element
+ * @return Pointer to the allocated block, NULL if the block cannot
+ * be allocated.
+ * @see av_mallocz()
+ * @see av_malloc_array()
+ */
+av_alloc_size(1, 2) static inline void *av_mallocz_array(size_t nmemb, size_t size)
+{
+ if (!size || nmemb >= INT_MAX / size)
+ return NULL;
+ return av_mallocz(nmemb * size);
+}
+
+/**
+ * Duplicate the string s.
+ * @param s string to be duplicated
+ * @return Pointer to a newly-allocated string containing a
+ * copy of s or NULL if the string cannot be allocated.
+ */
+char *av_strdup(const char *s) av_malloc_attrib;
+
+/**
+ * Duplicate a substring of the string s.
+ * @param s string to be duplicated
+ * @param len the maximum length of the resulting string (not counting the
+ * terminating byte).
+ * @return Pointer to a newly-allocated string containing a
+ * copy of s or NULL if the string cannot be allocated.
+ */
+char *av_strndup(const char *s, size_t len) av_malloc_attrib;
+
+/**
+ * Duplicate the buffer p.
+ * @param p buffer to be duplicated
+ * @return Pointer to a newly allocated buffer containing a
+ * copy of p or NULL if the buffer cannot be allocated.
+ */
+void *av_memdup(const void *p, size_t size);
+
+/**
+ * Free a memory block which has been allocated with av_malloc(z)() or
+ * av_realloc() and set the pointer pointing to it to NULL.
+ * @param ptr Pointer to the pointer to the memory block which should
+ * be freed.
+ * @note passing a pointer to a NULL pointer is safe and leads to no action.
+ * @see av_free()
+ */
+void av_freep(void *ptr);
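A sketch of why av_freep() is the recommended form: it clears the caller's pointer, so a second free later on is a harmless no-op (the function name is illustrative).

#include <stdint.h>
#include <libavutil/mem.h>

static void buffer_lifetime_example(void)
{
    uint8_t *buf = av_malloc(4096);
    if (!buf)
        return;
    /* ... use buf ... */
    av_freep(&buf);  /* frees the block and sets buf to NULL */
    av_freep(&buf);  /* safe: freeing via a NULL pointer does nothing */
}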
+
+/**
+ * Add an element to a dynamic array.
+ *
+ * The array to grow is supposed to be an array of pointers to
+ * structures, and the element to add must be a pointer to an already
+ * allocated structure.
+ *
+ * The array is reallocated when its size reaches powers of 2.
+ * Therefore, the amortized cost of adding an element is constant.
+ *
+ * In case of success, the pointer to the array is updated in order to
+ * point to the new grown array, and the number pointed to by nb_ptr
+ * is incremented.
+ * In case of failure, the array is freed, *tab_ptr is set to NULL and
+ * *nb_ptr is set to 0.
+ *
+ * @param tab_ptr pointer to the array to grow
+ * @param nb_ptr pointer to the number of elements in the array
+ * @param elem element to add
+ * @see av_dynarray_add_nofree(), av_dynarray2_add()
+ */
+void av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem);
+
+/**
+ * Add an element to a dynamic array.
+ *
+ * This function has the same functionality as av_dynarray_add(),
+ * but it doesn't free memory on failure. It returns an error code
+ * instead and leaves the current buffer untouched.
+ *
+ * @param tab_ptr pointer to the array to grow
+ * @param nb_ptr pointer to the number of elements in the array
+ * @param elem element to add
+ * @return >=0 on success, negative otherwise.
+ * @see av_dynarray_add(), av_dynarray2_add()
+ */
+av_warn_unused_result
+int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem);
+
+/**
+ * Add an element of size elem_size to a dynamic array.
+ *
+ * The array is reallocated when its number of elements reaches powers of 2.
+ * Therefore, the amortized cost of adding an element is constant.
+ *
+ * In case of success, the pointer to the array is updated in order to
+ * point to the new grown array, and the number pointed to by nb_ptr
+ * is incremented.
+ * In case of failure, the array is freed, *tab_ptr is set to NULL and
+ * *nb_ptr is set to 0.
+ *
+ * @param tab_ptr pointer to the array to grow
+ * @param nb_ptr pointer to the number of elements in the array
+ * @param elem_size size in bytes of the elements in the array
+ * @param elem_data pointer to the data of the element to add. If NULL, the space of
+ * the new added element is not filled.
+ * @return pointer to the data of the element to copy in the newly allocated space.
+ * If NULL, the newly allocated space is left uninitialized.
+ * @see av_dynarray_add(), av_dynarray_add_nofree()
+ */
+void *av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size,
+ const uint8_t *elem_data);
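An illustrative aside (not part of this patch): growing an array of pointers with av_dynarray_add_nofree(), which reports failure without discarding the existing array. The Item type and push_item name are invented for the example.

#include <libavutil/mem.h>

typedef struct Item { int id; } Item;

static int push_item(Item ***items, int *nb_items, Item *it)
{
    int ret = av_dynarray_add_nofree(items, nb_items, it);
    /* Unlike av_dynarray_add(), *items and *nb_items are left untouched on failure. */
    return ret < 0 ? ret : 0;
}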
+
+/**
+ * Multiply two size_t values checking for overflow.
+ * @return 0 on success, AVERROR(EINVAL) on overflow.
+ */
+static inline int av_size_mult(size_t a, size_t b, size_t *r)
+{
+ size_t t = a * b;
+ /* Hack inspired by glibc: only try the division if a and b are both
+ * greater than sqrt(SIZE_MAX). */
+ if ((a | b) >= ((size_t)1 << (sizeof(size_t) * 4)) && a && t / a != b)
+ return AVERROR(EINVAL);
+ *r = t;
+ return 0;
+}
+
+/**
+ * Set the maximum size that may be allocated in one block.
+ */
+void av_max_alloc(size_t max);
+
+/**
+ * deliberately overlapping memcpy implementation
+ * @param dst destination buffer
+ * @param back how many bytes back we start (the initial size of the overlapping window), must be > 0
+ * @param cnt number of bytes to copy, must be >= 0
+ *
+ * cnt > back is valid; this will copy the bytes we just copied,
+ * thus creating a repeating pattern with a period length of back.
+ */
+void av_memcpy_backptr(uint8_t *dst, int back, int cnt);
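An illustrative aside (not part of this patch): with cnt > back, av_memcpy_backptr() replicates the bytes behind dst, which is how LZ77-style decoders expand short matches. fill_pattern is a hypothetical name.

#include <stdint.h>
#include <libavutil/mem.h>

static void fill_pattern(void)
{
    uint8_t buf[16] = { 0xAB, 0xCD }; /* two-byte seed at the start of buf */
    /* Copy 14 bytes starting 2 bytes back: buf becomes AB CD AB CD ... */
    av_memcpy_backptr(buf + 2, 2, 14);
}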
+
+/**
+ * Reallocate the given block if it is not large enough, otherwise do nothing.
+ *
+ * @see av_realloc
+ */
+void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Allocate a buffer, reusing the given one if large enough.
+ *
+ * Contrary to av_fast_realloc(), the current buffer contents might not be
+ * preserved and on error the old buffer is freed, so no special
+ * handling to avoid memory leaks is necessary.
+ *
+ * @param ptr pointer to pointer to already allocated buffer, overwritten with pointer to new buffer
+ * @param size size of the buffer *ptr points to
+ * @param min_size minimum size of the *ptr buffer after returning; *ptr will be NULL and
+ * *size 0 if an error occurred.
+ */
+void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Allocate a buffer, reusing the given one if large enough.
+ *
+ * All newly allocated space is initially cleared.
+ * Contrary to av_fast_realloc(), the current buffer contents might not be
+ * preserved and on error the old buffer is freed, so no special
+ * handling to avoid memory leaks is necessary.
+ *
+ * @param ptr pointer to pointer to already allocated buffer, overwritten with pointer to new buffer
+ * @param size size of the buffer *ptr points to
+ * @param min_size minimum size of the *ptr buffer after returning; *ptr will be NULL and
+ * *size 0 if an error occurred.
+ */
+void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_MEM_H */
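An illustrative aside (not part of this patch): the av_fast_malloc() pattern documented above, reusing one scratch buffer across calls and growing it only when a larger request arrives. The Scratch type and ensure_capacity name are invented for the sketch.

#include <stddef.h>
#include <stdint.h>
#include <libavutil/mem.h>

typedef struct Scratch {
    uint8_t *data;
    unsigned int size; /* allocated size, kept up to date by av_fast_malloc() */
} Scratch;

static int ensure_capacity(Scratch *s, size_t needed)
{
    av_fast_malloc(&s->data, &s->size, needed);
    /* On failure the old buffer has been freed, s->data is NULL and s->size is 0. */
    return s->data ? 0 : -1;
}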
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/pixfmt.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/pixfmt.h
new file mode 100644
index 000000000..32044f077
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/pixfmt.h
@@ -0,0 +1,469 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PIXFMT_H
+#define AVUTIL_PIXFMT_H
+
+/**
+ * @file
+ * pixel format definitions
+ *
+ */
+
+#include "libavutil/avconfig.h"
+#include "version.h"
+
+#define AVPALETTE_SIZE 1024
+#define AVPALETTE_COUNT 256
+
+/**
+ * Pixel format.
+ *
+ * @note
+ * AV_PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA
+ * color is put together as:
+ * (A << 24) | (R << 16) | (G << 8) | B
+ * This is stored as BGRA on little-endian CPU architectures and ARGB on
+ * big-endian CPUs.
+ *
+ * @par
+ * When the pixel format is palettized RGB32 (AV_PIX_FMT_PAL8), the palettized
+ * image data is stored in AVFrame.data[0]. The palette is transported in
+ * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is
+ * formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is
+ * also endian-specific). Note also that the individual RGB32 palette
+ * components stored in AVFrame.data[1] should be in the range 0..255.
+ * This is important as many custom PAL8 video codecs that were designed
+ * to run on the IBM VGA graphics adapter use 6-bit palette components.
+ *
+ * @par
+ * For all the 8bit per pixel formats, an RGB32 palette is in data[1] like
+ * for pal8. This palette is filled in automatically by the function
+ * allocating the picture.
+ */
+enum AVPixelFormat {
+ AV_PIX_FMT_NONE = -1,
+ AV_PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
+ AV_PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
+ AV_PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB...
+ AV_PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR...
+ AV_PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+ AV_PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
+ AV_PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
+ AV_PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
+ AV_PIX_FMT_GRAY8, ///< Y , 8bpp
+ AV_PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
+ AV_PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
+ AV_PIX_FMT_PAL8, ///< 8 bit with AV_PIX_FMT_RGB32 palette
+ AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range
+ AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting color_range
+ AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range
+#if FF_API_XVMC
+ AV_PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing
+ AV_PIX_FMT_XVMC_MPEG2_IDCT,
+#define AV_PIX_FMT_XVMC AV_PIX_FMT_XVMC_MPEG2_IDCT
+#endif /* FF_API_XVMC */
+ AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
+ AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
+ AV_PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
+ AV_PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
+ AV_PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
+ AV_PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
+ AV_PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
+ AV_PIX_FMT_NV21, ///< as above, but U and V bytes are swapped
+
+ AV_PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
+ AV_PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
+ AV_PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
+ AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
+
+ AV_PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian
+ AV_PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian
+ AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
+ AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
+ AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
+#if FF_API_VDPAU
+ AV_PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ AV_PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ AV_PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ AV_PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ AV_PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+#endif
+ AV_PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
+ AV_PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian
+
+ AV_PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
+ AV_PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
+ AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined
+ AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined
+
+ AV_PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
+ AV_PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
+ AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian , X=unused/undefined
+ AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined
+
+#if FF_API_VAAPI
+ /** @name Deprecated pixel formats */
+ /**@{*/
+ AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
+ AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
+ AV_PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ /**@}*/
+ AV_PIX_FMT_VAAPI = AV_PIX_FMT_VAAPI_VLD,
+#else
+ /**
+ * Hardware acceleration through VA-API, data[3] contains a
+ * VASurfaceID.
+ */
+ AV_PIX_FMT_VAAPI,
+#endif
+
+ AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+#if FF_API_VDPAU
+ AV_PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+#endif
+ AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer
+
+ AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, X=unused/undefined
+ AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian, X=unused/undefined
+ AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, X=unused/undefined
+ AV_PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian, X=unused/undefined
+ AV_PIX_FMT_YA8, ///< 8bit gray, 8bit alpha
+
+ AV_PIX_FMT_Y400A = AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
+ AV_PIX_FMT_GRAY8A= AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
+
+ AV_PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
+ AV_PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian
+
+ /**
+ * The following 12 formats have the disadvantage of needing 1 format for each bit depth.
+ * Notice that each 9/10 bits sample is stored in 16 bits with extra padding.
+ * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better.
+ */
+ AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_VDA_VLD, ///< hardware decoding through VDA
+ AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp
+ AV_PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big-endian
+ AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian
+ AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian
+ AV_PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little-endian
+ AV_PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big-endian
+ AV_PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little-endian
+ AV_PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
+ AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
+ AV_PIX_FMT_YUVA420P9BE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA420P9LE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA422P9BE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA422P9LE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA444P9BE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA444P9LE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
+
+ AV_PIX_FMT_VDPAU, ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface
+
+ AV_PIX_FMT_XYZ12LE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0
+ AV_PIX_FMT_XYZ12BE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0
+ AV_PIX_FMT_NV16, ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+ AV_PIX_FMT_NV20LE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_NV20BE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+
+ AV_PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+ AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+ AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+ AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+
+ AV_PIX_FMT_YVYU422, ///< packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
+
+ AV_PIX_FMT_VDA, ///< HW acceleration through VDA, data[3] contains a CVPixelBufferRef
+
+ AV_PIX_FMT_YA16BE, ///< 16bit gray, 16bit alpha (big-endian)
+ AV_PIX_FMT_YA16LE, ///< 16bit gray, 16bit alpha (little-endian)
+
+ AV_PIX_FMT_GBRAP, ///< planar GBRA 4:4:4:4 32bpp
+ AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian
+ AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, little-endian
+ /**
+ * HW acceleration through QSV, data[3] contains a pointer to the
+ * mfxFrameSurface1 structure.
+ */
+ AV_PIX_FMT_QSV,
+ /**
+ * HW acceleration though MMAL, data[3] contains a pointer to the
+ * MMAL_BUFFER_HEADER_T structure.
+ */
+ AV_PIX_FMT_MMAL,
+
+ AV_PIX_FMT_D3D11VA_VLD, ///< HW decoding through Direct3D11, Picture.data[3] contains a ID3D11VideoDecoderOutputView pointer
+
+ AV_PIX_FMT_0RGB=0x123+4,///< packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
+ AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
+ AV_PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined
+ AV_PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
+
+ AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big-endian
+ AV_PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little-endian
+ AV_PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big-endian
+ AV_PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little-endian
+ AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV411P and setting color_range
+
+ AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples */
+ AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples */
+ AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples */
+ AV_PIX_FMT_BAYER_GRBG8, ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples */
+ AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian */
+ AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian */
+ AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian */
+ AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian */
+ AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian */
+ AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian */
+ AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian */
+ AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian */
+#if !FF_API_XVMC
+ AV_PIX_FMT_XVMC,///< XVideo Motion Acceleration via common packet passing
+#endif /* !FF_API_XVMC */
+ AV_PIX_FMT_YUV440P10LE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
+ AV_PIX_FMT_YUV440P10BE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
+ AV_PIX_FMT_YUV440P12LE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
+ AV_PIX_FMT_YUV440P12BE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
+ AV_PIX_FMT_AYUV64LE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
+ AV_PIX_FMT_AYUV64BE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
+
+ AV_PIX_FMT_VIDEOTOOLBOX, ///< hardware decoding through Videotoolbox
+
+ AV_PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
+};
+
+#define AV_PIX_FMT_Y400A AV_PIX_FMT_GRAY8A
+#define AV_PIX_FMT_GBR24P AV_PIX_FMT_GBRP
+
+#if AV_HAVE_BIGENDIAN
+# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be
+#else
+# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le
+#endif
+
+#define AV_PIX_FMT_RGB32 AV_PIX_FMT_NE(ARGB, BGRA)
+#define AV_PIX_FMT_RGB32_1 AV_PIX_FMT_NE(RGBA, ABGR)
+#define AV_PIX_FMT_BGR32 AV_PIX_FMT_NE(ABGR, RGBA)
+#define AV_PIX_FMT_BGR32_1 AV_PIX_FMT_NE(BGRA, ARGB)
+#define AV_PIX_FMT_0RGB32 AV_PIX_FMT_NE(0RGB, BGR0)
+#define AV_PIX_FMT_0BGR32 AV_PIX_FMT_NE(0BGR, RGB0)
+
+#define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE)
+#define AV_PIX_FMT_YA16 AV_PIX_FMT_NE(YA16BE, YA16LE)
+#define AV_PIX_FMT_RGB48 AV_PIX_FMT_NE(RGB48BE, RGB48LE)
+#define AV_PIX_FMT_RGB565 AV_PIX_FMT_NE(RGB565BE, RGB565LE)
+#define AV_PIX_FMT_RGB555 AV_PIX_FMT_NE(RGB555BE, RGB555LE)
+#define AV_PIX_FMT_RGB444 AV_PIX_FMT_NE(RGB444BE, RGB444LE)
+#define AV_PIX_FMT_RGBA64 AV_PIX_FMT_NE(RGBA64BE, RGBA64LE)
+#define AV_PIX_FMT_BGR48 AV_PIX_FMT_NE(BGR48BE, BGR48LE)
+#define AV_PIX_FMT_BGR565 AV_PIX_FMT_NE(BGR565BE, BGR565LE)
+#define AV_PIX_FMT_BGR555 AV_PIX_FMT_NE(BGR555BE, BGR555LE)
+#define AV_PIX_FMT_BGR444 AV_PIX_FMT_NE(BGR444BE, BGR444LE)
+#define AV_PIX_FMT_BGRA64 AV_PIX_FMT_NE(BGRA64BE, BGRA64LE)
+
+#define AV_PIX_FMT_YUV420P9 AV_PIX_FMT_NE(YUV420P9BE , YUV420P9LE)
+#define AV_PIX_FMT_YUV422P9 AV_PIX_FMT_NE(YUV422P9BE , YUV422P9LE)
+#define AV_PIX_FMT_YUV444P9 AV_PIX_FMT_NE(YUV444P9BE , YUV444P9LE)
+#define AV_PIX_FMT_YUV420P10 AV_PIX_FMT_NE(YUV420P10BE, YUV420P10LE)
+#define AV_PIX_FMT_YUV422P10 AV_PIX_FMT_NE(YUV422P10BE, YUV422P10LE)
+#define AV_PIX_FMT_YUV440P10 AV_PIX_FMT_NE(YUV440P10BE, YUV440P10LE)
+#define AV_PIX_FMT_YUV444P10 AV_PIX_FMT_NE(YUV444P10BE, YUV444P10LE)
+#define AV_PIX_FMT_YUV420P12 AV_PIX_FMT_NE(YUV420P12BE, YUV420P12LE)
+#define AV_PIX_FMT_YUV422P12 AV_PIX_FMT_NE(YUV422P12BE, YUV422P12LE)
+#define AV_PIX_FMT_YUV440P12 AV_PIX_FMT_NE(YUV440P12BE, YUV440P12LE)
+#define AV_PIX_FMT_YUV444P12 AV_PIX_FMT_NE(YUV444P12BE, YUV444P12LE)
+#define AV_PIX_FMT_YUV420P14 AV_PIX_FMT_NE(YUV420P14BE, YUV420P14LE)
+#define AV_PIX_FMT_YUV422P14 AV_PIX_FMT_NE(YUV422P14BE, YUV422P14LE)
+#define AV_PIX_FMT_YUV444P14 AV_PIX_FMT_NE(YUV444P14BE, YUV444P14LE)
+#define AV_PIX_FMT_YUV420P16 AV_PIX_FMT_NE(YUV420P16BE, YUV420P16LE)
+#define AV_PIX_FMT_YUV422P16 AV_PIX_FMT_NE(YUV422P16BE, YUV422P16LE)
+#define AV_PIX_FMT_YUV444P16 AV_PIX_FMT_NE(YUV444P16BE, YUV444P16LE)
+
+#define AV_PIX_FMT_GBRP9 AV_PIX_FMT_NE(GBRP9BE , GBRP9LE)
+#define AV_PIX_FMT_GBRP10 AV_PIX_FMT_NE(GBRP10BE, GBRP10LE)
+#define AV_PIX_FMT_GBRP12 AV_PIX_FMT_NE(GBRP12BE, GBRP12LE)
+#define AV_PIX_FMT_GBRP14 AV_PIX_FMT_NE(GBRP14BE, GBRP14LE)
+#define AV_PIX_FMT_GBRP16 AV_PIX_FMT_NE(GBRP16BE, GBRP16LE)
+#define AV_PIX_FMT_GBRAP16 AV_PIX_FMT_NE(GBRAP16BE, GBRAP16LE)
+
+#define AV_PIX_FMT_BAYER_BGGR16 AV_PIX_FMT_NE(BAYER_BGGR16BE, BAYER_BGGR16LE)
+#define AV_PIX_FMT_BAYER_RGGB16 AV_PIX_FMT_NE(BAYER_RGGB16BE, BAYER_RGGB16LE)
+#define AV_PIX_FMT_BAYER_GBRG16 AV_PIX_FMT_NE(BAYER_GBRG16BE, BAYER_GBRG16LE)
+#define AV_PIX_FMT_BAYER_GRBG16 AV_PIX_FMT_NE(BAYER_GRBG16BE, BAYER_GRBG16LE)
+
+
+#define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE)
+#define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE)
+#define AV_PIX_FMT_YUVA444P9 AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE)
+#define AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE)
+#define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE)
+#define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE)
+#define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE)
+#define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE)
+#define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE)
+
+#define AV_PIX_FMT_XYZ12 AV_PIX_FMT_NE(XYZ12BE, XYZ12LE)
+#define AV_PIX_FMT_NV20 AV_PIX_FMT_NE(NV20BE, NV20LE)
+#define AV_PIX_FMT_AYUV64 AV_PIX_FMT_NE(AYUV64BE, AYUV64LE)
+
+/**
+ * Chromaticity coordinates of the source primaries.
+ */
+enum AVColorPrimaries {
+ AVCOL_PRI_RESERVED0 = 0,
+ AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B
+ AVCOL_PRI_UNSPECIFIED = 2,
+ AVCOL_PRI_RESERVED = 3,
+ AVCOL_PRI_BT470M = 4, ///< also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
+
+ AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
+ AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
+ AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above
+ AVCOL_PRI_FILM = 8, ///< colour filters using Illuminant C
+ AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020
+ AVCOL_PRI_SMPTEST428_1= 10, ///< SMPTE ST 428-1 (CIE 1931 XYZ)
+ AVCOL_PRI_NB, ///< Not part of ABI
+};
+
+/**
+ * Color Transfer Characteristic.
+ */
+enum AVColorTransferCharacteristic {
+ AVCOL_TRC_RESERVED0 = 0,
+ AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361
+ AVCOL_TRC_UNSPECIFIED = 2,
+ AVCOL_TRC_RESERVED = 3,
+ AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
+ AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG
+ AVCOL_TRC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC
+ AVCOL_TRC_SMPTE240M = 7,
+ AVCOL_TRC_LINEAR = 8, ///< "Linear transfer characteristics"
+ AVCOL_TRC_LOG = 9, ///< "Logarithmic transfer characteristic (100:1 range)"
+ AVCOL_TRC_LOG_SQRT = 10, ///< "Logarithmic transfer characteristic (100 * Sqrt(10) : 1 range)"
+ AVCOL_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4
+ AVCOL_TRC_BT1361_ECG = 12, ///< ITU-R BT1361 Extended Colour Gamut
+ AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC)
+ AVCOL_TRC_BT2020_10 = 14, ///< ITU-R BT2020 for 10 bit system
+ AVCOL_TRC_BT2020_12 = 15, ///< ITU-R BT2020 for 12 bit system
+ AVCOL_TRC_SMPTEST2084 = 16, ///< SMPTE ST 2084 for 10, 12, 14 and 16 bit systems
+ AVCOL_TRC_SMPTEST428_1 = 17, ///< SMPTE ST 428-1
+ AVCOL_TRC_NB, ///< Not part of ABI
+};
+
+/**
+ * YUV colorspace type.
+ */
+enum AVColorSpace {
+ AVCOL_SPC_RGB = 0, ///< order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
+ AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
+ AVCOL_SPC_UNSPECIFIED = 2,
+ AVCOL_SPC_RESERVED = 3,
+ AVCOL_SPC_FCC = 4, ///< FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
+ AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
+ AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
+ AVCOL_SPC_SMPTE240M = 7,
+ AVCOL_SPC_YCOCG = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16
+ AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system
+ AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system
+ AVCOL_SPC_NB, ///< Not part of ABI
+};
+#define AVCOL_SPC_YCGCO AVCOL_SPC_YCOCG
+
+
+/**
+ * MPEG vs JPEG YUV range.
+ */
+enum AVColorRange {
+ AVCOL_RANGE_UNSPECIFIED = 0,
+ AVCOL_RANGE_MPEG = 1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges
+ AVCOL_RANGE_JPEG = 2, ///< the normal 2^n-1 "JPEG" YUV ranges
+ AVCOL_RANGE_NB, ///< Not part of ABI
+};
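For concreteness (an editorial aside, not part of the header): with n = 8 the "MPEG" range spans 219 * 2^(8-8) = 219 steps, i.e. luma 16-235 and chroma 16-240, while the "JPEG" range spans the full 2^8 - 1 = 255 steps, i.e. 0-255; at n = 10 the limited luma range becomes 64-940.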
+
+/**
+ * Location of chroma samples.
+ *
+ * Illustration showing the location of the first (top left) chroma sample of the
+ * image. The left shows only luma, the right shows the location of the chroma
+ * sample; the two could be imagined to overlay each other but are drawn
+ * separately due to the limitations of ASCII.
+ *
+ * 1st 2nd 1st 2nd horizontal luma sample positions
+ * v v v v
+ * ______ ______
+ *1st luma line > |X X ... |3 4 X ... X are luma samples,
+ * | |1 2 1-6 are possible chroma positions
+ *2nd luma line > |X X ... |5 6 X ... 0 is undefined/unknown position
+ */
+enum AVChromaLocation {
+ AVCHROMA_LOC_UNSPECIFIED = 0,
+ AVCHROMA_LOC_LEFT = 1, ///< mpeg2/4 4:2:0, h264 default for 4:2:0
+ AVCHROMA_LOC_CENTER = 2, ///< mpeg1 4:2:0, jpeg 4:2:0, h263 4:2:0
+ AVCHROMA_LOC_TOPLEFT = 3, ///< ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2
+ AVCHROMA_LOC_TOP = 4,
+ AVCHROMA_LOC_BOTTOMLEFT = 5,
+ AVCHROMA_LOC_BOTTOM = 6,
+ AVCHROMA_LOC_NB, ///< Not part of ABI
+};
+
+#endif /* AVUTIL_PIXFMT_H */
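An illustrative aside (not part of this patch): the AV_PIX_FMT_NE() aliases above resolve to different enum values per build, so code should compare against the alias rather than hard-coding an endianness. The show_rgb32 name and the include path are assumptions.

#include <stdio.h>
#include <libavutil/pixfmt.h>

static void show_rgb32(void)
{
    /* AV_PIX_FMT_RGB32 is AV_PIX_FMT_BGRA on little-endian builds
     * and AV_PIX_FMT_ARGB on big-endian builds. */
    printf("RGB32 maps to the %s-endian layout (enum value %d)\n",
           AV_PIX_FMT_RGB32 == AV_PIX_FMT_BGRA ? "little" : "big",
           (int)AV_PIX_FMT_RGB32);
}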
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/rational.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/rational.h
new file mode 100644
index 000000000..289746968
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/rational.h
@@ -0,0 +1,173 @@
+/*
+ * rational numbers
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * rational numbers
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVUTIL_RATIONAL_H
+#define AVUTIL_RATIONAL_H
+
+#include <stdint.h>
+#include <limits.h>
+#include "attributes.h"
+
+/**
+ * @addtogroup lavu_math
+ * @{
+ */
+
+/**
+ * rational number numerator/denominator
+ */
+typedef struct AVRational{
+ int num; ///< numerator
+ int den; ///< denominator
+} AVRational;
+
+/**
+ * Create a rational.
+ * Useful for compilers that do not support compound literals.
+ * @note The return value is not reduced.
+ */
+static inline AVRational av_make_q(int num, int den)
+{
+ AVRational r = { num, den };
+ return r;
+}
+
+/**
+ * Compare two rationals.
+ * @param a first rational
+ * @param b second rational
+ * @return 0 if a==b, 1 if a>b, -1 if a<b, and INT_MIN if one of the
+ * values is of the form 0/0
+ */
+static inline int av_cmp_q(AVRational a, AVRational b){
+ const int64_t tmp= a.num * (int64_t)b.den - b.num * (int64_t)a.den;
+
+ if(tmp) return (int)((tmp ^ a.den ^ b.den)>>63)|1;
+ else if(b.den && a.den) return 0;
+ else if(a.num && b.num) return (a.num>>31) - (b.num>>31);
+ else return INT_MIN;
+}
+
+/**
+ * Convert rational to double.
+ * @param a rational to convert
+ * @return (double) a
+ */
+static inline double av_q2d(AVRational a){
+ return a.num / (double) a.den;
+}
+
+/**
+ * Reduce a fraction.
+ * This is useful for framerate calculations.
+ * @param dst_num destination numerator
+ * @param dst_den destination denominator
+ * @param num source numerator
+ * @param den source denominator
+ * @param max the maximum allowed for dst_num & dst_den
+ * @return 1 if exact, 0 otherwise
+ */
+int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max);
+
+/**
+ * Multiply two rationals.
+ * @param b first rational
+ * @param c second rational
+ * @return b*c
+ */
+AVRational av_mul_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Divide one rational by another.
+ * @param b first rational
+ * @param c second rational
+ * @return b/c
+ */
+AVRational av_div_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Add two rationals.
+ * @param b first rational
+ * @param c second rational
+ * @return b+c
+ */
+AVRational av_add_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Subtract one rational from another.
+ * @param b first rational
+ * @param c second rational
+ * @return b-c
+ */
+AVRational av_sub_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Invert a rational.
+ * @param q value
+ * @return 1 / q
+ */
+static av_always_inline AVRational av_inv_q(AVRational q)
+{
+ AVRational r = { q.den, q.num };
+ return r;
+}
+
+/**
+ * Convert a double precision floating point number to a rational.
+ * inf is expressed as {1,0} or {-1,0} depending on the sign.
+ *
+ * @param d double to convert
+ * @param max the maximum allowed numerator and denominator
+ * @return (AVRational) d
+ */
+AVRational av_d2q(double d, int max) av_const;
+
+/**
+ * @return 1 if q1 is nearer to q than q2, -1 if q2 is nearer to q
+ * than q1, 0 if they have the same distance.
+ */
+int av_nearer_q(AVRational q, AVRational q1, AVRational q2);
+
+/**
+ * Find the nearest value in q_list to q.
+ * @param q_list an array of rationals terminated by {0, 0}
+ * @return the index of the nearest value found in the array
+ */
+int av_find_nearest_q_idx(AVRational q, const AVRational* q_list);
+
+/**
+ * Convert an AVRational to an IEEE 32-bit float.
+ *
+ * The float is returned in a uint32_t and its value is platform-independent.
+ */
+uint32_t av_q2intfloat(AVRational q);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_RATIONAL_H */
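An illustrative aside (not part of this patch): basic AVRational arithmetic with the helpers declared above, using the NTSC frame duration as input. The demo_rational name is invented for the example.

#include <stdio.h>
#include <libavutil/rational.h>

static void demo_rational(void)
{
    AVRational tb   = av_make_q(1001, 30000);        /* NTSC frame duration in seconds */
    AVRational rate = av_inv_q(tb);                  /* 30000/1001 frames per second */
    AVRational two  = av_mul_q(tb, av_make_q(2, 1)); /* duration of two frames */
    printf("fps ~ %.3f, two frames = %d/%d s, cmp = %d\n",
           av_q2d(rate), two.num, two.den, av_cmp_q(two, tb));
}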
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/samplefmt.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/samplefmt.h
new file mode 100644
index 000000000..6a8a031c0
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/samplefmt.h
@@ -0,0 +1,271 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_SAMPLEFMT_H
+#define AVUTIL_SAMPLEFMT_H
+
+#include <stdint.h>
+
+#include "avutil.h"
+#include "attributes.h"
+
+/**
+ * @addtogroup lavu_audio
+ * @{
+ *
+ * @defgroup lavu_sampfmts Audio sample formats
+ *
+ * Audio sample format enumeration and related convenience functions.
+ * @{
+ *
+ */
+
+/**
+ * Audio sample formats
+ *
+ * - The data described by the sample format is always in native-endian order.
+ * Sample values can be expressed by native C types, hence the lack of a signed
+ * 24-bit sample format even though it is a common raw audio data format.
+ *
+ * - The floating-point formats are based on full volume being in the range
+ * [-1.0, 1.0]. Any values outside this range are beyond full volume level.
+ *
+ * - The data layout as used in av_samples_fill_arrays() and elsewhere in FFmpeg
+ * (such as AVFrame in libavcodec) is as follows:
+ *
+ * @par
+ * For planar sample formats, each audio channel is in a separate data plane,
+ * and linesize is the buffer size, in bytes, for a single plane. All data
+ * planes must be the same size. For packed sample formats, only the first data
+ * plane is used, and samples for each channel are interleaved. In this case,
+ * linesize is the buffer size, in bytes, for that single plane.
+ *
+ */
+enum AVSampleFormat {
+ AV_SAMPLE_FMT_NONE = -1,
+ AV_SAMPLE_FMT_U8, ///< unsigned 8 bits
+ AV_SAMPLE_FMT_S16, ///< signed 16 bits
+ AV_SAMPLE_FMT_S32, ///< signed 32 bits
+ AV_SAMPLE_FMT_FLT, ///< float
+ AV_SAMPLE_FMT_DBL, ///< double
+
+ AV_SAMPLE_FMT_U8P, ///< unsigned 8 bits, planar
+ AV_SAMPLE_FMT_S16P, ///< signed 16 bits, planar
+ AV_SAMPLE_FMT_S32P, ///< signed 32 bits, planar
+ AV_SAMPLE_FMT_FLTP, ///< float, planar
+ AV_SAMPLE_FMT_DBLP, ///< double, planar
+
+ AV_SAMPLE_FMT_NB ///< Number of sample formats. DO NOT USE if linking dynamically
+};
+
+/**
+ * Return the name of sample_fmt, or NULL if sample_fmt is not
+ * recognized.
+ */
+const char *av_get_sample_fmt_name(enum AVSampleFormat sample_fmt);
+
+/**
+ * Return a sample format corresponding to name, or AV_SAMPLE_FMT_NONE
+ * on error.
+ */
+enum AVSampleFormat av_get_sample_fmt(const char *name);
+
+/**
+ * Return the planar<->packed alternative form of the given sample format, or
+ * AV_SAMPLE_FMT_NONE on error. If the passed sample_fmt is already in the
+ * requested planar/packed format, the format returned is the same as the
+ * input.
+ */
+enum AVSampleFormat av_get_alt_sample_fmt(enum AVSampleFormat sample_fmt, int planar);
+
+/**
+ * Get the packed alternative form of the given sample format.
+ *
+ * If the passed sample_fmt is already in packed format, the format returned is
+ * the same as the input.
+ *
+ * @return the packed alternative form of the given sample format or
+ AV_SAMPLE_FMT_NONE on error.
+ */
+enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt);
+
+/**
+ * Get the planar alternative form of the given sample format.
+ *
+ * If the passed sample_fmt is already in planar format, the format returned is
+ * the same as the input.
+ *
+ * @return the planar alternative form of the given sample format or
+ AV_SAMPLE_FMT_NONE on error.
+ */
+enum AVSampleFormat av_get_planar_sample_fmt(enum AVSampleFormat sample_fmt);
+
+/**
+ * Generate a string corresponding to the sample format sample_fmt, or a
+ * header if sample_fmt is negative.
+ *
+ * @param buf the buffer where to write the string
+ * @param buf_size the size of buf
+ * @param sample_fmt the sample format for which to print the
+ * corresponding info string, or a negative value to print the
+ * corresponding header.
+ * @return the pointer to the filled buffer or NULL if sample_fmt is
+ * unknown or in case of other errors
+ */
+char *av_get_sample_fmt_string(char *buf, int buf_size, enum AVSampleFormat sample_fmt);
+
+/**
+ * Return number of bytes per sample.
+ *
+ * @param sample_fmt the sample format
+ * @return number of bytes per sample or zero if unknown for the given
+ * sample format
+ */
+int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt);
+
+/**
+ * Check if the sample format is planar.
+ *
+ * @param sample_fmt the sample format to inspect
+ * @return 1 if the sample format is planar, 0 if it is interleaved
+ */
+int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt);
+
+/**
+ * Get the required buffer size for the given audio parameters.
+ *
+ * @param[out] linesize calculated linesize, may be NULL
+ * @param nb_channels the number of channels
+ * @param nb_samples the number of samples in a single channel
+ * @param sample_fmt the sample format
+ * @param align buffer size alignment (0 = default, 1 = no alignment)
+ * @return required buffer size, or negative error code on failure
+ */
+int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,
+ enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * @}
+ *
+ * @defgroup lavu_sampmanip Samples manipulation
+ *
+ * Functions that manipulate audio samples
+ * @{
+ */
+
+/**
+ * Fill plane data pointers and linesize for samples with sample
+ * format sample_fmt.
+ *
+ * The audio_data array is filled with the pointers to the samples data planes:
+ * for planar layouts, each pointer is set to the start of that channel's data
+ * within the buffer; for packed layouts, only the first pointer is set, to the
+ * start of the entire buffer.
+ *
+ * The value pointed to by linesize is set to the aligned size of each
+ * channel's data buffer for planar layout, or to the aligned size of the
+ * buffer for all channels for packed layout.
+ *
+ * The buffer in buf must be big enough to contain all the samples
+ * (use av_samples_get_buffer_size() to compute its minimum size),
+ * otherwise the audio_data pointers will point to invalid data.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param[out] audio_data array to be filled with the pointer for each channel
+ * @param[out] linesize calculated linesize, may be NULL
+ * @param buf the pointer to a buffer containing the samples
+ * @param nb_channels the number of channels
+ * @param nb_samples the number of samples in a single channel
+ * @param sample_fmt the sample format
+ * @param align buffer size alignment (0 = default, 1 = no alignment)
+ * @return >=0 on success or a negative error code on failure
+ * @todo return minimum size in bytes required for the buffer in case
+ * of success at the next bump
+ */
+int av_samples_fill_arrays(uint8_t **audio_data, int *linesize,
+ const uint8_t *buf,
+ int nb_channels, int nb_samples,
+ enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * Allocate a samples buffer for nb_samples samples, and fill data pointers and
+ * linesize accordingly.
+ * The allocated samples buffer can be freed by using av_freep(&audio_data[0]).
+ * Allocated data will be initialized to silence.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param[out] audio_data array to be filled with the pointer for each channel
+ * @param[out] linesize aligned size for audio buffer(s), may be NULL
+ * @param nb_channels number of audio channels
+ * @param nb_samples number of samples per channel
+ * @param align buffer size alignment (0 = default, 1 = no alignment)
+ * @return >=0 on success or a negative error code on failure
+ * @todo return the size of the allocated buffer in case of success at the next bump
+ * @see av_samples_fill_arrays()
+ * @see av_samples_alloc_array_and_samples()
+ */
+int av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels,
+ int nb_samples, enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * Allocate a data pointers array, samples buffer for nb_samples
+ * samples, and fill data pointers and linesize accordingly.
+ *
+ * This is the same as av_samples_alloc(), but also allocates the data
+ * pointers array.
+ *
+ * @see av_samples_alloc()
+ */
+int av_samples_alloc_array_and_samples(uint8_t ***audio_data, int *linesize, int nb_channels,
+ int nb_samples, enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * Copy samples from src to dst.
+ *
+ * @param dst destination array of pointers to data planes
+ * @param src source array of pointers to data planes
+ * @param dst_offset offset in samples at which the data will be written to dst
+ * @param src_offset offset in samples at which the data will be read from src
+ * @param nb_samples number of samples to be copied
+ * @param nb_channels number of audio channels
+ * @param sample_fmt audio sample format
+ */
+int av_samples_copy(uint8_t **dst, uint8_t * const *src, int dst_offset,
+ int src_offset, int nb_samples, int nb_channels,
+ enum AVSampleFormat sample_fmt);
+
+/**
+ * Fill an audio buffer with silence.
+ *
+ * @param audio_data array of pointers to data planes
+ * @param offset offset in samples at which to start filling
+ * @param nb_samples number of samples to fill
+ * @param nb_channels number of audio channels
+ * @param sample_fmt audio sample format
+ */
+int av_samples_set_silence(uint8_t **audio_data, int offset, int nb_samples,
+ int nb_channels, enum AVSampleFormat sample_fmt);
+
+/**
+ * @}
+ * @}
+ */
+#endif /* AVUTIL_SAMPLEFMT_H */
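An illustrative aside (not part of this patch): allocating a planar float buffer with av_samples_alloc() and releasing it with a single av_freep(), as the documentation above describes. The demo_samples name is invented for the example.

#include <libavutil/samplefmt.h>
#include <libavutil/mem.h>

static int demo_samples(void)
{
    uint8_t *data[2];   /* one pointer per channel for a planar format */
    int linesize;
    int ret = av_samples_alloc(data, &linesize, 2, 1024,
                               AV_SAMPLE_FMT_FLTP, 0 /* default alignment */);
    if (ret < 0)
        return ret;
    /* ... write or read data[0] and data[1], each linesize bytes long ... */
    av_freep(&data[0]); /* one call releases the whole buffer */
    return 0;
}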
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/version.h b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/version.h
new file mode 100644
index 000000000..9ffa7a866
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/version.h
@@ -0,0 +1,129 @@
+/*
+ * copyright (c) 2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_VERSION_H
+#define AVUTIL_VERSION_H
+
+#include "macros.h"
+
+/**
+ * @addtogroup version_utils
+ *
+ * Useful to check and match the library version in order to maintain
+ * backward compatibility.
+ *
+ * @{
+ */
+
+#define AV_VERSION_INT(a, b, c) ((a)<<16 | (b)<<8 | (c))
+#define AV_VERSION_DOT(a, b, c) a ##.## b ##.## c
+#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c)
+
+/**
+ * Extract version components from the full ::AV_VERSION_INT int as returned
+ * by functions like ::avformat_version() and ::avcodec_version()
+ */
+#define AV_VERSION_MAJOR(a) ((a) >> 16)
+#define AV_VERSION_MINOR(a) (((a) & 0x00FF00) >> 8)
+#define AV_VERSION_MICRO(a) ((a) & 0xFF)
+
+/**
+ * @}
+ */
+
+/**
+ * @file
+ * @ingroup lavu
+ * Libavutil version macros
+ */
+
+/**
+ * @defgroup lavu_ver Version and Build diagnostics
+ *
+ * Macros and functions useful for checking at compile time and at runtime
+ * which version of libavutil is in use.
+ *
+ * @{
+ */
+
+#define LIBAVUTIL_VERSION_MAJOR 55
+#define LIBAVUTIL_VERSION_MINOR 12
+#define LIBAVUTIL_VERSION_MICRO 100
+
+#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
+ LIBAVUTIL_VERSION_MINOR, \
+ LIBAVUTIL_VERSION_MICRO)
+#define LIBAVUTIL_VERSION AV_VERSION(LIBAVUTIL_VERSION_MAJOR, \
+ LIBAVUTIL_VERSION_MINOR, \
+ LIBAVUTIL_VERSION_MICRO)
+#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT
+
+#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION)
+
+/**
+ * @}
+ *
+ * @defgroup depr_guards Deprecation guards
+ * FF_API_* defines may be placed below to indicate public API that will be
+ * dropped at a future version bump. The defines themselves are not part of
+ * the public API and may change, break or disappear at any time.
+ *
+ * @note When bumping the major version, it is recommended to manually
+ * disable each FF_API_* in its own commit instead of disabling them all
+ * at once through the bump. This improves the git bisectability of the change.
+ *
+ * @{
+ */
+
+#ifndef FF_API_VDPAU
+#define FF_API_VDPAU (LIBAVUTIL_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_XVMC
+#define FF_API_XVMC (LIBAVUTIL_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_OPT_TYPE_METADATA
+#define FF_API_OPT_TYPE_METADATA (LIBAVUTIL_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_DLOG
+#define FF_API_DLOG (LIBAVUTIL_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_VAAPI
+#define FF_API_VAAPI (LIBAVUTIL_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_FRAME_QP
+#define FF_API_FRAME_QP (LIBAVUTIL_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_PLUS1_MINUS1
+#define FF_API_PLUS1_MINUS1 (LIBAVUTIL_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_ERROR_FRAME
+#define FF_API_ERROR_FRAME (LIBAVUTIL_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_CRC_BIG_TABLE
+#define FF_API_CRC_BIG_TABLE (LIBAVUTIL_VERSION_MAJOR < 56)
+#endif
+
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_VERSION_H */
+
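An illustrative aside (not part of this patch): the version and deprecation-guard macros defined above are intended for compile-time checks such as the following.

#include <libavutil/version.h>

#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(55, 0, 100)
/* It is safe to rely on libavutil 55 APIs here. */
#endif

#if FF_API_VDPAU
/* The deprecated VDPAU pixel formats are still compiled into this build. */
#endif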
diff --git a/dom/media/platforms/ffmpeg/ffmpeg57/moz.build b/dom/media/platforms/ffmpeg/ffmpeg57/moz.build
new file mode 100644
index 000000000..6bd5db8d7
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffmpeg57/moz.build
@@ -0,0 +1,25 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+UNIFIED_SOURCES += [
+ '../FFmpegAudioDecoder.cpp',
+ '../FFmpegDataDecoder.cpp',
+ '../FFmpegDecoderModule.cpp',
+ '../FFmpegVideoDecoder.cpp',
+]
+LOCAL_INCLUDES += [
+ '..',
+ 'include',
+]
+
+if CONFIG['GNU_CXX']:
+ CXXFLAGS += [ '-Wno-deprecated-declarations' ]
+if CONFIG['CLANG_CXX']:
+ CXXFLAGS += [
+ '-Wno-unknown-attributes',
+ ]
+
+FINAL_LIBRARY = 'xul'
diff --git a/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.cpp b/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.cpp
new file mode 100644
index 000000000..0e95e79e6
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.cpp
@@ -0,0 +1,113 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "FFVPXRuntimeLinker.h"
+#include "FFmpegLibWrapper.h"
+#include "FFmpegLog.h"
+#include "nsIFile.h"
+#include "prmem.h"
+#include "prlink.h"
+
+// We use a known symbol located in lgpllibs to determine its location.
+// soundtouch happens to always be included in lgpllibs.
+#include "soundtouch/SoundTouch.h"
+
+namespace mozilla
+{
+
+template <int V> class FFmpegDecoderModule
+{
+public:
+ static already_AddRefed<PlatformDecoderModule> Create(FFmpegLibWrapper*);
+};
+
+static FFmpegLibWrapper sFFVPXLib;
+
+FFVPXRuntimeLinker::LinkStatus FFVPXRuntimeLinker::sLinkStatus =
+ LinkStatus_INIT;
+
+static PRLibrary*
+MozAVLink(const char* aName)
+{
+ PRLibSpec lspec;
+ lspec.type = PR_LibSpec_Pathname;
+ lspec.value.pathname = aName;
+ PRLibrary* lib = PR_LoadLibraryWithFlags(lspec, PR_LD_NOW | PR_LD_LOCAL);
+ if (!lib) {
+ FFMPEG_LOG("unable to load library %s", aName);
+ }
+ return lib;
+}
+
+/* static */ bool
+FFVPXRuntimeLinker::Init()
+{
+ if (sLinkStatus) {
+ return sLinkStatus == LinkStatus_SUCCEEDED;
+ }
+
+ sLinkStatus = LinkStatus_FAILED;
+
+ // We retrieve the path of the lgpllibs library as this is where mozavcodec
+ // and mozavutil libs are located.
+ char* lgpllibsname = PR_GetLibraryName(nullptr, "lgpllibs");
+ if (!lgpllibsname) {
+ return false;
+ }
+ char* path =
+ PR_GetLibraryFilePathname(lgpllibsname,
+ (PRFuncPtr)&soundtouch::SoundTouch::getVersionId);
+ PR_FreeLibraryName(lgpllibsname);
+ if (!path) {
+ return false;
+ }
+ nsCOMPtr<nsIFile> xulFile = do_CreateInstance(NS_LOCAL_FILE_CONTRACTID);
+ if (!xulFile ||
+ NS_FAILED(xulFile->InitWithNativePath(nsDependentCString(path)))) {
+ PR_Free(path);
+ return false;
+ }
+ PR_Free(path);
+
+ nsCOMPtr<nsIFile> rootDir;
+ if (NS_FAILED(xulFile->GetParent(getter_AddRefs(rootDir))) || !rootDir) {
+ return false;
+ }
+ nsAutoCString rootPath;
+ if (NS_FAILED(rootDir->GetNativePath(rootPath))) {
+ return false;
+ }
+
+ char* libname = NULL;
+ /* Get the platform-dependent library name of the module */
+ libname = PR_GetLibraryName(rootPath.get(), "mozavutil");
+ if (!libname) {
+ return false;
+ }
+ sFFVPXLib.mAVUtilLib = MozAVLink(libname);
+ PR_FreeLibraryName(libname);
+ libname = PR_GetLibraryName(rootPath.get(), "mozavcodec");
+ if (libname) {
+ sFFVPXLib.mAVCodecLib = MozAVLink(libname);
+ PR_FreeLibraryName(libname);
+ }
+ if (sFFVPXLib.Link() == FFmpegLibWrapper::LinkResult::Success) {
+ sLinkStatus = LinkStatus_SUCCEEDED;
+ return true;
+ }
+ return false;
+}
+
+/* static */ already_AddRefed<PlatformDecoderModule>
+FFVPXRuntimeLinker::CreateDecoderModule()
+{
+ if (!Init()) {
+ return nullptr;
+ }
+ return FFmpegDecoderModule<FFVPX_VERSION>::Create(&sFFVPXLib);
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.h b/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.h
new file mode 100644
index 000000000..f7592a751
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef __FFVPXRuntimeLinker_h__
+#define __FFVPXRuntimeLinker_h__
+
+#include "PlatformDecoderModule.h"
+
+namespace mozilla
+{
+
+class FFVPXRuntimeLinker
+{
+public:
+ static bool Init();
+ static already_AddRefed<PlatformDecoderModule> CreateDecoderModule();
+
+private:
+ static enum LinkStatus {
+ LinkStatus_INIT = 0,
+ LinkStatus_FAILED,
+ LinkStatus_SUCCEEDED
+ } sLinkStatus;
+};
+
+} // namespace mozilla
+
+#endif /* __FFVPXRuntimeLinker_h__ */
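
The class is a pair of static entry points: Init() performs the linking once and caches the outcome in sLinkStatus, and CreateDecoderModule() hands back the FFVPX-backed PlatformDecoderModule. A hedged usage sketch follows, assuming a caller along the lines of the platform decoder module factory; MaybeCreateFFVPXModule is an illustrative name, not part of this patch.

    #include "FFVPXRuntimeLinker.h"

    namespace mozilla {

    // Illustrative caller: obtain the FFVPX-backed decoder module, or nullptr
    // if the bundled libraries could not be found and linked.
    static already_AddRefed<PlatformDecoderModule>
    MaybeCreateFFVPXModule()
    {
      // Init() is cheap after the first call; sLinkStatus caches the result,
      // and CreateDecoderModule() calls it again internally anyway.
      if (!FFVPXRuntimeLinker::Init()) {
        return nullptr;
      }
      return FFVPXRuntimeLinker::CreateDecoderModule();
    }

    } // namespace mozilla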
diff --git a/dom/media/platforms/ffmpeg/ffvpx/moz.build b/dom/media/platforms/ffmpeg/ffvpx/moz.build
new file mode 100644
index 000000000..aee58b5b0
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/ffvpx/moz.build
@@ -0,0 +1,45 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+LOCAL_INCLUDES += ['/xpcom/build']
+EXPORTS += [
+ 'FFVPXRuntimeLinker.h',
+]
+
+UNIFIED_SOURCES += [
+ '../FFmpegAudioDecoder.cpp',
+ '../FFmpegDataDecoder.cpp',
+ '../FFmpegDecoderModule.cpp',
+ '../FFmpegVideoDecoder.cpp',
+]
+SOURCES += [
+ 'FFVPXRuntimeLinker.cpp',
+]
+LOCAL_INCLUDES += [
+ '..',
+ '../ffmpeg57/include',
+]
+
+if CONFIG['OS_ARCH'] == 'WINNT':
+ LOCAL_INCLUDES += [
+ '../ffmpeg57/include',
+ ]
+
+if CONFIG['GNU_CXX']:
+ CXXFLAGS += [ '-Wno-deprecated-declarations' ]
+if CONFIG['CLANG_CXX']:
+ CXXFLAGS += [
+ '-Wno-unknown-attributes',
+ ]
+if CONFIG['_MSC_VER']:
+ CXXFLAGS += [
+ '-wd4996', # deprecated declaration
+ ]
+
+DEFINES['FFVPX_VERSION'] = 46465650
+DEFINES['USING_MOZFFVPX'] = True
+
+FINAL_LIBRARY = 'xul'
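
Two details in this build file tie the directory together: the shared FFmpeg* sources from ../ are compiled here against the bundled ffmpeg57 headers, and DEFINES['FFVPX_VERSION'] supplies the template parameter that FFVPXRuntimeLinker.cpp uses to select the matching decoder module. A hedged sketch of that plumbing; the instantiation comment is an assumption about the unshown ../FFmpegDecoderModule.cpp, not something visible in this hunk.

    // Every source file compiled by this moz.build effectively sees
    //   -DFFVPX_VERSION=46465650 -DUSING_MOZFFVPX=1
    // FFVPXRuntimeLinker.cpp only forward-declares the template and calls
    //   FFmpegDecoderModule<FFVPX_VERSION>::Create(&sFFVPXLib);
    // so the shared sources are assumed to provide the definition, roughly:
    template <int V>
    class FFmpegDecoderModule
    {
    public:
      static already_AddRefed<PlatformDecoderModule> Create(FFmpegLibWrapper* aLib);
    };
    // ...plus an instantiation for V == FFVPX_VERSION (assumed, not shown
    // here), e.g. an explicit instantiation in ../FFmpegDecoderModule.cpp:
    //   template class FFmpegDecoderModule<FFVPX_VERSION>;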
diff --git a/dom/media/platforms/ffmpeg/libav53/include/COPYING.LGPLv2.1 b/dom/media/platforms/ffmpeg/libav53/include/COPYING.LGPLv2.1
new file mode 100644
index 000000000..00b4fedfe
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/COPYING.LGPLv2.1
@@ -0,0 +1,504 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavcodec/avcodec.h b/dom/media/platforms/ffmpeg/libav53/include/libavcodec/avcodec.h
new file mode 100644
index 000000000..2451294c1
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavcodec/avcodec.h
@@ -0,0 +1,4761 @@
+/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AVCODEC_H
+#define AVCODEC_AVCODEC_H
+
+/**
+ * @file
+ * external API header
+ */
+
+#include <errno.h>
+#include "libavutil/samplefmt.h"
+#include "libavutil/avutil.h"
+#include "libavutil/cpu.h"
+#include "libavutil/dict.h"
+#include "libavutil/log.h"
+#include "libavutil/pixfmt.h"
+#include "libavutil/rational.h"
+
+#include "libavcodec/version.h"
+/**
+ * @defgroup libavc Encoding/Decoding Library
+ * @{
+ *
+ * @defgroup lavc_decoding Decoding
+ * @{
+ * @}
+ *
+ * @defgroup lavc_encoding Encoding
+ * @{
+ * @}
+ *
+ * @defgroup lavc_codec Codecs
+ * @{
+ * @defgroup lavc_codec_native Native Codecs
+ * @{
+ * @}
+ * @defgroup lavc_codec_wrappers External library wrappers
+ * @{
+ * @}
+ * @defgroup lavc_codec_hwaccel Hardware Accelerators bridge
+ * @{
+ * @}
+ * @}
+ * @defgroup lavc_internal Internal
+ * @{
+ * @}
+ * @}
+ *
+ */
+
+
+/**
+ * Identify the syntax and semantics of the bitstream.
+ * The principle is roughly:
+ * Two decoders with the same ID can decode the same streams.
+ * Two encoders with the same ID can encode compatible streams.
+ * There may be slight deviations from the principle due to implementation
+ * details.
+ *
+ * If you add a codec ID to this list, add it so that
+ * 1. no value of an existing codec ID changes (that would break ABI),
+ * 2. it is as close as possible to similar codecs.
+ */
+enum CodecID {
+ CODEC_ID_NONE,
+
+ /* video codecs */
+ CODEC_ID_MPEG1VIDEO,
+ CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding
+ CODEC_ID_MPEG2VIDEO_XVMC,
+ CODEC_ID_H261,
+ CODEC_ID_H263,
+ CODEC_ID_RV10,
+ CODEC_ID_RV20,
+ CODEC_ID_MJPEG,
+ CODEC_ID_MJPEGB,
+ CODEC_ID_LJPEG,
+ CODEC_ID_SP5X,
+ CODEC_ID_JPEGLS,
+ CODEC_ID_MPEG4,
+ CODEC_ID_RAWVIDEO,
+ CODEC_ID_MSMPEG4V1,
+ CODEC_ID_MSMPEG4V2,
+ CODEC_ID_MSMPEG4V3,
+ CODEC_ID_WMV1,
+ CODEC_ID_WMV2,
+ CODEC_ID_H263P,
+ CODEC_ID_H263I,
+ CODEC_ID_FLV1,
+ CODEC_ID_SVQ1,
+ CODEC_ID_SVQ3,
+ CODEC_ID_DVVIDEO,
+ CODEC_ID_HUFFYUV,
+ CODEC_ID_CYUV,
+ CODEC_ID_H264,
+ CODEC_ID_INDEO3,
+ CODEC_ID_VP3,
+ CODEC_ID_THEORA,
+ CODEC_ID_ASV1,
+ CODEC_ID_ASV2,
+ CODEC_ID_FFV1,
+ CODEC_ID_4XM,
+ CODEC_ID_VCR1,
+ CODEC_ID_CLJR,
+ CODEC_ID_MDEC,
+ CODEC_ID_ROQ,
+ CODEC_ID_INTERPLAY_VIDEO,
+ CODEC_ID_XAN_WC3,
+ CODEC_ID_XAN_WC4,
+ CODEC_ID_RPZA,
+ CODEC_ID_CINEPAK,
+ CODEC_ID_WS_VQA,
+ CODEC_ID_MSRLE,
+ CODEC_ID_MSVIDEO1,
+ CODEC_ID_IDCIN,
+ CODEC_ID_8BPS,
+ CODEC_ID_SMC,
+ CODEC_ID_FLIC,
+ CODEC_ID_TRUEMOTION1,
+ CODEC_ID_VMDVIDEO,
+ CODEC_ID_MSZH,
+ CODEC_ID_ZLIB,
+ CODEC_ID_QTRLE,
+ CODEC_ID_SNOW,
+ CODEC_ID_TSCC,
+ CODEC_ID_ULTI,
+ CODEC_ID_QDRAW,
+ CODEC_ID_VIXL,
+ CODEC_ID_QPEG,
+ CODEC_ID_PNG,
+ CODEC_ID_PPM,
+ CODEC_ID_PBM,
+ CODEC_ID_PGM,
+ CODEC_ID_PGMYUV,
+ CODEC_ID_PAM,
+ CODEC_ID_FFVHUFF,
+ CODEC_ID_RV30,
+ CODEC_ID_RV40,
+ CODEC_ID_VC1,
+ CODEC_ID_WMV3,
+ CODEC_ID_LOCO,
+ CODEC_ID_WNV1,
+ CODEC_ID_AASC,
+ CODEC_ID_INDEO2,
+ CODEC_ID_FRAPS,
+ CODEC_ID_TRUEMOTION2,
+ CODEC_ID_BMP,
+ CODEC_ID_CSCD,
+ CODEC_ID_MMVIDEO,
+ CODEC_ID_ZMBV,
+ CODEC_ID_AVS,
+ CODEC_ID_SMACKVIDEO,
+ CODEC_ID_NUV,
+ CODEC_ID_KMVC,
+ CODEC_ID_FLASHSV,
+ CODEC_ID_CAVS,
+ CODEC_ID_JPEG2000,
+ CODEC_ID_VMNC,
+ CODEC_ID_VP5,
+ CODEC_ID_VP6,
+ CODEC_ID_VP6F,
+ CODEC_ID_TARGA,
+ CODEC_ID_DSICINVIDEO,
+ CODEC_ID_TIERTEXSEQVIDEO,
+ CODEC_ID_TIFF,
+ CODEC_ID_GIF,
+#if LIBAVCODEC_VERSION_MAJOR == 53
+ CODEC_ID_FFH264,
+#endif
+ CODEC_ID_DXA,
+ CODEC_ID_DNXHD,
+ CODEC_ID_THP,
+ CODEC_ID_SGI,
+ CODEC_ID_C93,
+ CODEC_ID_BETHSOFTVID,
+ CODEC_ID_PTX,
+ CODEC_ID_TXD,
+ CODEC_ID_VP6A,
+ CODEC_ID_AMV,
+ CODEC_ID_VB,
+ CODEC_ID_PCX,
+ CODEC_ID_SUNRAST,
+ CODEC_ID_INDEO4,
+ CODEC_ID_INDEO5,
+ CODEC_ID_MIMIC,
+ CODEC_ID_RL2,
+#if LIBAVCODEC_VERSION_MAJOR == 53
+ CODEC_ID_8SVX_EXP,
+ CODEC_ID_8SVX_FIB,
+#endif
+ CODEC_ID_ESCAPE124,
+ CODEC_ID_DIRAC,
+ CODEC_ID_BFI,
+ CODEC_ID_CMV,
+ CODEC_ID_MOTIONPIXELS,
+ CODEC_ID_TGV,
+ CODEC_ID_TGQ,
+ CODEC_ID_TQI,
+ CODEC_ID_AURA,
+ CODEC_ID_AURA2,
+ CODEC_ID_V210X,
+ CODEC_ID_TMV,
+ CODEC_ID_V210,
+ CODEC_ID_DPX,
+ CODEC_ID_MAD,
+ CODEC_ID_FRWU,
+ CODEC_ID_FLASHSV2,
+ CODEC_ID_CDGRAPHICS,
+ CODEC_ID_R210,
+ CODEC_ID_ANM,
+ CODEC_ID_BINKVIDEO,
+ CODEC_ID_IFF_ILBM,
+ CODEC_ID_IFF_BYTERUN1,
+ CODEC_ID_KGV1,
+ CODEC_ID_YOP,
+ CODEC_ID_VP8,
+ CODEC_ID_PICTOR,
+ CODEC_ID_ANSI,
+ CODEC_ID_A64_MULTI,
+ CODEC_ID_A64_MULTI5,
+ CODEC_ID_R10K,
+ CODEC_ID_MXPEG,
+ CODEC_ID_LAGARITH,
+ CODEC_ID_PRORES,
+ CODEC_ID_JV,
+ CODEC_ID_DFA,
+ CODEC_ID_WMV3IMAGE,
+ CODEC_ID_VC1IMAGE,
+#if LIBAVCODEC_VERSION_MAJOR == 53
+ CODEC_ID_G723_1,
+ CODEC_ID_G729,
+#endif
+ CODEC_ID_UTVIDEO,
+ CODEC_ID_BMV_VIDEO,
+ CODEC_ID_VBLE,
+ CODEC_ID_DXTORY,
+ CODEC_ID_V410,
+
+ /* various PCM "codecs" */
+ CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
+ CODEC_ID_PCM_S16LE = 0x10000,
+ CODEC_ID_PCM_S16BE,
+ CODEC_ID_PCM_U16LE,
+ CODEC_ID_PCM_U16BE,
+ CODEC_ID_PCM_S8,
+ CODEC_ID_PCM_U8,
+ CODEC_ID_PCM_MULAW,
+ CODEC_ID_PCM_ALAW,
+ CODEC_ID_PCM_S32LE,
+ CODEC_ID_PCM_S32BE,
+ CODEC_ID_PCM_U32LE,
+ CODEC_ID_PCM_U32BE,
+ CODEC_ID_PCM_S24LE,
+ CODEC_ID_PCM_S24BE,
+ CODEC_ID_PCM_U24LE,
+ CODEC_ID_PCM_U24BE,
+ CODEC_ID_PCM_S24DAUD,
+ CODEC_ID_PCM_ZORK,
+ CODEC_ID_PCM_S16LE_PLANAR,
+ CODEC_ID_PCM_DVD,
+ CODEC_ID_PCM_F32BE,
+ CODEC_ID_PCM_F32LE,
+ CODEC_ID_PCM_F64BE,
+ CODEC_ID_PCM_F64LE,
+ CODEC_ID_PCM_BLURAY,
+ CODEC_ID_PCM_LXF,
+ CODEC_ID_S302M,
+ CODEC_ID_PCM_S8_PLANAR,
+
+ /* various ADPCM codecs */
+ CODEC_ID_ADPCM_IMA_QT = 0x11000,
+ CODEC_ID_ADPCM_IMA_WAV,
+ CODEC_ID_ADPCM_IMA_DK3,
+ CODEC_ID_ADPCM_IMA_DK4,
+ CODEC_ID_ADPCM_IMA_WS,
+ CODEC_ID_ADPCM_IMA_SMJPEG,
+ CODEC_ID_ADPCM_MS,
+ CODEC_ID_ADPCM_4XM,
+ CODEC_ID_ADPCM_XA,
+ CODEC_ID_ADPCM_ADX,
+ CODEC_ID_ADPCM_EA,
+ CODEC_ID_ADPCM_G726,
+ CODEC_ID_ADPCM_CT,
+ CODEC_ID_ADPCM_SWF,
+ CODEC_ID_ADPCM_YAMAHA,
+ CODEC_ID_ADPCM_SBPRO_4,
+ CODEC_ID_ADPCM_SBPRO_3,
+ CODEC_ID_ADPCM_SBPRO_2,
+ CODEC_ID_ADPCM_THP,
+ CODEC_ID_ADPCM_IMA_AMV,
+ CODEC_ID_ADPCM_EA_R1,
+ CODEC_ID_ADPCM_EA_R3,
+ CODEC_ID_ADPCM_EA_R2,
+ CODEC_ID_ADPCM_IMA_EA_SEAD,
+ CODEC_ID_ADPCM_IMA_EA_EACS,
+ CODEC_ID_ADPCM_EA_XAS,
+ CODEC_ID_ADPCM_EA_MAXIS_XA,
+ CODEC_ID_ADPCM_IMA_ISS,
+ CODEC_ID_ADPCM_G722,
+
+ /* AMR */
+ CODEC_ID_AMR_NB = 0x12000,
+ CODEC_ID_AMR_WB,
+
+ /* RealAudio codecs*/
+ CODEC_ID_RA_144 = 0x13000,
+ CODEC_ID_RA_288,
+
+ /* various DPCM codecs */
+ CODEC_ID_ROQ_DPCM = 0x14000,
+ CODEC_ID_INTERPLAY_DPCM,
+ CODEC_ID_XAN_DPCM,
+ CODEC_ID_SOL_DPCM,
+
+ /* audio codecs */
+ CODEC_ID_MP2 = 0x15000,
+ CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3
+ CODEC_ID_AAC,
+ CODEC_ID_AC3,
+ CODEC_ID_DTS,
+ CODEC_ID_VORBIS,
+ CODEC_ID_DVAUDIO,
+ CODEC_ID_WMAV1,
+ CODEC_ID_WMAV2,
+ CODEC_ID_MACE3,
+ CODEC_ID_MACE6,
+ CODEC_ID_VMDAUDIO,
+#if LIBAVCODEC_VERSION_MAJOR == 53
+ CODEC_ID_SONIC,
+ CODEC_ID_SONIC_LS,
+#endif
+ CODEC_ID_FLAC,
+ CODEC_ID_MP3ADU,
+ CODEC_ID_MP3ON4,
+ CODEC_ID_SHORTEN,
+ CODEC_ID_ALAC,
+ CODEC_ID_WESTWOOD_SND1,
+ CODEC_ID_GSM, ///< as in Berlin toast format
+ CODEC_ID_QDM2,
+ CODEC_ID_COOK,
+ CODEC_ID_TRUESPEECH,
+ CODEC_ID_TTA,
+ CODEC_ID_SMACKAUDIO,
+ CODEC_ID_QCELP,
+ CODEC_ID_WAVPACK,
+ CODEC_ID_DSICINAUDIO,
+ CODEC_ID_IMC,
+ CODEC_ID_MUSEPACK7,
+ CODEC_ID_MLP,
+ CODEC_ID_GSM_MS, /* as found in WAV */
+ CODEC_ID_ATRAC3,
+ CODEC_ID_VOXWARE,
+ CODEC_ID_APE,
+ CODEC_ID_NELLYMOSER,
+ CODEC_ID_MUSEPACK8,
+ CODEC_ID_SPEEX,
+ CODEC_ID_WMAVOICE,
+ CODEC_ID_WMAPRO,
+ CODEC_ID_WMALOSSLESS,
+ CODEC_ID_ATRAC3P,
+ CODEC_ID_EAC3,
+ CODEC_ID_SIPR,
+ CODEC_ID_MP1,
+ CODEC_ID_TWINVQ,
+ CODEC_ID_TRUEHD,
+ CODEC_ID_MP4ALS,
+ CODEC_ID_ATRAC1,
+ CODEC_ID_BINKAUDIO_RDFT,
+ CODEC_ID_BINKAUDIO_DCT,
+ CODEC_ID_AAC_LATM,
+ CODEC_ID_QDMC,
+ CODEC_ID_CELT,
+#if LIBAVCODEC_VERSION_MAJOR > 53
+ CODEC_ID_G723_1,
+ CODEC_ID_G729,
+ CODEC_ID_8SVX_EXP,
+ CODEC_ID_8SVX_FIB,
+#endif
+ CODEC_ID_BMV_AUDIO,
+
+ /* subtitle codecs */
+ CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
+ CODEC_ID_DVD_SUBTITLE = 0x17000,
+ CODEC_ID_DVB_SUBTITLE,
+ CODEC_ID_TEXT, ///< raw UTF-8 text
+ CODEC_ID_XSUB,
+ CODEC_ID_SSA,
+ CODEC_ID_MOV_TEXT,
+ CODEC_ID_HDMV_PGS_SUBTITLE,
+ CODEC_ID_DVB_TELETEXT,
+ CODEC_ID_SRT,
+
+ /* other specific kind of codecs (generally used for attachments) */
+ CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs.
+ CODEC_ID_TTF = 0x18000,
+
+ CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like CODEC_ID_NONE) but lavf should attempt to identify it
+
+ CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS
+ * stream (only used by libavformat) */
+ CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems
+ * stream (only used by libavformat) */
+ CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information.
+};
+
+#if FF_API_OLD_SAMPLE_FMT
+#define SampleFormat AVSampleFormat
+
+#define SAMPLE_FMT_NONE AV_SAMPLE_FMT_NONE
+#define SAMPLE_FMT_U8 AV_SAMPLE_FMT_U8
+#define SAMPLE_FMT_S16 AV_SAMPLE_FMT_S16
+#define SAMPLE_FMT_S32 AV_SAMPLE_FMT_S32
+#define SAMPLE_FMT_FLT AV_SAMPLE_FMT_FLT
+#define SAMPLE_FMT_DBL AV_SAMPLE_FMT_DBL
+#define SAMPLE_FMT_NB AV_SAMPLE_FMT_NB
+#endif
+
+#if FF_API_OLD_AUDIOCONVERT
+#include "libavutil/audioconvert.h"
+
+/* Audio channel masks */
+#define CH_FRONT_LEFT AV_CH_FRONT_LEFT
+#define CH_FRONT_RIGHT AV_CH_FRONT_RIGHT
+#define CH_FRONT_CENTER AV_CH_FRONT_CENTER
+#define CH_LOW_FREQUENCY AV_CH_LOW_FREQUENCY
+#define CH_BACK_LEFT AV_CH_BACK_LEFT
+#define CH_BACK_RIGHT AV_CH_BACK_RIGHT
+#define CH_FRONT_LEFT_OF_CENTER AV_CH_FRONT_LEFT_OF_CENTER
+#define CH_FRONT_RIGHT_OF_CENTER AV_CH_FRONT_RIGHT_OF_CENTER
+#define CH_BACK_CENTER AV_CH_BACK_CENTER
+#define CH_SIDE_LEFT AV_CH_SIDE_LEFT
+#define CH_SIDE_RIGHT AV_CH_SIDE_RIGHT
+#define CH_TOP_CENTER AV_CH_TOP_CENTER
+#define CH_TOP_FRONT_LEFT AV_CH_TOP_FRONT_LEFT
+#define CH_TOP_FRONT_CENTER AV_CH_TOP_FRONT_CENTER
+#define CH_TOP_FRONT_RIGHT AV_CH_TOP_FRONT_RIGHT
+#define CH_TOP_BACK_LEFT AV_CH_TOP_BACK_LEFT
+#define CH_TOP_BACK_CENTER AV_CH_TOP_BACK_CENTER
+#define CH_TOP_BACK_RIGHT AV_CH_TOP_BACK_RIGHT
+#define CH_STEREO_LEFT AV_CH_STEREO_LEFT
+#define CH_STEREO_RIGHT AV_CH_STEREO_RIGHT
+
+/** Channel mask value used for AVCodecContext.request_channel_layout
+ to indicate that the user requests the channel order of the decoder output
+ to be the native codec channel order. */
+#define CH_LAYOUT_NATIVE AV_CH_LAYOUT_NATIVE
+
+/* Audio channel convenience macros */
+#define CH_LAYOUT_MONO AV_CH_LAYOUT_MONO
+#define CH_LAYOUT_STEREO AV_CH_LAYOUT_STEREO
+#define CH_LAYOUT_2_1 AV_CH_LAYOUT_2_1
+#define CH_LAYOUT_SURROUND AV_CH_LAYOUT_SURROUND
+#define CH_LAYOUT_4POINT0 AV_CH_LAYOUT_4POINT0
+#define CH_LAYOUT_2_2 AV_CH_LAYOUT_2_2
+#define CH_LAYOUT_QUAD AV_CH_LAYOUT_QUAD
+#define CH_LAYOUT_5POINT0 AV_CH_LAYOUT_5POINT0
+#define CH_LAYOUT_5POINT1 AV_CH_LAYOUT_5POINT1
+#define CH_LAYOUT_5POINT0_BACK AV_CH_LAYOUT_5POINT0_BACK
+#define CH_LAYOUT_5POINT1_BACK AV_CH_LAYOUT_5POINT1_BACK
+#define CH_LAYOUT_7POINT0 AV_CH_LAYOUT_7POINT0
+#define CH_LAYOUT_7POINT1 AV_CH_LAYOUT_7POINT1
+#define CH_LAYOUT_7POINT1_WIDE AV_CH_LAYOUT_7POINT1_WIDE
+#define CH_LAYOUT_STEREO_DOWNMIX AV_CH_LAYOUT_STEREO_DOWNMIX
+#endif
+
+#if FF_API_OLD_DECODE_AUDIO
+/* in bytes */
+#define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio
+#endif
+
+/**
+ * Required number of additionally allocated bytes at the end of the input bitstream for decoding.
+ * This is mainly needed because some optimized bitstream readers read
+ * 32 or 64 bit at once and could read over the end.<br>
+ * Note: If the first 23 bits of the additional bytes are not 0, then damaged
+ * MPEG bitstreams could cause overread and segfault.
+ */
+#define FF_INPUT_BUFFER_PADDING_SIZE 8
+
+/**
+ * minimum encoding buffer size
+ * Used to avoid some checks during header writing.
+ */
+#define FF_MIN_BUFFER_SIZE 16384
+
+
+/**
+ * motion estimation type.
+ */
+enum Motion_Est_ID {
+ ME_ZERO = 1, ///< no search, that is use 0,0 vector whenever one is needed
+ ME_FULL,
+ ME_LOG,
+ ME_PHODS,
+ ME_EPZS, ///< enhanced predictive zonal search
+ ME_X1, ///< reserved for experiments
+ ME_HEX, ///< hexagon based search
+ ME_UMH, ///< uneven multi-hexagon search
+ ME_ITER, ///< iterative search
+ ME_TESA, ///< transformed exhaustive search algorithm
+};
+
+enum AVDiscard{
+ /* We leave some space between them for extensions (drop some
+ * keyframes for intra-only or drop just some bidir frames). */
+ AVDISCARD_NONE =-16, ///< discard nothing
+ AVDISCARD_DEFAULT= 0, ///< discard useless packets like 0 size packets in avi
+ AVDISCARD_NONREF = 8, ///< discard all non reference
+ AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames
+ AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes
+ AVDISCARD_ALL = 48, ///< discard all
+};
+
+enum AVColorPrimaries{
+ AVCOL_PRI_BT709 =1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B
+ AVCOL_PRI_UNSPECIFIED=2,
+ AVCOL_PRI_BT470M =4,
+ AVCOL_PRI_BT470BG =5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
+ AVCOL_PRI_SMPTE170M =6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
+ AVCOL_PRI_SMPTE240M =7, ///< functionally identical to above
+ AVCOL_PRI_FILM =8,
+ AVCOL_PRI_NB , ///< Not part of ABI
+};
+
+enum AVColorTransferCharacteristic{
+ AVCOL_TRC_BT709 =1, ///< also ITU-R BT1361
+ AVCOL_TRC_UNSPECIFIED=2,
+ AVCOL_TRC_GAMMA22 =4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
+ AVCOL_TRC_GAMMA28 =5, ///< also ITU-R BT470BG
+ AVCOL_TRC_NB , ///< Not part of ABI
+};
+
+enum AVColorSpace{
+ AVCOL_SPC_RGB =0,
+ AVCOL_SPC_BT709 =1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
+ AVCOL_SPC_UNSPECIFIED=2,
+ AVCOL_SPC_FCC =4,
+ AVCOL_SPC_BT470BG =5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
+ AVCOL_SPC_SMPTE170M =6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
+ AVCOL_SPC_SMPTE240M =7,
+ AVCOL_SPC_NB , ///< Not part of ABI
+};
+
+enum AVColorRange{
+ AVCOL_RANGE_UNSPECIFIED=0,
+ AVCOL_RANGE_MPEG =1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges
+ AVCOL_RANGE_JPEG =2, ///< the normal 2^n-1 "JPEG" YUV ranges
+ AVCOL_RANGE_NB , ///< Not part of ABI
+};
+
+/**
+ * X X 3 4 X X are luma samples,
+ * 1 2 1-6 are possible chroma positions
+ * X X 5 6 X 0 is undefined/unknown position
+ */
+enum AVChromaLocation{
+ AVCHROMA_LOC_UNSPECIFIED=0,
+ AVCHROMA_LOC_LEFT =1, ///< mpeg2/4, h264 default
+ AVCHROMA_LOC_CENTER =2, ///< mpeg1, jpeg, h263
+ AVCHROMA_LOC_TOPLEFT =3, ///< DV
+ AVCHROMA_LOC_TOP =4,
+ AVCHROMA_LOC_BOTTOMLEFT =5,
+ AVCHROMA_LOC_BOTTOM =6,
+ AVCHROMA_LOC_NB , ///< Not part of ABI
+};
+
+#if FF_API_FLAC_GLOBAL_OPTS
+/**
+ * LPC analysis type
+ */
+enum AVLPCType {
+ AV_LPC_TYPE_DEFAULT = -1, ///< use the codec default LPC type
+ AV_LPC_TYPE_NONE = 0, ///< do not use LPC prediction or use all zero coefficients
+ AV_LPC_TYPE_FIXED = 1, ///< fixed LPC coefficients
+ AV_LPC_TYPE_LEVINSON = 2, ///< Levinson-Durbin recursion
+ AV_LPC_TYPE_CHOLESKY = 3, ///< Cholesky factorization
+ AV_LPC_TYPE_NB , ///< Not part of ABI
+};
+#endif
+
+enum AVAudioServiceType {
+ AV_AUDIO_SERVICE_TYPE_MAIN = 0,
+ AV_AUDIO_SERVICE_TYPE_EFFECTS = 1,
+ AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2,
+ AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED = 3,
+ AV_AUDIO_SERVICE_TYPE_DIALOGUE = 4,
+ AV_AUDIO_SERVICE_TYPE_COMMENTARY = 5,
+ AV_AUDIO_SERVICE_TYPE_EMERGENCY = 6,
+ AV_AUDIO_SERVICE_TYPE_VOICE_OVER = 7,
+ AV_AUDIO_SERVICE_TYPE_KARAOKE = 8,
+ AV_AUDIO_SERVICE_TYPE_NB , ///< Not part of ABI
+};
+
+typedef struct RcOverride{
+ int start_frame;
+ int end_frame;
+ int qscale; // If this is 0 then quality_factor will be used instead.
+ float quality_factor;
+} RcOverride;
+
+#define FF_MAX_B_FRAMES 16
+
+/* encoding support
+ These flags can be passed in AVCodecContext.flags before initialization.
+ Note: Not everything is supported yet.
+*/
+
+#define CODEC_FLAG_QSCALE 0x0002 ///< Use fixed qscale.
+#define CODEC_FLAG_4MV 0x0004 ///< 4 MV per MB allowed / advanced prediction for H.263.
+#define CODEC_FLAG_QPEL 0x0010 ///< Use qpel MC.
+#define CODEC_FLAG_GMC 0x0020 ///< Use GMC.
+#define CODEC_FLAG_MV0 0x0040 ///< Always try a MB with MV=<0,0>.
+/**
+ * The parent program guarantees that the input for B-frames containing
+ * streams is not written to for at least s->max_b_frames+1 frames, if
+ * this is not set the input will be copied.
+ */
+#define CODEC_FLAG_INPUT_PRESERVED 0x0100
+#define CODEC_FLAG_PASS1 0x0200 ///< Use internal 2pass ratecontrol in first pass mode.
+#define CODEC_FLAG_PASS2 0x0400 ///< Use internal 2pass ratecontrol in second pass mode.
+#define CODEC_FLAG_GRAY 0x2000 ///< Only decode/encode grayscale.
+#define CODEC_FLAG_EMU_EDGE 0x4000 ///< Don't draw edges.
+#define CODEC_FLAG_PSNR 0x8000 ///< error[?] variables will be set during encoding.
+#define CODEC_FLAG_TRUNCATED 0x00010000 /** Input bitstream might be truncated at a random
+ location instead of only at frame boundaries. */
+#define CODEC_FLAG_NORMALIZE_AQP 0x00020000 ///< Normalize adaptive quantization.
+#define CODEC_FLAG_INTERLACED_DCT 0x00040000 ///< Use interlaced DCT.
+#define CODEC_FLAG_LOW_DELAY 0x00080000 ///< Force low delay.
+#define CODEC_FLAG_GLOBAL_HEADER 0x00400000 ///< Place global headers in extradata instead of every keyframe.
+#define CODEC_FLAG_BITEXACT 0x00800000 ///< Use only bitexact stuff (except (I)DCT).
+/* Fx : Flag for h263+ extra options */
+#define CODEC_FLAG_AC_PRED 0x01000000 ///< H.263 advanced intra coding / MPEG-4 AC prediction
+#define CODEC_FLAG_CBP_RD 0x04000000 ///< Use rate distortion optimization for cbp.
+#define CODEC_FLAG_QP_RD 0x08000000 ///< Use rate distortion optimization for qp selection.
+#define CODEC_FLAG_LOOP_FILTER 0x00000800 ///< loop filter
+#define CODEC_FLAG_INTERLACED_ME 0x20000000 ///< interlaced motion estimation
+#define CODEC_FLAG_CLOSED_GOP 0x80000000
+#define CODEC_FLAG2_FAST 0x00000001 ///< Allow non spec compliant speedup tricks.
+#define CODEC_FLAG2_STRICT_GOP 0x00000002 ///< Strictly enforce GOP size.
+#define CODEC_FLAG2_NO_OUTPUT 0x00000004 ///< Skip bitstream encoding.
+#define CODEC_FLAG2_LOCAL_HEADER 0x00000008 ///< Place global headers at every keyframe instead of in extradata.
+#define CODEC_FLAG2_SKIP_RD 0x00004000 ///< RD optimal MB level residual skipping
+#define CODEC_FLAG2_CHUNKS 0x00008000 ///< Input bitstream might be truncated at packet boundaries instead of only at frame boundaries.
+/**
+ * @defgroup deprecated_flags Deprecated codec flags
+ * Use corresponding private codec options instead.
+ * @{
+ */
+#if FF_API_MPEGVIDEO_GLOBAL_OPTS
+#define CODEC_FLAG_OBMC 0x00000001 ///< OBMC
+#define CODEC_FLAG_H263P_AIV 0x00000008 ///< H.263 alternative inter VLC
+#define CODEC_FLAG_PART 0x0080 ///< Use data partitioning.
+#define CODEC_FLAG_ALT_SCAN 0x00100000 ///< Use alternate scan.
+#define CODEC_FLAG_H263P_UMV 0x02000000 ///< unlimited motion vector
+#define CODEC_FLAG_H263P_SLICE_STRUCT 0x10000000
+#define CODEC_FLAG_SVCD_SCAN_OFFSET 0x40000000 ///< Will reserve space for SVCD scan offset user data.
+#define CODEC_FLAG2_INTRA_VLC 0x00000800 ///< Use MPEG-2 intra VLC table.
+#define CODEC_FLAG2_DROP_FRAME_TIMECODE 0x00002000 ///< timecode is in drop frame format.
+#define CODEC_FLAG2_NON_LINEAR_QUANT 0x00010000 ///< Use MPEG-2 nonlinear quantizer.
+#endif
+#if FF_API_MJPEG_GLOBAL_OPTS
+#define CODEC_FLAG_EXTERN_HUFF 0x1000 ///< Use external Huffman table (for MJPEG).
+#endif
+#if FF_API_X264_GLOBAL_OPTS
+#define CODEC_FLAG2_BPYRAMID 0x00000010 ///< H.264 allow B-frames to be used as references.
+#define CODEC_FLAG2_WPRED 0x00000020 ///< H.264 weighted biprediction for B-frames
+#define CODEC_FLAG2_MIXED_REFS 0x00000040 ///< H.264 one reference per partition, as opposed to one reference per macroblock
+#define CODEC_FLAG2_8X8DCT 0x00000080 ///< H.264 high profile 8x8 transform
+#define CODEC_FLAG2_FASTPSKIP 0x00000100 ///< H.264 fast pskip
+#define CODEC_FLAG2_AUD 0x00000200 ///< H.264 access unit delimiters
+#define CODEC_FLAG2_BRDO 0x00000400 ///< B-frame rate-distortion optimization
+#define CODEC_FLAG2_MBTREE 0x00040000 ///< Use macroblock tree ratecontrol (x264 only)
+#define CODEC_FLAG2_PSY 0x00080000 ///< Use psycho visual optimizations.
+#define CODEC_FLAG2_SSIM 0x00100000 ///< Compute SSIM during encoding, error[] values are undefined.
+#define CODEC_FLAG2_INTRA_REFRESH 0x00200000 ///< Use periodic insertion of intra blocks instead of keyframes.
+#endif
+#if FF_API_SNOW_GLOBAL_OPTS
+#define CODEC_FLAG2_MEMC_ONLY 0x00001000 ///< Only do ME/MC (I frames -> ref, P frame -> ME+MC).
+#endif
+#if FF_API_LAME_GLOBAL_OPTS
+#define CODEC_FLAG2_BIT_RESERVOIR 0x00020000 ///< Use a bit reservoir when encoding if possible
+#endif
+/**
+ * @}
+ */
+
+/* Unsupported options :
+ * Syntax Arithmetic coding (SAC)
+ * Reference Picture Selection
+ * Independent Segment Decoding */
+/* /Fx */
+/* codec capabilities */
+
+#define CODEC_CAP_DRAW_HORIZ_BAND 0x0001 ///< Decoder can use draw_horiz_band callback.
+/**
+ * Codec uses get_buffer() for allocating buffers and supports custom allocators.
+ * If not set, it might not use get_buffer() at all or use operations that
+ * assume the buffer was allocated by avcodec_default_get_buffer.
+ */
+#define CODEC_CAP_DR1 0x0002
+#if FF_API_PARSE_FRAME
+/* If 'parse_only' field is true, then avcodec_parse_frame() can be used. */
+#define CODEC_CAP_PARSE_ONLY 0x0004
+#endif
+#define CODEC_CAP_TRUNCATED 0x0008
+/* Codec can export data for HW decoding (XvMC). */
+#define CODEC_CAP_HWACCEL 0x0010
+/**
+ * Encoder or decoder requires flushing with NULL input at the end in order to
+ * give the complete and correct output.
+ *
+ * NOTE: If this flag is not set, the codec is guaranteed to never be fed
+ * with NULL data. The user can still send NULL data to the public encode
+ * or decode function, but libavcodec will not pass it along to the codec
+ * unless this flag is set.
+ *
+ * Decoders:
+ * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to get the delayed data until the decoder no longer
+ * returns frames.
+ *
+ * Encoders:
+ * The encoder needs to be fed with NULL data at the end of encoding until the
+ * encoder no longer returns data.
+ *
+ * NOTE: For encoders implementing the AVCodec.encode2() function, setting this
+ * flag also means that the encoder must set the pts and duration for
+ * each output packet. If this flag is not set, the pts and duration will
+ * be determined by libavcodec from the input frame.
+ */
+#define CODEC_CAP_DELAY 0x0020
+/**
+ * Codec can be fed a final frame with a smaller size.
+ * This can be used to prevent truncation of the last audio samples.
+ */
+#define CODEC_CAP_SMALL_LAST_FRAME 0x0040
+/**
+ * Codec can export data for HW decoding (VDPAU).
+ */
+#define CODEC_CAP_HWACCEL_VDPAU 0x0080
+/**
+ * Codec can output multiple frames per AVPacket.
+ * Normally demuxers return one frame at a time; demuxers which do not
+ * are connected to a parser to split what they return into proper frames.
+ * This flag is reserved to the very rare category of codecs which have a
+ * bitstream that cannot be split into frames without time-consuming
+ * operations like full decoding. Demuxers carrying such bitstreams thus
+ * may return multiple frames in a packet. This has many disadvantages, like
+ * prohibiting stream copy in many cases, so it should only be considered
+ * as a last resort.
+ */
+#define CODEC_CAP_SUBFRAMES 0x0100
+/**
+ * Codec is experimental and is thus avoided in favor of non-experimental
+ * encoders.
+ */
+#define CODEC_CAP_EXPERIMENTAL 0x0200
+/**
+ * Codec should fill in channel configuration and samplerate instead of container
+ */
+#define CODEC_CAP_CHANNEL_CONF 0x0400
+/**
+ * Codec is able to deal with negative linesizes
+ */
+#define CODEC_CAP_NEG_LINESIZES 0x0800
+/**
+ * Codec supports frame-level multithreading.
+ */
+#define CODEC_CAP_FRAME_THREADS 0x1000
+/**
+ * Codec supports slice-based (or partition-based) multithreading.
+ */
+#define CODEC_CAP_SLICE_THREADS 0x2000
+/**
+ * Codec supports changed parameters at any point.
+ */
+#define CODEC_CAP_PARAM_CHANGE 0x4000
+/**
+ * Codec supports avctx->thread_count == 0 (auto).
+ */
+#define CODEC_CAP_AUTO_THREADS 0x8000
+/**
+ * Audio encoder supports receiving a different number of samples in each call.
+ */
+#define CODEC_CAP_VARIABLE_FRAME_SIZE 0x10000
+
+//The following defines may change, don't expect compatibility if you use them.
+#define MB_TYPE_INTRA4x4 0x0001
+#define MB_TYPE_INTRA16x16 0x0002 //FIXME H.264-specific
+#define MB_TYPE_INTRA_PCM 0x0004 //FIXME H.264-specific
+#define MB_TYPE_16x16 0x0008
+#define MB_TYPE_16x8 0x0010
+#define MB_TYPE_8x16 0x0020
+#define MB_TYPE_8x8 0x0040
+#define MB_TYPE_INTERLACED 0x0080
+#define MB_TYPE_DIRECT2 0x0100 //FIXME
+#define MB_TYPE_ACPRED 0x0200
+#define MB_TYPE_GMC 0x0400
+#define MB_TYPE_SKIP 0x0800
+#define MB_TYPE_P0L0 0x1000
+#define MB_TYPE_P1L0 0x2000
+#define MB_TYPE_P0L1 0x4000
+#define MB_TYPE_P1L1 0x8000
+#define MB_TYPE_L0 (MB_TYPE_P0L0 | MB_TYPE_P1L0)
+#define MB_TYPE_L1 (MB_TYPE_P0L1 | MB_TYPE_P1L1)
+#define MB_TYPE_L0L1 (MB_TYPE_L0 | MB_TYPE_L1)
+#define MB_TYPE_QUANT 0x00010000
+#define MB_TYPE_CBP 0x00020000
+//Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 0mv, ...)
+
+/**
+ * Pan Scan area.
+ * This specifies the area which should be displayed.
+ * Note there may be multiple such areas for one frame.
+ */
+typedef struct AVPanScan{
+ /**
+ * id
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int id;
+
+ /**
+ * width and height in 1/16 pel
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int width;
+ int height;
+
+ /**
+ * position of the top left corner in 1/16 pel for up to 3 fields/frames
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int16_t position[3][2];
+}AVPanScan;
+
+#define FF_QSCALE_TYPE_MPEG1 0
+#define FF_QSCALE_TYPE_MPEG2 1
+#define FF_QSCALE_TYPE_H264 2
+#define FF_QSCALE_TYPE_VP56 3
+
+#define FF_BUFFER_TYPE_INTERNAL 1
+#define FF_BUFFER_TYPE_USER 2 ///< direct rendering buffers (image is (de)allocated by user)
+#define FF_BUFFER_TYPE_SHARED 4 ///< Buffer from somewhere else; don't deallocate image (data/base), all other tables are not shared.
+#define FF_BUFFER_TYPE_COPY 8 ///< Just a (modified) copy of some other buffer, don't deallocate anything.
+
+#if FF_API_OLD_FF_PICT_TYPES
+/* DEPRECATED, directly use the AV_PICTURE_TYPE_* enum values */
+#define FF_I_TYPE AV_PICTURE_TYPE_I ///< Intra
+#define FF_P_TYPE AV_PICTURE_TYPE_P ///< Predicted
+#define FF_B_TYPE AV_PICTURE_TYPE_B ///< Bi-dir predicted
+#define FF_S_TYPE AV_PICTURE_TYPE_S ///< S(GMC)-VOP MPEG4
+#define FF_SI_TYPE AV_PICTURE_TYPE_SI ///< Switching Intra
+#define FF_SP_TYPE AV_PICTURE_TYPE_SP ///< Switching Predicted
+#define FF_BI_TYPE AV_PICTURE_TYPE_BI
+#endif
+
+#define FF_BUFFER_HINTS_VALID 0x01 // Buffer hints value is meaningful (if 0 ignore).
+#define FF_BUFFER_HINTS_READABLE 0x02 // Codec will read from buffer.
+#define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content.
+#define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update).
+
+enum AVPacketSideDataType {
+ AV_PKT_DATA_PALETTE,
+ AV_PKT_DATA_NEW_EXTRADATA,
+ AV_PKT_DATA_PARAM_CHANGE,
+};
+
+typedef struct AVPacket {
+ /**
+ * Presentation timestamp in AVStream->time_base units; the time at which
+ * the decompressed packet will be presented to the user.
+ * Can be AV_NOPTS_VALUE if it is not stored in the file.
+ * pts MUST be larger or equal to dts as presentation cannot happen before
+ * decompression, unless one wants to view hex dumps. Some formats misuse
+ * the terms dts and pts/cts to mean something different. Such timestamps
+ * must be converted to true pts/dts before they are stored in AVPacket.
+ */
+ int64_t pts;
+ /**
+ * Decompression timestamp in AVStream->time_base units; the time at which
+ * the packet is decompressed.
+ * Can be AV_NOPTS_VALUE if it is not stored in the file.
+ */
+ int64_t dts;
+ uint8_t *data;
+ int size;
+ int stream_index;
+ /**
+ * A combination of AV_PKT_FLAG values
+ */
+ int flags;
+ /**
+ * Additional packet data that can be provided by the container.
+ * Packet can contain several types of side information.
+ */
+ struct {
+ uint8_t *data;
+ int size;
+ enum AVPacketSideDataType type;
+ } *side_data;
+ int side_data_elems;
+
+ /**
+ * Duration of this packet in AVStream->time_base units, 0 if unknown.
+ * Equals next_pts - this_pts in presentation order.
+ */
+ int duration;
+ void (*destruct)(struct AVPacket *);
+ void *priv;
+ int64_t pos; ///< byte position in stream, -1 if unknown
+
+ /**
+ * Time difference in AVStream->time_base units from the pts of this
+ * packet to the point at which the output from the decoder has converged
+ * independent from the availability of previous frames. That is, the
+ * frames are virtually identical no matter if decoding started from
+ * the very first frame or from this keyframe.
+ * Is AV_NOPTS_VALUE if unknown.
+ * This field is not the display duration of the current packet.
+ * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY
+ * set.
+ *
+ * The purpose of this field is to allow seeking in streams that have no
+ * keyframes in the conventional sense. It corresponds to the
+ * recovery point SEI in H.264 and match_time_delta in NUT. It is also
+ * essential for some types of subtitle streams to ensure that all
+ * subtitles are correctly displayed after seeking.
+ */
+ int64_t convergence_duration;
+} AVPacket;
+#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe
+#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted
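+
+/**
+ * Example (illustrative sketch, not upstream documentation): how an
+ * application might inspect a returned packet. "pkt" is assumed to be an
+ * AVPacket filled in by a demuxer.
+ * @code
+ * if (pkt.flags & AV_PKT_FLAG_KEY) {
+ *     // safe point to resume decoding after a seek
+ * }
+ * for (int i = 0; i < pkt.side_data_elems; i++) {
+ *     if (pkt.side_data[i].type == AV_PKT_DATA_NEW_EXTRADATA) {
+ *         // pkt.side_data[i].data / pkt.side_data[i].size carry the new extradata
+ *     }
+ * }
+ * @endcode
+ */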
+
+/**
+ * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
+ * u32le param_flags
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)
+ * s32le channel_count
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
+ * u64le channel_layout
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)
+ * s32le sample_rate
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)
+ * s32le width
+ * s32le height
+ */
+
+enum AVSideDataParamChangeFlags {
+ AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001,
+ AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002,
+ AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004,
+ AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008,
+};
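+
+/**
+ * Illustrative sketch (assumptions: "data" points at the payload of an
+ * AV_PKT_DATA_PARAM_CHANGE side data element; bounds checking is omitted)
+ * of walking the little-endian layout documented above.
+ * @code
+ * const uint8_t *p = data;
+ * uint32_t param_flags = p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t)p[3] << 24);
+ * p += 4;
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
+ *     int32_t channel_count = p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t)p[3] << 24);
+ *     p += 4;
+ * }
+ * // the remaining fields follow in the order documented above
+ * @endcode
+ */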
+
+/**
+ * Audio Video Frame.
+ * New fields can be added to the end of AVFrame with minor version
+ * bumps. Removal, reordering and changes to existing fields require
+ * a major version bump.
+ * sizeof(AVFrame) must not be used outside libav*.
+ */
+typedef struct AVFrame {
+#if FF_API_DATA_POINTERS
+#define AV_NUM_DATA_POINTERS 4
+#else
+#define AV_NUM_DATA_POINTERS 8
+#endif
+ /**
+ * pointer to the picture/channel planes.
+ * This might be different from the first allocated byte
+ * - encoding: Set by user
+ * - decoding: set by AVCodecContext.get_buffer()
+ */
+ uint8_t *data[AV_NUM_DATA_POINTERS];
+
+ /**
+ * Size, in bytes, of the data for each picture/channel plane.
+ *
+ * For audio, only linesize[0] may be set. For planar audio, each channel
+ * plane must be the same size.
+ *
+ * - encoding: Set by user (video only)
+ * - decoding: set by AVCodecContext.get_buffer()
+ */
+ int linesize[AV_NUM_DATA_POINTERS];
+
+ /**
+ * pointer to the first allocated byte of the picture. Can be used in get_buffer/release_buffer.
+ * This isn't used by libavcodec unless the default get/release_buffer() is used.
+ * - encoding:
+ * - decoding:
+ */
+ uint8_t *base[AV_NUM_DATA_POINTERS];
+ /**
+ * 1 -> keyframe, 0-> not
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int key_frame;
+
+ /**
+ * Picture type of the frame, see ?_TYPE below.
+ * - encoding: Set by libavcodec. for coded_picture (and set by user for input).
+ * - decoding: Set by libavcodec.
+ */
+ enum AVPictureType pict_type;
+
+ /**
+ * presentation timestamp in time_base units (time when frame should be shown to user)
+ * If AV_NOPTS_VALUE then frame_rate = 1/time_base will be assumed.
+ * - encoding: MUST be set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int64_t pts;
+
+ /**
+ * picture number in bitstream order
+ * - encoding: set by
+ * - decoding: Set by libavcodec.
+ */
+ int coded_picture_number;
+ /**
+ * picture number in display order
+ * - encoding: set by
+ * - decoding: Set by libavcodec.
+ */
+ int display_picture_number;
+
+ /**
+ * quality (between 1 (good) and FF_LAMBDA_MAX (bad))
+ * - encoding: Set by libavcodec. for coded_picture (and set by user for input).
+ * - decoding: Set by libavcodec.
+ */
+ int quality;
+
+#if FF_API_AVFRAME_AGE
+ /**
+ * @deprecated unused
+ */
+ attribute_deprecated int age;
+#endif
+
+ /**
+ * is this picture used as reference
+ * The values for this are the same as the MpegEncContext.picture_structure
+ * variable, that is 1->top field, 2->bottom field, 3->frame/both fields.
+ * Set to 4 for delayed, non-reference frames.
+ * - encoding: unused
+ * - decoding: Set by libavcodec. (before get_buffer() call)).
+ */
+ int reference;
+
+ /**
+ * QP table
+ * - encoding: unused
+ * - decoding: Set by libavcodec.
+ */
+ int8_t *qscale_table;
+ /**
+ * QP store stride
+ * - encoding: unused
+ * - decoding: Set by libavcodec.
+ */
+ int qstride;
+
+ /**
+ * mbskip_table[mb]>=1 if MB didn't change
+ * stride= mb_width = (width+15)>>4
+ * - encoding: unused
+ * - decoding: Set by libavcodec.
+ */
+ uint8_t *mbskip_table;
+
+ /**
+ * motion vector table
+ * @code
+ * example:
+ * int mv_sample_log2= 4 - motion_subsample_log2;
+ * int mb_width= (width+15)>>4;
+ * int mv_stride= (mb_width << mv_sample_log2) + 1;
+ * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y];
+ * @endcode
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int16_t (*motion_val[2])[2];
+
+ /**
+ * macroblock type table
+ * mb_type_base + mb_width + 2
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ uint32_t *mb_type;
+
+ /**
+ * log2 of the size of the block which a single vector in motion_val represents:
+ * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)
+ * - encoding: unused
+ * - decoding: Set by libavcodec.
+ */
+ uint8_t motion_subsample_log2;
+
+ /**
+ * for some private data of the user
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ void *opaque;
+
+ /**
+ * error
+ * - encoding: Set by libavcodec. if flags&CODEC_FLAG_PSNR.
+ * - decoding: unused
+ */
+ uint64_t error[AV_NUM_DATA_POINTERS];
+
+ /**
+ * type of the buffer (to keep track of who has to deallocate data[*])
+ * - encoding: Set by the one who allocates it.
+ * - decoding: Set by the one who allocates it.
+ * Note: User allocated (direct rendering) & internal buffers cannot coexist currently.
+ */
+ int type;
+
+ /**
+ * When decoding, this signals how much the picture must be delayed.
+ * extra_delay = repeat_pict / (2*fps)
+ * - encoding: unused
+ * - decoding: Set by libavcodec.
+ */
+ int repeat_pict;
+
+ /**
+ *
+ */
+ int qscale_type;
+
+ /**
+ * The content of the picture is interlaced.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec. (default 0)
+ */
+ int interlaced_frame;
+
+ /**
+ * If the content is interlaced, is top field displayed first.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int top_field_first;
+
+ /**
+ * Pan scan.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ AVPanScan *pan_scan;
+
+ /**
+ * Tell user application that palette has changed from previous frame.
+ * - encoding: ??? (no palette-enabled encoder yet)
+ * - decoding: Set by libavcodec. (default 0).
+ */
+ int palette_has_changed;
+
+ /**
+ * codec suggestion on buffer type if != 0
+ * - encoding: unused
+ * - decoding: Set by libavcodec. (before get_buffer() call)).
+ */
+ int buffer_hints;
+
+ /**
+ * DCT coefficients
+ * - encoding: unused
+ * - decoding: Set by libavcodec.
+ */
+ short *dct_coeff;
+
+ /**
+ * motion reference frame index
+ * the order in which these are stored can depend on the codec.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int8_t *ref_index[2];
+
+ /**
+ * reordered opaque 64bit (generally an integer or a double precision float
+ * PTS but can be anything).
+ * The user sets AVCodecContext.reordered_opaque to represent the input at
+ * that time,
+ * the decoder reorders values as needed and sets AVFrame.reordered_opaque
+ * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque
+ * @deprecated in favor of pkt_pts
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ int64_t reordered_opaque;
+
+ /**
+ * hardware accelerator private data (Libav-allocated)
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ void *hwaccel_picture_private;
+
+ /**
+ * reordered pts from the last AVPacket that has been input into the decoder
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ int64_t pkt_pts;
+
+ /**
+ * dts from the last AVPacket that has been input into the decoder
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ int64_t pkt_dts;
+
+ /**
+ * the AVCodecContext which ff_thread_get_buffer() was last called on
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ struct AVCodecContext *owner;
+
+ /**
+ * used by multithreading to store frame-specific info
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ void *thread_opaque;
+
+ /**
+ * number of audio samples (per channel) described by this frame
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ int nb_samples;
+
+ /**
+ * pointers to the data planes/channels.
+ *
+ * For video, this should simply point to data[].
+ *
+ * For planar audio, each channel has a separate data pointer, and
+ * linesize[0] contains the size of each channel buffer.
+ * For packed audio, there is just one data pointer, and linesize[0]
+ * contains the total size of the buffer for all channels.
+ *
+ * Note: Both data and extended_data will always be set by get_buffer(),
+ * but for planar audio with more channels than can fit in data,
+ * extended_data must be used by the decoder in order to access all
+ * channels.
+ *
+ * encoding: unused
+ * decoding: set by AVCodecContext.get_buffer()
+ */
+ uint8_t **extended_data;
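+
+ /**
+ * Example (illustrative sketch): reading decoded planar audio through
+ * extended_data; "avctx" and "frame" are assumed to come from a decode call.
+ * @code
+ * for (int ch = 0; ch < avctx->channels; ch++) {
+ *     const uint8_t *plane = frame->extended_data[ch];
+ *     int plane_size = frame->linesize[0]; // every channel plane has this size
+ *     // process plane[0 .. plane_size - 1]
+ * }
+ * @endcode
+ */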
+
+ /**
+ * sample aspect ratio for the video frame, 0/1 if unknown/unspecified
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ AVRational sample_aspect_ratio;
+
+ /**
+ * width and height of the video frame
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ int width, height;
+
+ /**
+ * format of the frame, -1 if unknown or unset
+ * Values correspond to enum PixelFormat for video frames,
+ * enum AVSampleFormat for audio.
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ int format;
+} AVFrame;
+
+struct AVCodecInternal;
+
+enum AVFieldOrder {
+ AV_FIELD_UNKNOWN,
+ AV_FIELD_PROGRESSIVE,
+ AV_FIELD_TT, ///< Top coded first, top displayed first
+ AV_FIELD_BB, ///< Bottom coded first, bottom displayed first
+ AV_FIELD_TB, ///< Top coded first, bottom displayed first
+ AV_FIELD_BT, ///< Bottom coded first, top displayed first
+};
+
+/**
+ * main external API structure.
+ * New fields can be added to the end with minor version bumps.
+ * Removal, reordering and changes to existing fields require a major
+ * version bump.
+ * sizeof(AVCodecContext) must not be used outside libav*.
+ */
+typedef struct AVCodecContext {
+ /**
+ * information on struct for av_log
+ * - set by avcodec_alloc_context3
+ */
+ const AVClass *av_class;
+ /**
+ * the average bitrate
+ * - encoding: Set by user; unused for constant quantizer encoding.
+ * - decoding: Set by libavcodec. 0 or some bitrate if this info is available in the stream.
+ */
+ int bit_rate;
+
+ /**
+ * number of bits the bitstream is allowed to diverge from the reference.
+ * the reference can be CBR (for CBR pass1) or VBR (for pass2)
+ * - encoding: Set by user; unused for constant quantizer encoding.
+ * - decoding: unused
+ */
+ int bit_rate_tolerance;
+
+ /**
+ * CODEC_FLAG_*.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int flags;
+
+ /**
+ * Some codecs need additional format info. It is stored here.
+ * If any muxer uses this then ALL demuxers/parsers AND encoders for the
+ * specific codec MUST set it correctly otherwise stream copy breaks.
+ * In general use of this field by muxers is not recommended.
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec. (FIXME: Is this OK?)
+ */
+ int sub_id;
+
+ /**
+ * Motion estimation algorithm used for video coding.
+ * 1 (zero), 2 (full), 3 (log), 4 (phods), 5 (epzs), 6 (x1), 7 (hex),
+ * 8 (umh), 9 (iter), 10 (tesa) [7, 8, 10 are x264 specific, 9 is snow specific]
+ * - encoding: MUST be set by user.
+ * - decoding: unused
+ */
+ int me_method;
+
+ /**
+ * some codecs need / can use extradata like Huffman tables.
+ * mjpeg: Huffman tables
+ * rv10: additional flags
+ * mpeg4: global headers (they can be in the bitstream or here)
+ * The allocated memory should be FF_INPUT_BUFFER_PADDING_SIZE bytes larger
+ * than extradata_size to avoid problems if it is read with the bitstream reader.
+ * The bytewise contents of extradata must not depend on the architecture or CPU endianness.
+ * - encoding: Set/allocated/freed by libavcodec.
+ * - decoding: Set/allocated/freed by user.
+ */
+ uint8_t *extradata;
+ int extradata_size;
+
+ /**
+ * This is the fundamental unit of time (in seconds) in terms
+ * of which frame timestamps are represented. For fixed-fps content,
+ * timebase should be 1/framerate and timestamp increments should be
+ * identically 1.
+ * - encoding: MUST be set by user.
+ * - decoding: Set by libavcodec.
+ */
+ AVRational time_base;
+
+ /* video only */
+ /**
+ * picture width / height.
+ * - encoding: MUST be set by user.
+ * - decoding: Set by libavcodec.
+ * Note: For compatibility it is possible to set this instead of
+ * coded_width/height before decoding.
+ */
+ int width, height;
+
+#define FF_ASPECT_EXTENDED 15
+
+ /**
+ * the number of pictures in a group of pictures, or 0 for intra_only
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int gop_size;
+
+ /**
+ * Pixel format, see PIX_FMT_xxx.
+ * May be set by the demuxer if known from headers.
+ * May be overridden by the decoder if it knows better.
+ * - encoding: Set by user.
+ * - decoding: Set by user if known, overridden by libavcodec if known
+ */
+ enum PixelFormat pix_fmt;
+
+ /**
+ * If non NULL, 'draw_horiz_band' is called by the libavcodec
+ * decoder to draw a horizontal band. It improves cache usage. Not
+ * all codecs can do that. You must check the codec capabilities
+ * beforehand.
+ * When multithreading is used, it may be called from multiple threads
+ * at the same time; threads might draw different parts of the same AVFrame,
+ * or multiple AVFrames, and there is no guarantee that slices will be drawn
+ * in order.
+ * The function is also used by hardware acceleration APIs.
+ * It is called at least once during frame decoding to pass
+ * the data needed for hardware render.
+ * In that mode instead of pixel data, AVFrame points to
+ * a structure specific to the acceleration API. The application
+ * reads the structure and can change some fields to indicate progress
+ * or mark state.
+ * - encoding: unused
+ * - decoding: Set by user.
+ * @param height the height of the slice
+ * @param y the y position of the slice
+ * @param type 1->top field, 2->bottom field, 3->frame
+ * @param offset offset into the AVFrame.data from which the slice should be read
+ */
+ void (*draw_horiz_band)(struct AVCodecContext *s,
+ const AVFrame *src, int offset[AV_NUM_DATA_POINTERS],
+ int y, int type, int height);
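+
+ /**
+ * Illustrative sketch of a minimal draw_horiz_band() implementation that
+ * hands the luma rows of the slice to the application; "app_copy_rows" is a
+ * hypothetical application helper, not part of libavcodec.
+ * @code
+ * static void my_draw_horiz_band(struct AVCodecContext *s,
+ *                                const AVFrame *src, int offset[AV_NUM_DATA_POINTERS],
+ *                                int y, int type, int height)
+ * {
+ *     const uint8_t *rows = src->data[0] + offset[0];
+ *     app_copy_rows(s->opaque, rows, src->linesize[0], y, height);
+ * }
+ * @endcode
+ */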
+
+ /* audio only */
+ int sample_rate; ///< samples per second
+ int channels; ///< number of audio channels
+
+ /**
+ * audio sample format
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ enum AVSampleFormat sample_fmt; ///< sample format
+
+ /* The following data should not be initialized. */
+ /**
+ * Samples per packet, initialized when calling 'init'.
+ */
+ int frame_size;
+ int frame_number; ///< audio or video frame number
+
+ /**
+ * Number of frames the decoded output will be delayed relative to
+ * the encoded input.
+ * - encoding: Set by libavcodec.
+ * - decoding: unused
+ */
+ int delay;
+
+ /* - encoding parameters */
+ float qcompress; ///< amount of qscale change between easy & hard scenes (0.0-1.0)
+ float qblur; ///< amount of qscale smoothing over time (0.0-1.0)
+
+ /**
+ * minimum quantizer
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int qmin;
+
+ /**
+ * maximum quantizer
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int qmax;
+
+ /**
+ * maximum quantizer difference between frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_qdiff;
+
+ /**
+ * maximum number of B-frames between non-B-frames
+ * Note: The output will be delayed by max_b_frames+1 relative to the input.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_b_frames;
+
+ /**
+ * qscale factor between IP and B-frames
+ * If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset).
+ * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float b_quant_factor;
+
+ /** obsolete FIXME remove */
+ int rc_strategy;
+#define FF_RC_STRATEGY_XVID 1
+
+ int b_frame_strategy;
+
+ struct AVCodec *codec;
+
+ void *priv_data;
+
+ int rtp_payload_size; /* The size of the RTP payload: the coder will */
+ /* do its best to deliver a chunk with size */
+ /* below rtp_payload_size, the chunk will start */
+ /* with a start code on some codecs like H.263. */
+ /* This doesn't take account of any particular */
+ /* headers inside the transmitted RTP payload. */
+
+
+ /* The RTP callback: This function is called */
+ /* every time the encoder has a packet to send. */
+ /* It depends on the encoder if the data starts */
+ /* with a Start Code (it should). H.263 does. */
+ /* mb_nb contains the number of macroblocks */
+ /* encoded in the RTP payload. */
+ void (*rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb);
+
+ /* statistics, used for 2-pass encoding */
+ int mv_bits;
+ int header_bits;
+ int i_tex_bits;
+ int p_tex_bits;
+ int i_count;
+ int p_count;
+ int skip_count;
+ int misc_bits;
+
+ /**
+ * number of bits used for the previously encoded frame
+ * - encoding: Set by libavcodec.
+ * - decoding: unused
+ */
+ int frame_bits;
+
+ /**
+ * Private data of the user, can be used to carry app specific stuff.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ void *opaque;
+
+ char codec_name[32];
+ enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */
+ enum CodecID codec_id; /* see CODEC_ID_xxx */
+
+ /**
+ * fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
+ * This is used to work around some encoder bugs.
+ * A demuxer should set this to what is stored in the field used to identify the codec.
+ * If there are multiple such fields in a container then the demuxer should choose the one
+ * which maximizes the information about the used codec.
+ * If the codec tag field in a container is larger than 32 bits then the demuxer should
+ * remap the longer ID to 32 bits with a table or other structure. Alternatively a new
+ * extra_codec_tag + size could be added but for this a clear advantage must be demonstrated
+ * first.
+ * - encoding: Set by user, if not then the default based on codec_id will be used.
+ * - decoding: Set by user, will be converted to uppercase by libavcodec during init.
+ */
+ unsigned int codec_tag;
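+
+ /**
+ * Example (sketch): building the 32-bit tag for "H264" exactly as described
+ * above, LSB first.
+ * @code
+ * avctx->codec_tag = 'H' | ('2' << 8) | ('6' << 16) | ((unsigned)'4' << 24);
+ * @endcode
+ */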
+
+ /**
+ * Work around bugs in encoders which sometimes cannot be detected automatically.
+ * - encoding: Set by user
+ * - decoding: Set by user
+ */
+ int workaround_bugs;
+#define FF_BUG_AUTODETECT 1 ///< autodetection
+#define FF_BUG_OLD_MSMPEG4 2
+#define FF_BUG_XVID_ILACE 4
+#define FF_BUG_UMP4 8
+#define FF_BUG_NO_PADDING 16
+#define FF_BUG_AMV 32
+#define FF_BUG_AC_VLC 0 ///< Will be removed, libavcodec can now handle these non-compliant files by default.
+#define FF_BUG_QPEL_CHROMA 64
+#define FF_BUG_STD_QPEL 128
+#define FF_BUG_QPEL_CHROMA2 256
+#define FF_BUG_DIRECT_BLOCKSIZE 512
+#define FF_BUG_EDGE 1024
+#define FF_BUG_HPEL_CHROMA 2048
+#define FF_BUG_DC_CLIP 4096
+#define FF_BUG_MS 8192 ///< Work around various bugs in Microsoft's broken decoders.
+#define FF_BUG_TRUNCATED 16384
+//#define FF_BUG_FAKE_SCALABILITY 16 //Autodetection should work 100%.
+
+ /**
+ * luma single coefficient elimination threshold
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int luma_elim_threshold;
+
+ /**
+ * chroma single coeff elimination threshold
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int chroma_elim_threshold;
+
+ /**
+ * strictly follow the standard (MPEG4, ...).
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ * Setting this to STRICT or higher means the encoder and decoder will
+ * generally do stupid things, whereas setting it to unofficial or lower
+ * will mean the encoder might produce output that is not supported by all
+ * spec-compliant decoders. Decoders don't differentiate between normal,
+ * unofficial and experimental (that is, they always try to decode things
+ * when they can) unless they are explicitly asked to behave stupidly
+ * (=strictly conform to the specs)
+ */
+ int strict_std_compliance;
+#define FF_COMPLIANCE_VERY_STRICT 2 ///< Strictly conform to an older more strict version of the spec or reference software.
+#define FF_COMPLIANCE_STRICT 1 ///< Strictly conform to all the things in the spec no matter what consequences.
+#define FF_COMPLIANCE_NORMAL 0
+#define FF_COMPLIANCE_UNOFFICIAL -1 ///< Allow unofficial extensions
+#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things.
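+
+ /**
+ * Example (sketch): opting in to experimental codecs (see
+ * CODEC_CAP_EXPERIMENTAL) before opening the codec.
+ * @code
+ * avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
+ * @endcode
+ */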
+
+ /**
+ * qscale offset between IP and B-frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float b_quant_offset;
+
+#if FF_API_ER
+ /**
+ * Error recognition; higher values will detect more errors but may
+ * misdetect some more or less valid parts as errors.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ attribute_deprecated int error_recognition;
+#define FF_ER_CAREFUL 1
+#define FF_ER_COMPLIANT 2
+#define FF_ER_AGGRESSIVE 3
+#define FF_ER_VERY_AGGRESSIVE 4
+#define FF_ER_EXPLODE 5
+#endif /* FF_API_ER */
+
+ /**
+ * Called at the beginning of each frame to get a buffer for it.
+ *
+ * The function will set AVFrame.data[], AVFrame.linesize[].
+ * AVFrame.extended_data[] must also be set, but it should be the same as
+ * AVFrame.data[] except for planar audio with more channels than can fit
+ * in AVFrame.data[]. In that case, AVFrame.data[] shall still contain as
+ * many data pointers as it can hold.
+ *
+ * if CODEC_CAP_DR1 is not set then get_buffer() must call
+ * avcodec_default_get_buffer() instead of providing buffers allocated by
+ * some other means.
+ *
+ * AVFrame.data[] should be 32- or 16-byte-aligned unless the CPU doesn't
+ * need it. avcodec_default_get_buffer() aligns the output buffer properly,
+ * but if get_buffer() is overridden then alignment considerations should
+ * be taken into account.
+ *
+ * @see avcodec_default_get_buffer()
+ *
+ * Video:
+ *
+ * If pic.reference is set then the frame will be read later by libavcodec.
+ * avcodec_align_dimensions2() should be used to find the required width and
+ * height, as they normally need to be rounded up to the next multiple of 16.
+ *
+ * If frame multithreading is used and thread_safe_callbacks is set,
+ * it may be called from a different thread, but not from more than one at
+ * once. Does not need to be reentrant.
+ *
+ * @see release_buffer(), reget_buffer()
+ * @see avcodec_align_dimensions2()
+ *
+ * Audio:
+ *
+ * Decoders request a buffer of a particular size by setting
+ * AVFrame.nb_samples prior to calling get_buffer(). The decoder may,
+ * however, utilize only part of the buffer by setting AVFrame.nb_samples
+ * to a smaller value in the output frame.
+ *
+ * Decoders cannot use the buffer after returning from
+ * avcodec_decode_audio4(), so they will not call release_buffer(), as it
+ * is assumed to be released immediately upon return.
+ *
+ * As a convenience, av_samples_get_buffer_size() and
+ * av_samples_fill_arrays() in libavutil may be used by custom get_buffer()
+ * functions to find the required data size and to fill data pointers and
+ * linesize. In AVFrame.linesize, only linesize[0] may be set for audio
+ * since all planes must be the same size.
+ *
+ * @see av_samples_get_buffer_size(), av_samples_fill_arrays()
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic);
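+
+ /**
+ * Illustrative sketch of a custom get_buffer() that keeps the default
+ * allocation (avcodec_default_get_buffer(), mentioned above) but tags each
+ * frame with application state through AVFrame.opaque.
+ * @code
+ * static int my_get_buffer(struct AVCodecContext *c, AVFrame *pic)
+ * {
+ *     int ret = avcodec_default_get_buffer(c, pic);
+ *     if (ret == 0)
+ *         pic->opaque = c->opaque; // e.g. per-stream application state
+ *     return ret;
+ * }
+ * @endcode
+ */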
+
+ /**
+ * Called to release buffers which were allocated with get_buffer.
+ * A released buffer can be reused in get_buffer().
+ * pic.data[*] must be set to NULL.
+ * May be called from a different thread if frame multithreading is used,
+ * but not by more than one thread at once, so does not need to be reentrant.
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ */
+ void (*release_buffer)(struct AVCodecContext *c, AVFrame *pic);
+
+ /**
+ * Size of the frame reordering buffer in the decoder.
+ * For MPEG-2 it is 1 IPB or 0 low delay IP.
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int has_b_frames;
+
+ /**
+ * number of bytes per packet if constant and known or 0
+ * Used by some WAV based audio codecs.
+ */
+ int block_align;
+
+#if FF_API_PARSE_FRAME
+ /**
+ * If true, only parsing is done. The frame data is returned.
+ * Only MPEG audio decoders support this now.
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ attribute_deprecated int parse_only;
+#endif
+
+ /**
+ * 0-> h263 quant 1-> mpeg quant
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mpeg_quant;
+
+ /**
+ * pass1 encoding statistics output buffer
+ * - encoding: Set by libavcodec.
+ * - decoding: unused
+ */
+ char *stats_out;
+
+ /**
+ * pass2 encoding statistics input buffer
+ * Concatenated stuff from stats_out of pass1 should be placed here.
+ * - encoding: Allocated/set/freed by user.
+ * - decoding: unused
+ */
+ char *stats_in;
+
+ /**
+ * ratecontrol qmin qmax limiting method
+ * 0-> clipping, 1-> use a nice continuous function to limit qscale within qmin/qmax.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float rc_qsquish;
+
+ float rc_qmod_amp;
+ int rc_qmod_freq;
+
+ /**
+ * ratecontrol override, see RcOverride
+ * - encoding: Allocated/set/freed by user.
+ * - decoding: unused
+ */
+ RcOverride *rc_override;
+ int rc_override_count;
+
+ /**
+ * rate control equation
+ * - encoding: Set by user
+ * - decoding: unused
+ */
+ const char *rc_eq;
+
+ /**
+ * maximum bitrate
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_max_rate;
+
+ /**
+ * minimum bitrate
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_min_rate;
+
+ /**
+ * decoder bitstream buffer size
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_buffer_size;
+ float rc_buffer_aggressivity;
+
+ /**
+ * qscale factor between P and I-frames
+ * If > 0 then the last p frame quantizer will be used (q= lastp_q*factor+offset).
+ * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float i_quant_factor;
+
+ /**
+ * qscale offset between P and I-frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float i_quant_offset;
+
+ /**
+ * initial complexity for pass1 ratecontrol
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float rc_initial_cplx;
+
+ /**
+ * DCT algorithm, see FF_DCT_* below
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int dct_algo;
+#define FF_DCT_AUTO 0
+#define FF_DCT_FASTINT 1
+#define FF_DCT_INT 2
+#define FF_DCT_MMX 3
+#define FF_DCT_MLIB 4
+#define FF_DCT_ALTIVEC 5
+#define FF_DCT_FAAN 6
+
+ /**
+ * luminance masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float lumi_masking;
+
+ /**
+ * temporal complexity masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float temporal_cplx_masking;
+
+ /**
+ * spatial complexity masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float spatial_cplx_masking;
+
+ /**
+ * p block masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float p_masking;
+
+ /**
+ * darkness masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float dark_masking;
+
+ /**
+ * IDCT algorithm, see FF_IDCT_* below.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int idct_algo;
+#define FF_IDCT_AUTO 0
+#define FF_IDCT_INT 1
+#define FF_IDCT_SIMPLE 2
+#define FF_IDCT_SIMPLEMMX 3
+#define FF_IDCT_LIBMPEG2MMX 4
+#define FF_IDCT_PS2 5
+#define FF_IDCT_MLIB 6
+#define FF_IDCT_ARM 7
+#define FF_IDCT_ALTIVEC 8
+#define FF_IDCT_SH4 9
+#define FF_IDCT_SIMPLEARM 10
+#define FF_IDCT_H264 11
+#define FF_IDCT_VP3 12
+#define FF_IDCT_IPP 13
+#define FF_IDCT_XVIDMMX 14
+#define FF_IDCT_CAVS 15
+#define FF_IDCT_SIMPLEARMV5TE 16
+#define FF_IDCT_SIMPLEARMV6 17
+#define FF_IDCT_SIMPLEVIS 18
+#define FF_IDCT_WMV2 19
+#define FF_IDCT_FAAN 20
+#define FF_IDCT_EA 21
+#define FF_IDCT_SIMPLENEON 22
+#define FF_IDCT_SIMPLEALPHA 23
+#define FF_IDCT_BINK 24
+
+ /**
+ * slice count
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by user (or 0).
+ */
+ int slice_count;
+ /**
+ * slice offsets in the frame in bytes
+ * - encoding: Set/allocated by libavcodec.
+ * - decoding: Set/allocated by user (or NULL).
+ */
+ int *slice_offset;
+
+ /**
+ * error concealment flags
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int error_concealment;
+#define FF_EC_GUESS_MVS 1
+#define FF_EC_DEBLOCK 2
+
+ /**
+ * dsp_mask can be used to disable unwanted
+ * CPU features (i.e. MMX, SSE, ...)
+ *
+ * With the FORCE flag you may instead enable given CPU features.
+ * (Dangerous: Usable in case of misdetection, improper usage however will
+ * result into program crash.)
+ */
+ unsigned dsp_mask;
+
+ /**
+ * bits per sample/pixel from the demuxer (needed for huffyuv).
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by user.
+ */
+ int bits_per_coded_sample;
+
+ /**
+ * prediction method (needed for huffyuv)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int prediction_method;
+#define FF_PRED_LEFT 0
+#define FF_PRED_PLANE 1
+#define FF_PRED_MEDIAN 2
+
+ /**
+ * sample aspect ratio (0 if unknown)
+ * That is the width of a pixel divided by the height of the pixel.
+ * Numerator and denominator must be relatively prime and smaller than 256 for some video standards.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ AVRational sample_aspect_ratio;
+
+ /**
+ * the picture in the bitstream
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ AVFrame *coded_frame;
+
+ /**
+ * debug
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int debug;
+#define FF_DEBUG_PICT_INFO 1
+#define FF_DEBUG_RC 2
+#define FF_DEBUG_BITSTREAM 4
+#define FF_DEBUG_MB_TYPE 8
+#define FF_DEBUG_QP 16
+#define FF_DEBUG_MV 32
+#define FF_DEBUG_DCT_COEFF 0x00000040
+#define FF_DEBUG_SKIP 0x00000080
+#define FF_DEBUG_STARTCODE 0x00000100
+#define FF_DEBUG_PTS 0x00000200
+#define FF_DEBUG_ER 0x00000400
+#define FF_DEBUG_MMCO 0x00000800
+#define FF_DEBUG_BUGS 0x00001000
+#define FF_DEBUG_VIS_QP 0x00002000
+#define FF_DEBUG_VIS_MB_TYPE 0x00004000
+#define FF_DEBUG_BUFFERS 0x00008000
+#define FF_DEBUG_THREADS 0x00010000
+
+ /**
+ * debug
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int debug_mv;
+#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames
+#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames
+#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames
+
+ /**
+ * error
+ * - encoding: Set by libavcodec if flags&CODEC_FLAG_PSNR.
+ * - decoding: unused
+ */
+ uint64_t error[AV_NUM_DATA_POINTERS];
+
+ /**
+ * motion estimation comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_cmp;
+ /**
+ * subpixel motion estimation comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_sub_cmp;
+ /**
+ * macroblock comparison function (not supported yet)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_cmp;
+ /**
+ * interlaced DCT comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int ildct_cmp;
+#define FF_CMP_SAD 0
+#define FF_CMP_SSE 1
+#define FF_CMP_SATD 2
+#define FF_CMP_DCT 3
+#define FF_CMP_PSNR 4
+#define FF_CMP_BIT 5
+#define FF_CMP_RD 6
+#define FF_CMP_ZERO 7
+#define FF_CMP_VSAD 8
+#define FF_CMP_VSSE 9
+#define FF_CMP_NSSE 10
+#define FF_CMP_W53 11
+#define FF_CMP_W97 12
+#define FF_CMP_DCTMAX 13
+#define FF_CMP_DCT264 14
+#define FF_CMP_CHROMA 256
+
+ /**
+ * ME diamond size & shape
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int dia_size;
+
+ /**
+ * amount of previous MV predictors (2a+1 x 2a+1 square)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int last_predictor_count;
+
+ /**
+ * prepass for motion estimation
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int pre_me;
+
+ /**
+ * motion estimation prepass comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_pre_cmp;
+
+ /**
+ * ME prepass diamond size & shape
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int pre_dia_size;
+
+ /**
+ * subpel ME quality
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_subpel_quality;
+
+ /**
+ * callback to negotiate the pixelFormat
+ * @param fmt is the list of formats which are supported by the codec,
+ * it is terminated by -1 as 0 is a valid format, the formats are ordered by quality.
+ * The first is always the native one.
+ * @return the chosen format
+ * - encoding: unused
+ * - decoding: Set by user, if not set the native format will be chosen.
+ */
+ enum PixelFormat (*get_format)(struct AVCodecContext *s, const enum PixelFormat * fmt);
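+
+ /**
+ * Example (sketch): a get_format() callback that simply accepts the
+ * decoder's native format, which is always first in the list.
+ * @code
+ * static enum PixelFormat my_get_format(struct AVCodecContext *s,
+ *                                       const enum PixelFormat *fmt)
+ * {
+ *     return fmt[0];
+ * }
+ * @endcode
+ */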
+
+ /**
+ * DTG active format information (additional aspect ratio
+ * information only used in DVB MPEG-2 transport streams)
+ * 0 if not set.
+ *
+ * - encoding: unused
+ * - decoding: Set by decoder.
+ */
+ int dtg_active_format;
+#define FF_DTG_AFD_SAME 8
+#define FF_DTG_AFD_4_3 9
+#define FF_DTG_AFD_16_9 10
+#define FF_DTG_AFD_14_9 11
+#define FF_DTG_AFD_4_3_SP_14_9 13
+#define FF_DTG_AFD_16_9_SP_14_9 14
+#define FF_DTG_AFD_SP_4_3 15
+
+ /**
+ * maximum motion estimation search range in subpel units
+ * If 0 then no limit.
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_range;
+
+ /**
+ * intra quantizer bias
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int intra_quant_bias;
+#define FF_DEFAULT_QUANT_BIAS 999999
+
+ /**
+ * inter quantizer bias
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int inter_quant_bias;
+
+ /**
+ * color table ID
+ * - encoding: unused
+ * - decoding: Which color table should be used for 8-bit RGB images.
+ * Tables have to be stored somewhere. FIXME
+ */
+ int color_table_id;
+
+#if FF_API_INTERNAL_CONTEXT
+ /**
+ * internal_buffer count
+ * Don't touch, used by libavcodec default_get_buffer().
+ * @deprecated this field was moved to an internal context
+ */
+ attribute_deprecated int internal_buffer_count;
+
+ /**
+ * internal_buffers
+ * Don't touch, used by libavcodec default_get_buffer().
+ * @deprecated this field was moved to an internal context
+ */
+ attribute_deprecated void *internal_buffer;
+#endif
+
+ /**
+ * Global quality for codecs which cannot change it per frame.
+ * This should be proportional to MPEG-1/2/4 qscale.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int global_quality;
+
+#define FF_CODER_TYPE_VLC 0
+#define FF_CODER_TYPE_AC 1
+#define FF_CODER_TYPE_RAW 2
+#define FF_CODER_TYPE_RLE 3
+#define FF_CODER_TYPE_DEFLATE 4
+ /**
+ * coder type
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int coder_type;
+
+ /**
+ * context model
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int context_model;
+#if 0
+ /**
+ *
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ uint8_t * (*realloc)(struct AVCodecContext *s, uint8_t *buf, int buf_size);
+#endif
+
+ /**
+ * slice flags
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int slice_flags;
+#define SLICE_FLAG_CODED_ORDER 0x0001 ///< draw_horiz_band() is called in coded order instead of display
+#define SLICE_FLAG_ALLOW_FIELD 0x0002 ///< allow draw_horiz_band() with field slices (MPEG2 field pics)
+#define SLICE_FLAG_ALLOW_PLANE 0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1)
+
+ /**
+ * XVideo Motion Acceleration
+ * - encoding: forbidden
+ * - decoding: set by decoder
+ */
+ int xvmc_acceleration;
+
+ /**
+ * macroblock decision mode
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_decision;
+#define FF_MB_DECISION_SIMPLE 0 ///< uses mb_cmp
+#define FF_MB_DECISION_BITS 1 ///< chooses the one which needs the fewest bits
+#define FF_MB_DECISION_RD 2 ///< rate distortion
+
+ /**
+ * custom intra quantization matrix
+ * - encoding: Set by user, can be NULL.
+ * - decoding: Set by libavcodec.
+ */
+ uint16_t *intra_matrix;
+
+ /**
+ * custom inter quantization matrix
+ * - encoding: Set by user, can be NULL.
+ * - decoding: Set by libavcodec.
+ */
+ uint16_t *inter_matrix;
+
+ /**
+ * fourcc from the AVI stream header (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
+ * This is used to work around some encoder bugs.
+ * - encoding: unused
+ * - decoding: Set by user, will be converted to uppercase by libavcodec during init.
+ */
+ unsigned int stream_codec_tag;
+
+ /**
+ * scene change detection threshold
+ * 0 is default, larger means fewer detected scene changes.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int scenechange_threshold;
+
+ /**
+ * minimum Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int lmin;
+
+ /**
+ * maximum Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int lmax;
+
+#if FF_API_PALETTE_CONTROL
+ /**
+ * palette control structure
+ * - encoding: ??? (no palette-enabled encoder yet)
+ * - decoding: Set by user.
+ */
+ struct AVPaletteControl *palctrl;
+#endif
+
+ /**
+ * noise reduction strength
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int noise_reduction;
+
+ /**
+ * Called at the beginning of a frame to get a buffer for it.
+ * Buffer type (size, hints) must be the same. libavcodec won't check it.
+ * libavcodec will pass previous buffer in pic, function should return
+ * same buffer or new buffer with old frame "painted" into it.
+ * If pic.data[0] == NULL must behave like get_buffer().
+ * if CODEC_CAP_DR1 is not set then reget_buffer() must call
+ * avcodec_default_reget_buffer() instead of providing buffers allocated by
+ * some other means.
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*reget_buffer)(struct AVCodecContext *c, AVFrame *pic);
+
+ /**
+ * Number of bits which should be loaded into the rc buffer before decoding starts.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_initial_buffer_occupancy;
+
+ /**
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int inter_threshold;
+
+ /**
+ * CODEC_FLAG2_*
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int flags2;
+
+ /**
+ * Simulates errors in the bitstream to test error concealment.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int error_rate;
+
+#if FF_API_ANTIALIAS_ALGO
+ /**
+ * MP3 antialias algorithm, see FF_AA_* below.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ attribute_deprecated int antialias_algo;
+#define FF_AA_AUTO 0
+#define FF_AA_FASTINT 1 //not implemented yet
+#define FF_AA_INT 2
+#define FF_AA_FLOAT 3
+#endif
+
+ /**
+ * quantizer noise shaping
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int quantizer_noise_shaping;
+
+ /**
+ * thread count
+ * is used to decide how many independent tasks should be passed to execute()
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int thread_count;
+
+ /**
+ * The codec may call this to execute several independent things.
+ * It will return only after finishing all tasks.
+ * The user may replace this with some multithreaded implementation,
+ * the default implementation will execute the parts serially.
+ * @param count the number of things to execute
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*execute)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size);
+
+ /**
+ * thread opaque
+ * Can be used by execute() to store some per AVCodecContext stuff.
+ * - encoding: set by execute()
+ * - decoding: set by execute()
+ */
+ void *thread_opaque;
+
+ /**
+ * Motion estimation threshold below which no motion estimation is
+ * performed, but instead the user specified motion vectors are used.
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_threshold;
+
+ /**
+ * Macroblock threshold below which the user specified macroblock types will be used.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_threshold;
+
+ /**
+ * precision of the intra DC coefficient - 8
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int intra_dc_precision;
+
+ /**
+ * noise vs. sse weight for the nsse comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int nsse_weight;
+
+ /**
+ * Number of macroblock rows at the top which are skipped.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int skip_top;
+
+ /**
+ * Number of macroblock rows at the bottom which are skipped.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int skip_bottom;
+
+ /**
+ * profile
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int profile;
+#define FF_PROFILE_UNKNOWN -99
+#define FF_PROFILE_RESERVED -100
+
+#define FF_PROFILE_AAC_MAIN 0
+#define FF_PROFILE_AAC_LOW 1
+#define FF_PROFILE_AAC_SSR 2
+#define FF_PROFILE_AAC_LTP 3
+
+#define FF_PROFILE_DTS 20
+#define FF_PROFILE_DTS_ES 30
+#define FF_PROFILE_DTS_96_24 40
+#define FF_PROFILE_DTS_HD_HRA 50
+#define FF_PROFILE_DTS_HD_MA 60
+
+#define FF_PROFILE_MPEG2_422 0
+#define FF_PROFILE_MPEG2_HIGH 1
+#define FF_PROFILE_MPEG2_SS 2
+#define FF_PROFILE_MPEG2_SNR_SCALABLE 3
+#define FF_PROFILE_MPEG2_MAIN 4
+#define FF_PROFILE_MPEG2_SIMPLE 5
+
+#define FF_PROFILE_H264_CONSTRAINED (1<<9) // 8+1; constraint_set1_flag
+#define FF_PROFILE_H264_INTRA (1<<11) // 8+3; constraint_set3_flag
+
+#define FF_PROFILE_H264_BASELINE 66
+#define FF_PROFILE_H264_CONSTRAINED_BASELINE (66|FF_PROFILE_H264_CONSTRAINED)
+#define FF_PROFILE_H264_MAIN 77
+#define FF_PROFILE_H264_EXTENDED 88
+#define FF_PROFILE_H264_HIGH 100
+#define FF_PROFILE_H264_HIGH_10 110
+#define FF_PROFILE_H264_HIGH_10_INTRA (110|FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_HIGH_422 122
+#define FF_PROFILE_H264_HIGH_422_INTRA (122|FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_HIGH_444 144
+#define FF_PROFILE_H264_HIGH_444_PREDICTIVE 244
+#define FF_PROFILE_H264_HIGH_444_INTRA (244|FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_CAVLC_444 44
+
+#define FF_PROFILE_VC1_SIMPLE 0
+#define FF_PROFILE_VC1_MAIN 1
+#define FF_PROFILE_VC1_COMPLEX 2
+#define FF_PROFILE_VC1_ADVANCED 3
+
+#define FF_PROFILE_MPEG4_SIMPLE 0
+#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE 1
+#define FF_PROFILE_MPEG4_CORE 2
+#define FF_PROFILE_MPEG4_MAIN 3
+#define FF_PROFILE_MPEG4_N_BIT 4
+#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE 5
+#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6
+#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7
+#define FF_PROFILE_MPEG4_HYBRID 8
+#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME 9
+#define FF_PROFILE_MPEG4_CORE_SCALABLE 10
+#define FF_PROFILE_MPEG4_ADVANCED_CODING 11
+#define FF_PROFILE_MPEG4_ADVANCED_CORE 12
+#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13
+#define FF_PROFILE_MPEG4_SIMPLE_STUDIO 14
+#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE 15
+
+ /**
+ * level
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int level;
+#define FF_LEVEL_UNKNOWN -99
+
+ /**
+ * low resolution decoding, 1-> 1/2 size, 2->1/4 size
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int lowres;
+
+ /**
+ * Bitstream width / height, may be different from width/height if lowres enabled.
+ * - encoding: unused
+ * - decoding: Set by user before init if known. Codec should override / dynamically change if needed.
+ */
+ int coded_width, coded_height;
+
+ /**
+ * frame skip threshold
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_threshold;
+
+ /**
+ * frame skip factor
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_factor;
+
+ /**
+ * frame skip exponent
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_exp;
+
+ /**
+ * frame skip comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_cmp;
+
+ /**
+ * Border processing masking, raises the quantizer for mbs on the borders
+ * of the picture.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float border_masking;
+
+ /**
+ * minimum MB Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_lmin;
+
+ /**
+ * maximum MB Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_lmax;
+
+ /**
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_penalty_compensation;
+
+ /**
+ *
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_loop_filter;
+
+ /**
+ *
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_idct;
+
+ /**
+ *
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_frame;
+
+ /**
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int bidir_refine;
+
+ /**
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int brd_scale;
+
+#if FF_API_X264_GLOBAL_OPTS
+ /**
+ * constant rate factor - quality-based VBR - values ~correspond to qps
+ * - encoding: Set by user.
+ * - decoding: unused
+ * @deprecated use 'crf' libx264 private option
+ */
+ attribute_deprecated float crf;
+
+ /**
+ * constant quantization parameter rate control method
+ * - encoding: Set by user.
+ * - decoding: unused
+ * @deprecated use 'cqp' libx264 private option
+ */
+ attribute_deprecated int cqp;
+#endif
+
+ /**
+ * minimum GOP size
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int keyint_min;
+
+ /**
+ * number of reference frames
+ * - encoding: Set by user.
+ * - decoding: Set by lavc.
+ */
+ int refs;
+
+ /**
+ * chroma qp offset from luma
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int chromaoffset;
+
+#if FF_API_X264_GLOBAL_OPTS
+ /**
+ * Influence how often B-frames are used.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ attribute_deprecated int bframebias;
+#endif
+
+ /**
+ * trellis RD quantization
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int trellis;
+
+#if FF_API_X264_GLOBAL_OPTS
+ /**
+ * Reduce fluctuations in qp (before curve compression).
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ attribute_deprecated float complexityblur;
+
+ /**
+ * in-loop deblocking filter alphac0 parameter
+ * alpha is in the range -6...6
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ attribute_deprecated int deblockalpha;
+
+ /**
+ * in-loop deblocking filter beta parameter
+ * beta is in the range -6...6
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ attribute_deprecated int deblockbeta;
+
+ /**
+ * macroblock subpartition sizes to consider - p8x8, p4x4, b8x8, i8x8, i4x4
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ attribute_deprecated int partitions;
+#define X264_PART_I4X4 0x001 /* Analyze i4x4 */
+#define X264_PART_I8X8 0x002 /* Analyze i8x8 (requires 8x8 transform) */
+#define X264_PART_P8X8 0x010 /* Analyze p16x8, p8x16 and p8x8 */
+#define X264_PART_P4X4 0x020 /* Analyze p8x4, p4x8, p4x4 */
+#define X264_PART_B8X8 0x100 /* Analyze b16x8, b8x16 and b8x8 */
+
+ /**
+ * direct MV prediction mode - 0 (none), 1 (spatial), 2 (temporal), 3 (auto)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ attribute_deprecated int directpred;
+#endif
+
+ /**
+ * Audio cutoff bandwidth (0 means "automatic")
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int cutoff;
+
+ /**
+ * Multiplied by qscale for each frame and added to scene_change_score.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int scenechange_factor;
+
+ /**
+ *
+ * Note: Value depends upon the compare function used for fullpel ME.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mv0_threshold;
+
+ /**
+ * Adjust sensitivity of b_frame_strategy 1.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int b_sensitivity;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int compression_level;
+#define FF_COMPRESSION_DEFAULT -1
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int min_prediction_order;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_prediction_order;
+
+#if FF_API_FLAC_GLOBAL_OPTS
+ /**
+ * @name FLAC options
+ * @deprecated Use FLAC encoder private options instead.
+ * @{
+ */
+
+ /**
+ * LPC coefficient precision - used by FLAC encoder
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ attribute_deprecated int lpc_coeff_precision;
+
+ /**
+ * search method for selecting prediction order
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ attribute_deprecated int prediction_order_method;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ attribute_deprecated int min_partition_order;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ attribute_deprecated int max_partition_order;
+ /**
+ * @}
+ */
+#endif
+
+ /**
+ * GOP timecode frame start number, in non drop frame format
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int64_t timecode_frame_start;
+
+#if FF_API_REQUEST_CHANNELS
+ /**
+ * Decoder should decode to this many channels if it can (0 for default)
+ * - encoding: unused
+ * - decoding: Set by user.
+ * @deprecated Deprecated in favor of request_channel_layout.
+ */
+ int request_channels;
+#endif
+
+#if FF_API_DRC_SCALE
+ /**
+ * Percentage of dynamic range compression to be applied by the decoder.
+ * The default value is 1.0, corresponding to full compression.
+ * - encoding: unused
+ * - decoding: Set by user.
+ * @deprecated use AC3 decoder private option instead.
+ */
+ attribute_deprecated float drc_scale;
+#endif
+
+ /**
+ * opaque 64bit number (generally a PTS) that will be reordered and
+ * output in AVFrame.reordered_opaque
+ * @deprecated in favor of pkt_pts
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int64_t reordered_opaque;
+
+ /**
+ * Bits per sample/pixel of internal libavcodec pixel/sample format.
+ * - encoding: set by user.
+ * - decoding: set by libavcodec.
+ */
+ int bits_per_raw_sample;
+
+ /**
+ * Audio channel layout.
+ * - encoding: set by user.
+ * - decoding: set by libavcodec.
+ */
+ uint64_t channel_layout;
+
+ /**
+ * Request decoder to use this channel layout if it can (0 for default)
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ uint64_t request_channel_layout;
+
+ /**
+ * Rate control will attempt to use, at most, <value> of what can be used without an underflow.
+ * - encoding: Set by user.
+ * - decoding: unused.
+ */
+ float rc_max_available_vbv_use;
+
+ /**
+ * Rate control will attempt to use at least <value> times the amount needed to prevent a VBV overflow.
+ * - encoding: Set by user.
+ * - decoding: unused.
+ */
+ float rc_min_vbv_overflow_use;
+
+ /**
+ * Hardware accelerator in use
+ * - encoding: unused.
+ * - decoding: Set by libavcodec
+ */
+ struct AVHWAccel *hwaccel;
+
+ /**
+ * For some codecs, the time base is closer to the field rate than the frame rate.
+ * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration
+ * if no telecine is used ...
+ *
+ * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2.
+ */
+ int ticks_per_frame;
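+
+ /**
+ * Example (sketch): nominal frame duration in seconds derived from
+ * time_base and ticks_per_frame.
+ * @code
+ * double frame_duration = (double)avctx->ticks_per_frame *
+ *                         avctx->time_base.num / avctx->time_base.den;
+ * @endcode
+ */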
+
+ /**
+ * Hardware accelerator context.
+ * For some hardware accelerators, a global context needs to be
+ * provided by the user. In that case, this holds display-dependent
+ * data Libav cannot instantiate itself. Please refer to the
+ * Libav HW accelerator documentation to know how to fill this
+ * in; e.g. for VA API, this is a struct vaapi_context.
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ void *hwaccel_context;
+
+ /**
+ * Chromaticity coordinates of the source primaries.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorPrimaries color_primaries;
+
+ /**
+ * Color Transfer Characteristic.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorTransferCharacteristic color_trc;
+
+ /**
+ * YUV colorspace type.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorSpace colorspace;
+
+ /**
+ * MPEG vs JPEG YUV range.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorRange color_range;
+
+ /**
+ * This defines the location of chroma samples.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVChromaLocation chroma_sample_location;
+
+ /**
+ * The codec may call this to execute several independent things.
+ * It will return only after finishing all tasks.
+ * The user may replace this with some multithreaded implementation,
+ * the default implementation will execute the parts serially.
+ * Also see avcodec_thread_init and e.g. the --enable-pthread configure option.
+ * @param c context passed also to func
+ * @param count the number of things to execute
+ * @param arg2 argument passed unchanged to func
+ * @param ret return values of executed functions, must have space for "count" values. May be NULL.
+ * @param func function that will be called count times, with jobnr from 0 to count-1.
+ * threadnr will be in the range 0 to c->thread_count-1 < MAX_THREADS, such that no
+ * two instances of func executing at the same time will have the same threadnr.
+ * @return always 0 currently, but code should handle a future change where, when any call to func
+ * returns < 0, no further calls to func are made and < 0 is returned.
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*execute2)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count);
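+
+ /**
+ * Illustrative sketch of a serial execute2() replacement matching the
+ * contract above: every job is run exactly once and results are stored
+ * when ret is non-NULL.
+ * @code
+ * static int my_execute2(struct AVCodecContext *c,
+ *                        int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr),
+ *                        void *arg2, int *ret, int count)
+ * {
+ *     for (int jobnr = 0; jobnr < count; jobnr++) {
+ *         int r = func(c, arg2, jobnr, 0);
+ *         if (ret)
+ *             ret[jobnr] = r;
+ *     }
+ *     return 0;
+ * }
+ * @endcode
+ */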
+
+#if FF_API_X264_GLOBAL_OPTS
+ /**
+ * explicit P-frame weighted prediction analysis method
+ * 0: off
+ * 1: fast blind weighting (one reference duplicate with -1 offset)
+ * 2: smart weighting (full fade detection analysis)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ attribute_deprecated int weighted_p_pred;
+
+ /**
+ * AQ mode
+ * 0: Disabled
+ * 1: Variance AQ (complexity mask)
+ * 2: Auto-variance AQ (experimental)
+ * - encoding: Set by user
+ * - decoding: unused
+ */
+ attribute_deprecated int aq_mode;
+
+ /**
+ * AQ strength
+ * Reduces blocking and blurring in flat and textured areas.
+ * - encoding: Set by user
+ * - decoding: unused
+ */
+ attribute_deprecated float aq_strength;
+
+ /**
+ * PSY RD
+ * Strength of psychovisual optimization
+ * - encoding: Set by user
+ * - decoding: unused
+ */
+ attribute_deprecated float psy_rd;
+
+ /**
+ * PSY trellis
+ * Strength of psychovisual optimization
+ * - encoding: Set by user
+ * - decoding: unused
+ */
+ attribute_deprecated float psy_trellis;
+
+ /**
+ * RC lookahead
+ * Number of frames for frametype and ratecontrol lookahead
+ * - encoding: Set by user
+ * - decoding: unused
+ */
+ attribute_deprecated int rc_lookahead;
+
+ /**
+ * Constant rate factor maximum
+ * With CRF encoding mode and VBV restrictions enabled, prevents quality from being worse
+ * than crf_max, even if doing so would violate VBV restrictions.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ attribute_deprecated float crf_max;
+#endif
+
+ int log_level_offset;
+
+#if FF_API_FLAC_GLOBAL_OPTS
+ /**
+ * Determine which LPC analysis algorithm to use.
+ * - encoding: Set by user
+ * - decoding: unused
+ */
+ attribute_deprecated enum AVLPCType lpc_type;
+
+ /**
+ * Number of passes to use for Cholesky factorization during LPC analysis
+ * - encoding: Set by user
+ * - decoding: unused
+ */
+ attribute_deprecated int lpc_passes;
+#endif
+
+ /**
+ * Number of slices.
+ * Indicates number of picture subdivisions. Used for parallelized
+ * decoding.
+ * - encoding: Set by user
+ * - decoding: unused
+ */
+ int slices;
+
+ /**
+ * Header containing style information for text subtitles.
+ * For SUBTITLE_ASS subtitle type, it should contain the whole ASS
+ * [Script Info] and [V4+ Styles] section, plus the [Events] line and
+ * the Format line following. It shouldn't include any Dialogue line.
+ * - encoding: Set/allocated/freed by user (before avcodec_open2())
+ * - decoding: Set/allocated/freed by libavcodec (by avcodec_open2())
+ */
+ uint8_t *subtitle_header;
+ int subtitle_header_size;
+
+ /**
+ * Current packet as passed into the decoder, to avoid having
+ * to pass the packet into every function. Currently only valid
+ * inside lavc and get/release_buffer callbacks.
+ * - decoding: set by avcodec_decode_*, read by get_buffer() for setting pkt_pts
+ * - encoding: unused
+ */
+ AVPacket *pkt;
+
+#if FF_API_INTERNAL_CONTEXT
+ /**
+ * Whether this is a copy of the context which had init() called on it.
+ * This is used by multithreading - shared tables and picture pointers
+ * should be freed from the original context only.
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ *
+ * @deprecated this field has been moved to an internal context
+ */
+ attribute_deprecated int is_copy;
+#endif
+
+ /**
+ * Which multithreading methods to use.
+ * Use of FF_THREAD_FRAME will increase decoding delay by one frame per thread,
+ * so clients which cannot provide future frames should not use it.
+ *
+ * - encoding: Set by user, otherwise the default is used.
+ * - decoding: Set by user, otherwise the default is used.
+ */
+ int thread_type;
+#define FF_THREAD_FRAME 1 ///< Decode more than one frame at once
+#define FF_THREAD_SLICE 2 ///< Decode more than one part of a single frame at once
+
+ /**
+ * Which multithreading methods are in use by the codec.
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int active_thread_type;
+
+ /**
+ * Set by the client if its custom get_buffer() callback can be called
+ * from another thread, which allows faster multithreaded decoding.
+ * draw_horiz_band() will be called from other threads regardless of this setting.
+ * Ignored if the default get_buffer() is used.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int thread_safe_callbacks;
+
+ /**
+ * VBV delay coded in the last frame (in periods of a 27 MHz clock).
+ * Used for compliant TS muxing.
+ * - encoding: Set by libavcodec.
+ * - decoding: unused.
+ */
+ uint64_t vbv_delay;
+
+ /**
+ * Type of service that the audio stream conveys.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ enum AVAudioServiceType audio_service_type;
+
+ /**
+ * Used to request a sample format from the decoder.
+ * - encoding: unused.
+ * - decoding: Set by user.
+ */
+ enum AVSampleFormat request_sample_fmt;
+
+ /**
+ * Error recognition; may misdetect some more or less valid parts as errors.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int err_recognition;
+#define AV_EF_CRCCHECK (1<<0)
+#define AV_EF_BITSTREAM (1<<1)
+#define AV_EF_BUFFER (1<<2)
+#define AV_EF_EXPLODE (1<<3)
+
+ /**
+ * Private context used for internal data.
+ *
+ * Unlike priv_data, this is not codec-specific. It is used in general
+ * libavcodec functions.
+ */
+ struct AVCodecInternal *internal;
+
+ /**
+ * Field order
+ * - encoding: Set by libavcodec
+ * - decoding: Set by libavcodec
+ */
+ enum AVFieldOrder field_order;
+} AVCodecContext;
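+
+/*
+ * A short sketch of setting a few of the AVCodecContext fields documented
+ * above before opening a decoder. The codec variable and the chosen values
+ * are only examples, not recommended defaults.
+ *
+ * @code
+ * AVCodecContext *ctx = avcodec_alloc_context3(codec);
+ * ctx->thread_count       = 4;
+ * ctx->thread_type        = FF_THREAD_FRAME | FF_THREAD_SLICE;
+ * ctx->err_recognition    = AV_EF_CRCCHECK | AV_EF_EXPLODE;
+ * ctx->request_sample_fmt = AV_SAMPLE_FMT_S16; // audio decoders only
+ * if (avcodec_open2(ctx, codec, NULL) < 0)
+ *     exit(1);
+ * @endcode
+ */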
+
+/**
+ * AVProfile.
+ */
+typedef struct AVProfile {
+ int profile;
+ const char *name; ///< short name for the profile
+} AVProfile;
+
+typedef struct AVCodecDefault AVCodecDefault;
+
+/**
+ * AVCodec.
+ */
+typedef struct AVCodec {
+ /**
+ * Name of the codec implementation.
+ * The name is globally unique among encoders and among decoders (but an
+ * encoder and a decoder can share the same name).
+ * This is the primary way to find a codec from the user perspective.
+ */
+ const char *name;
+ enum AVMediaType type;
+ enum CodecID id;
+ int priv_data_size;
+ int (*init)(AVCodecContext *);
+ int (*encode)(AVCodecContext *, uint8_t *buf, int buf_size, void *data);
+ int (*close)(AVCodecContext *);
+ int (*decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt);
+ /**
+ * Codec capabilities.
+ * see CODEC_CAP_*
+ */
+ int capabilities;
+ struct AVCodec *next;
+ /**
+ * Flush buffers.
+ * Will be called when seeking
+ */
+ void (*flush)(AVCodecContext *);
+ const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0}
+ const enum PixelFormat *pix_fmts; ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1
+ /**
+ * Descriptive name for the codec, meant to be more human readable than name.
+ * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
+ */
+ const char *long_name;
+ const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0
+ const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1
+ const uint64_t *channel_layouts; ///< array of supported channel layouts, or NULL if unknown. array is terminated by 0
+ uint8_t max_lowres; ///< maximum value for lowres supported by the decoder
+ const AVClass *priv_class; ///< AVClass for the private context
+ const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}
+
+ /**
+ * @name Frame-level threading support functions
+ * @{
+ */
+ /**
+ * If defined, called on thread contexts when they are created.
+ * If the codec allocates writable tables in init(), re-allocate them here.
+ * priv_data will be set to a copy of the original.
+ */
+ int (*init_thread_copy)(AVCodecContext *);
+ /**
+ * Copy necessary context variables from a previous thread context to the current one.
+ * If not defined, the next thread will start automatically; otherwise, the codec
+ * must call ff_thread_finish_setup().
+ *
+ * dst and src will (rarely) point to the same context, in which case memcpy should be skipped.
+ */
+ int (*update_thread_context)(AVCodecContext *dst, const AVCodecContext *src);
+ /** @} */
+
+ /**
+ * Private codec-specific defaults.
+ */
+ const AVCodecDefault *defaults;
+
+ /**
+ * Initialize codec static data, called from avcodec_register().
+ */
+ void (*init_static_data)(struct AVCodec *codec);
+
+ /**
+ * Encode data to an AVPacket.
+ *
+ * @param avctx codec context
+ * @param avpkt output AVPacket (may contain a user-provided buffer)
+ * @param[in] frame AVFrame containing the raw data to be encoded
+ * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a
+ * non-empty packet was returned in avpkt.
+ * @return 0 on success, negative error code on failure
+ */
+ int (*encode2)(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame,
+ int *got_packet_ptr);
+} AVCodec;
+
+/**
+ * AVHWAccel.
+ */
+typedef struct AVHWAccel {
+ /**
+ * Name of the hardware accelerated codec.
+ * The name is globally unique among encoders and among decoders (but an
+ * encoder and a decoder can share the same name).
+ */
+ const char *name;
+
+ /**
+ * Type of codec implemented by the hardware accelerator.
+ *
+ * See AVMEDIA_TYPE_xxx
+ */
+ enum AVMediaType type;
+
+ /**
+ * Codec implemented by the hardware accelerator.
+ *
+ * See CODEC_ID_xxx
+ */
+ enum CodecID id;
+
+ /**
+ * Supported pixel format.
+ *
+ * Only hardware accelerated formats are supported here.
+ */
+ enum PixelFormat pix_fmt;
+
+ /**
+ * Hardware accelerated codec capabilities.
+ * see FF_HWACCEL_CODEC_CAP_*
+ */
+ int capabilities;
+
+ struct AVHWAccel *next;
+
+ /**
+ * Called at the beginning of each frame or field picture.
+ *
+ * Meaningful frame information (codec specific) is guaranteed to
+ * be parsed at this point. This function is mandatory.
+ *
+ * Note that buf can be NULL along with buf_size set to 0.
+ * Otherwise, the whole frame data is available at this point.
+ *
+ * @param avctx the codec context
+ * @param buf the frame data buffer base
+ * @param buf_size the size of the frame in bytes
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);
+
+ /**
+ * Callback for each slice.
+ *
+ * Meaningful slice information (codec specific) is guaranteed to
+ * be parsed at this point. This function is mandatory.
+ *
+ * @param avctx the codec context
+ * @param buf the slice data buffer base
+ * @param buf_size the size of the slice in bytes
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);
+
+ /**
+ * Called at the end of each frame or field picture.
+ *
+ * The whole picture is parsed at this point and can now be sent
+ * to the hardware accelerator. This function is mandatory.
+ *
+ * @param avctx the codec context
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*end_frame)(AVCodecContext *avctx);
+
+ /**
+ * Size of HW accelerator private data.
+ *
+ * Private data is allocated with av_mallocz() before
+ * AVCodecContext.get_buffer() and deallocated after
+ * AVCodecContext.release_buffer().
+ */
+ int priv_data_size;
+} AVHWAccel;
+
+/**
+ * Four components are given, that's all.
+ * The last component is alpha.
+ */
+typedef struct AVPicture {
+ uint8_t *data[AV_NUM_DATA_POINTERS];
+ int linesize[AV_NUM_DATA_POINTERS]; ///< number of bytes per line
+} AVPicture;
+
+#define AVPALETTE_SIZE 1024
+#define AVPALETTE_COUNT 256
+#if FF_API_PALETTE_CONTROL
+/**
+ * AVPaletteControl
+ * This structure defines a method for communicating palette changes
+ * between a demuxer and a decoder.
+ *
+ * @deprecated Use AVPacket to send palette changes instead.
+ * This is totally broken.
+ */
+typedef struct AVPaletteControl {
+
+ /* Demuxer sets this to 1 to indicate the palette has changed;
+ * decoder resets to 0. */
+ int palette_changed;
+
+ /* 4-byte ARGB palette entries, stored in native byte order; note that
+ * the individual palette components should be on an 8-bit scale; if
+ * the palette data comes from an IBM VGA native format, the component
+ * data is probably 6 bits in size and needs to be scaled. */
+ unsigned int palette[AVPALETTE_COUNT];
+
+} AVPaletteControl attribute_deprecated;
+#endif
+
+enum AVSubtitleType {
+ SUBTITLE_NONE,
+
+ SUBTITLE_BITMAP, ///< A bitmap, pict will be set
+
+ /**
+ * Plain text, the text field must be set by the decoder and is
+ * authoritative. ass and pict fields may contain approximations.
+ */
+ SUBTITLE_TEXT,
+
+ /**
+ * Formatted text, the ass field must be set by the decoder and is
+ * authoritative. pict and text fields may contain approximations.
+ */
+ SUBTITLE_ASS,
+};
+
+typedef struct AVSubtitleRect {
+ int x; ///< top left corner of pict, undefined when pict is not set
+ int y; ///< top left corner of pict, undefined when pict is not set
+ int w; ///< width of pict, undefined when pict is not set
+ int h; ///< height of pict, undefined when pict is not set
+ int nb_colors; ///< number of colors in pict, undefined when pict is not set
+
+ /**
+ * data+linesize for the bitmap of this subtitle.
+ * Can be set for text/ass as well once they were rendered.
+ */
+ AVPicture pict;
+ enum AVSubtitleType type;
+
+ char *text; ///< 0 terminated plain UTF-8 text
+
+ /**
+ * 0 terminated ASS/SSA compatible event line.
+ * The presentation of this is unaffected by the other values in this
+ * struct.
+ */
+ char *ass;
+} AVSubtitleRect;
+
+typedef struct AVSubtitle {
+ uint16_t format; /* 0 = graphics */
+ uint32_t start_display_time; /* relative to packet pts, in ms */
+ uint32_t end_display_time; /* relative to packet pts, in ms */
+ unsigned num_rects;
+ AVSubtitleRect **rects;
+ int64_t pts; ///< Same as packet pts, in AV_TIME_BASE
+} AVSubtitle;
+
+/* packet functions */
+
+/**
+ * @deprecated use NULL instead
+ */
+attribute_deprecated void av_destruct_packet_nofree(AVPacket *pkt);
+
+/**
+ * Default packet destructor.
+ */
+void av_destruct_packet(AVPacket *pkt);
+
+/**
+ * Initialize optional fields of a packet with default values.
+ *
+ * @param pkt packet
+ */
+void av_init_packet(AVPacket *pkt);
+
+/**
+ * Allocate the payload of a packet and initialize its fields with
+ * default values.
+ *
+ * @param pkt packet
+ * @param size wanted payload size
+ * @return 0 if OK, AVERROR_xxx otherwise
+ */
+int av_new_packet(AVPacket *pkt, int size);
+
+/**
+ * Reduce packet size, correctly zeroing padding
+ *
+ * @param pkt packet
+ * @param size new size
+ */
+void av_shrink_packet(AVPacket *pkt, int size);
+
+/**
+ * Increase packet size, correctly zeroing padding
+ *
+ * @param pkt packet
+ * @param grow_by number of bytes by which to increase the size of the packet
+ */
+int av_grow_packet(AVPacket *pkt, int grow_by);
+
+/**
+ * @warning This is a hack - the packet memory allocation stuff is broken. The
+ * packet is allocated if it was not really allocated.
+ */
+int av_dup_packet(AVPacket *pkt);
+
+/**
+ * Free a packet.
+ *
+ * @param pkt packet to free
+ */
+void av_free_packet(AVPacket *pkt);
+
+/**
+ * Allocate new side information for a packet.
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param size side information size
+ * @return pointer to freshly allocated data or NULL otherwise
+ */
+uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ int size);
+
+/**
+ * Get side information from packet.
+ *
+ * @param pkt packet
+ * @param type desired side information type
+ * @param size pointer for side information size to store (optional)
+ * @return pointer to data if present or NULL otherwise
+ */
+uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ int *size);
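+
+/*
+ * A minimal sketch of attaching and reading packet side data with the two
+ * functions above. AV_PKT_DATA_PALETTE is used only as an example side-data
+ * type; pkt is assumed to be a valid AVPacket and my_palette/use_palette are
+ * placeholders for caller code.
+ *
+ * @code
+ * int side_size;
+ * uint8_t *pal = av_packet_new_side_data(&pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE);
+ * if (pal)
+ *     memcpy(pal, my_palette, AVPALETTE_SIZE);
+ *
+ * uint8_t *data = av_packet_get_side_data(&pkt, AV_PKT_DATA_PALETTE, &side_size);
+ * if (data)
+ *     use_palette(data, side_size);
+ * @endcode
+ */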
+
+/* resample.c */
+
+struct ReSampleContext;
+struct AVResampleContext;
+
+typedef struct ReSampleContext ReSampleContext;
+
+/**
+ * Initialize audio resampling context.
+ *
+ * @param output_channels number of output channels
+ * @param input_channels number of input channels
+ * @param output_rate output sample rate
+ * @param input_rate input sample rate
+ * @param sample_fmt_out requested output sample format
+ * @param sample_fmt_in input sample format
+ * @param filter_length length of each FIR filter in the filterbank relative to the cutoff frequency
+ * @param log2_phase_count log2 of the number of entries in the polyphase filterbank
+ * @param linear if 1 then the FIR filter used will be linearly interpolated
+ *               between the 2 closest; if 0, the closest will be used
+ * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate
+ * @return allocated ReSampleContext, NULL if error occurred
+ */
+ReSampleContext *av_audio_resample_init(int output_channels, int input_channels,
+ int output_rate, int input_rate,
+ enum AVSampleFormat sample_fmt_out,
+ enum AVSampleFormat sample_fmt_in,
+ int filter_length, int log2_phase_count,
+ int linear, double cutoff);
+
+int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples);
+
+/**
+ * Free resample context.
+ *
+ * @param s a non-NULL pointer to a resample context previously
+ * created with av_audio_resample_init()
+ */
+void audio_resample_close(ReSampleContext *s);
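+
+/*
+ * A sketch of converting interleaved S16 stereo audio from 48 kHz to 44.1 kHz
+ * with the high-level API above. The filter parameters (16, 10, 0, 0.8) are
+ * common example values, not requirements; in_buf/out_buf/in_samples are
+ * assumed caller-provided, and out_buf must be large enough for the output.
+ *
+ * @code
+ * ReSampleContext *rs = av_audio_resample_init(2, 2, 44100, 48000,
+ *                                              AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16,
+ *                                              16, 10, 0, 0.8);
+ * int out_samples = audio_resample(rs, out_buf, in_buf, in_samples);
+ * audio_resample_close(rs);
+ * @endcode
+ */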
+
+
+/**
+ * Initialize an audio resampler.
+ * Note, if either rate is not an integer then simply scale both rates up so they are.
+ * @param filter_length length of each FIR filter in the filterbank relative to the cutoff freq
+ * @param log2_phase_count log2 of the number of entries in the polyphase filterbank
+ * @param linear If 1 then the FIR filter used will be linearly interpolated
+ *               between the 2 closest; if 0, the closest will be used
+ * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate
+ */
+struct AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_length, int log2_phase_count, int linear, double cutoff);
+
+/**
+ * Resample an array of samples using a previously configured context.
+ * @param src an array of unconsumed samples
+ * @param consumed the number of samples of src which have been consumed is returned here
+ * @param src_size the number of unconsumed samples available
+ * @param dst_size the amount of space in samples available in dst
+ * @param update_ctx If this is 0 then the context will not be modified, that way several channels can be resampled with the same context.
+ * @return the number of samples written in dst or -1 if an error occurred
+ */
+int av_resample(struct AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int dst_size, int update_ctx);
+
+
+/**
+ * Compensate samplerate/timestamp drift. The compensation is done by changing
+ * the resampler parameters, so no audible clicks or similar distortions occur
+ * @param compensation_distance distance in output samples over which the compensation should be performed
+ * @param sample_delta number of output samples which should be output less
+ *
+ * example: av_resample_compensate(c, 10, 500)
+ * here instead of 510 samples only 500 samples would be output
+ *
+ * note, due to rounding the actual compensation might be slightly different,
+ * especially if the compensation_distance is large and the in_rate used during init is small
+ */
+void av_resample_compensate(struct AVResampleContext *c, int sample_delta, int compensation_distance);
+void av_resample_close(struct AVResampleContext *c);
+
+/**
+ * Allocate memory for a picture. Call avpicture_free() to free it.
+ *
+ * @see avpicture_fill()
+ *
+ * @param picture the picture to be filled in
+ * @param pix_fmt the format of the picture
+ * @param width the width of the picture
+ * @param height the height of the picture
+ * @return zero if successful, a negative value if not
+ */
+int avpicture_alloc(AVPicture *picture, enum PixelFormat pix_fmt, int width, int height);
+
+/**
+ * Free a picture previously allocated by avpicture_alloc().
+ * The data buffer used by the AVPicture is freed, but the AVPicture structure
+ * itself is not.
+ *
+ * @param picture the AVPicture to be freed
+ */
+void avpicture_free(AVPicture *picture);
+
+/**
+ * Fill in the AVPicture fields.
+ * The fields of the given AVPicture are filled in by using the 'ptr' address
+ * which points to the image data buffer. Depending on the specified picture
+ * format, one or multiple image data pointers and line sizes will be set.
+ * If a planar format is specified, several pointers will be set pointing to
+ * the different picture planes and the line sizes of the different planes
+ * will be stored in the linesize array.
+ * Call with ptr == NULL to get the required size for the ptr buffer.
+ *
+ * To allocate the buffer and fill in the AVPicture fields in one call,
+ * use avpicture_alloc().
+ *
+ * @param picture AVPicture whose fields are to be filled in
+ * @param ptr Buffer which will contain or contains the actual image data
+ * @param pix_fmt The format in which the picture data is stored.
+ * @param width the width of the image in pixels
+ * @param height the height of the image in pixels
+ * @return size of the image data in bytes
+ */
+int avpicture_fill(AVPicture *picture, uint8_t *ptr,
+ enum PixelFormat pix_fmt, int width, int height);
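+
+/*
+ * A sketch of allocating a raw YUV420P buffer manually and wrapping it in an
+ * AVPicture with the functions above (avpicture_alloc() would do both steps
+ * in one call). width and height are assumed caller-provided.
+ *
+ * @code
+ * AVPicture pic;
+ * int size = avpicture_get_size(PIX_FMT_YUV420P, width, height);
+ * uint8_t *buf = av_malloc(size);
+ * avpicture_fill(&pic, buf, PIX_FMT_YUV420P, width, height);
+ * // pic.data[0..2] / pic.linesize[0..2] now describe the planes inside buf
+ * @endcode
+ */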
+
+/**
+ * Copy pixel data from an AVPicture into a buffer.
+ * The data is stored compactly, without any gaps for alignment or padding
+ * which may be applied by avpicture_fill().
+ *
+ * @see avpicture_get_size()
+ *
+ * @param[in] src AVPicture containing image data
+ * @param[in] pix_fmt The format in which the picture data is stored.
+ * @param[in] width the width of the image in pixels.
+ * @param[in] height the height of the image in pixels.
+ * @param[out] dest A buffer into which picture data will be copied.
+ * @param[in] dest_size The size of 'dest'.
+ * @return The number of bytes written to dest, or a negative value (error code) on error.
+ */
+int avpicture_layout(const AVPicture* src, enum PixelFormat pix_fmt, int width, int height,
+ unsigned char *dest, int dest_size);
+
+/**
+ * Calculate the size in bytes that a picture of the given width and height
+ * would occupy if stored in the given picture format.
+ * Note that this returns the size of a compact representation as generated
+ * by avpicture_layout(), which can be smaller than the size required for e.g.
+ * avpicture_fill().
+ *
+ * @param pix_fmt the given picture format
+ * @param width the width of the image
+ * @param height the height of the image
+ * @return Image data size in bytes or -1 on error (e.g. too large dimensions).
+ */
+int avpicture_get_size(enum PixelFormat pix_fmt, int width, int height);
+void avcodec_get_chroma_sub_sample(enum PixelFormat pix_fmt, int *h_shift, int *v_shift);
+
+#if FF_API_GET_PIX_FMT_NAME
+/**
+ * @deprecated Deprecated in favor of av_get_pix_fmt_name().
+ */
+attribute_deprecated
+const char *avcodec_get_pix_fmt_name(enum PixelFormat pix_fmt);
+#endif
+
+void avcodec_set_dimensions(AVCodecContext *s, int width, int height);
+
+/**
+ * Return a value representing the fourCC code associated with the
+ * pixel format pix_fmt, or 0 if no associated fourCC code can be
+ * found.
+ */
+unsigned int avcodec_pix_fmt_to_codec_tag(enum PixelFormat pix_fmt);
+
+/**
+ * Put a string representing the codec tag codec_tag in buf.
+ *
+ * @param buf_size size in bytes of buf
+ * @return the length of the string that would have been generated if
+ * enough space had been available, excluding the trailing null
+ */
+size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag);
+
+#define FF_LOSS_RESOLUTION 0x0001 /**< loss due to resolution change */
+#define FF_LOSS_DEPTH 0x0002 /**< loss due to color depth change */
+#define FF_LOSS_COLORSPACE 0x0004 /**< loss due to color space conversion */
+#define FF_LOSS_ALPHA 0x0008 /**< loss of alpha bits */
+#define FF_LOSS_COLORQUANT 0x0010 /**< loss due to color quantization */
+#define FF_LOSS_CHROMA 0x0020 /**< loss of chroma (e.g. RGB to gray conversion) */
+
+/**
+ * Compute what kind of losses will occur when converting from one specific
+ * pixel format to another.
+ * When converting from one pixel format to another, information loss may occur.
+ * For example, when converting from RGB24 to GRAY, the color information will
+ * be lost. Similarly, other losses occur when converting from some formats to
+ * other formats. These losses can involve loss of chroma, but also loss of
+ * resolution, loss of color depth, loss due to the color space conversion, loss
+ * of the alpha bits or loss due to color quantization.
+ * avcodec_get_pix_fmt_loss() informs you about the various types of losses
+ * which will occur when converting from one pixel format to another.
+ *
+ * @param[in] dst_pix_fmt destination pixel format
+ * @param[in] src_pix_fmt source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @return Combination of flags informing you what kind of losses will occur.
+ */
+int avcodec_get_pix_fmt_loss(enum PixelFormat dst_pix_fmt, enum PixelFormat src_pix_fmt,
+ int has_alpha);
+
+/**
+ * Find the best pixel format to convert to given a certain source pixel
+ * format. When converting from one pixel format to another, information loss
+ * may occur. For example, when converting from RGB24 to GRAY, the color
+ * information will be lost. Similarly, other losses occur when converting from
+ * some formats to other formats. avcodec_find_best_pix_fmt() searches which of
+ * the given pixel formats should be used to suffer the least amount of loss.
+ * The pixel formats from which it chooses one, are determined by the
+ * pix_fmt_mask parameter.
+ *
+ * @code
+ * src_pix_fmt = PIX_FMT_YUV420P;
+ * pix_fmt_mask = (1 << PIX_FMT_YUV422P) | (1 << PIX_FMT_RGB24);
+ * dst_pix_fmt = avcodec_find_best_pix_fmt(pix_fmt_mask, src_pix_fmt, alpha, &loss);
+ * @endcode
+ *
+ * @param[in] pix_fmt_mask bitmask determining which pixel format to choose from
+ * @param[in] src_pix_fmt source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur.
+ * @return The best pixel format to convert to or -1 if none was found.
+ */
+enum PixelFormat avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, enum PixelFormat src_pix_fmt,
+ int has_alpha, int *loss_ptr);
+
+#if FF_API_GET_ALPHA_INFO
+#define FF_ALPHA_TRANSP 0x0001 /* image has some totally transparent pixels */
+#define FF_ALPHA_SEMI_TRANSP 0x0002 /* image has some transparent pixels */
+
+/**
+ * Tell if an image really has transparent alpha values.
+ * @return ORed mask of FF_ALPHA_xxx constants
+ */
+attribute_deprecated
+int img_get_alpha_info(const AVPicture *src,
+ enum PixelFormat pix_fmt, int width, int height);
+#endif
+
+/* Deinterlace a picture; returns -1 if deinterlacing is not supported. */
+int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
+ enum PixelFormat pix_fmt, int width, int height);
+
+/* external high level API */
+
+/**
+ * If c is NULL, returns the first registered codec;
+ * if c is non-NULL, returns the next registered codec after c,
+ * or NULL if c is the last one.
+ */
+AVCodec *av_codec_next(AVCodec *c);
+
+/**
+ * Return the LIBAVCODEC_VERSION_INT constant.
+ */
+unsigned avcodec_version(void);
+
+/**
+ * Return the libavcodec build-time configuration.
+ */
+const char *avcodec_configuration(void);
+
+/**
+ * Return the libavcodec license.
+ */
+const char *avcodec_license(void);
+
+#if FF_API_AVCODEC_INIT
+/**
+ * @deprecated this function is called automatically from avcodec_register()
+ * and avcodec_register_all(), there is no need to call it manually
+ */
+attribute_deprecated
+void avcodec_init(void);
+#endif
+
+/**
+ * Register the codec codec and initialize libavcodec.
+ *
+ * @warning either this function or avcodec_register_all() must be called
+ * before any other libavcodec functions.
+ *
+ * @see avcodec_register_all()
+ */
+void avcodec_register(AVCodec *codec);
+
+/**
+ * Find a registered encoder with a matching codec ID.
+ *
+ * @param id CodecID of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_encoder(enum CodecID id);
+
+/**
+ * Find a registered encoder with the specified name.
+ *
+ * @param name name of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_encoder_by_name(const char *name);
+
+/**
+ * Find a registered decoder with a matching codec ID.
+ *
+ * @param id CodecID of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_decoder(enum CodecID id);
+
+/**
+ * Find a registered decoder with the specified name.
+ *
+ * @param name name of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_decoder_by_name(const char *name);
+void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode);
+
+/**
+ * Return a name for the specified profile, if available.
+ *
+ * @param codec the codec that is searched for the given profile
+ * @param profile the profile value for which a name is requested
+ * @return A name for the profile if found, NULL otherwise.
+ */
+const char *av_get_profile_name(const AVCodec *codec, int profile);
+
+#if FF_API_ALLOC_CONTEXT
+/**
+ * Set the fields of the given AVCodecContext to default values.
+ *
+ * @param s The AVCodecContext of which the fields should be set to default values.
+ * @deprecated use avcodec_get_context_defaults3
+ */
+attribute_deprecated
+void avcodec_get_context_defaults(AVCodecContext *s);
+
+/** THIS FUNCTION IS NOT YET PART OF THE PUBLIC API!
+ * we WILL change its arguments and name a few times! */
+attribute_deprecated
+void avcodec_get_context_defaults2(AVCodecContext *s, enum AVMediaType);
+#endif
+
+/**
+ * Set the fields of the given AVCodecContext to default values corresponding
+ * to the given codec (defaults may be codec-dependent).
+ *
+ * Do not call this function if a non-NULL codec has been passed
+ * to avcodec_alloc_context3() that allocated this AVCodecContext.
+ * If codec is non-NULL, it is illegal to call avcodec_open2() with a
+ * different codec on this AVCodecContext.
+ */
+int avcodec_get_context_defaults3(AVCodecContext *s, AVCodec *codec);
+
+#if FF_API_ALLOC_CONTEXT
+/**
+ * Allocate an AVCodecContext and set its fields to default values. The
+ * resulting struct can be deallocated by simply calling av_free().
+ *
+ * @return An AVCodecContext filled with default values or NULL on failure.
+ * @see avcodec_get_context_defaults
+ *
+ * @deprecated use avcodec_alloc_context3()
+ */
+attribute_deprecated
+AVCodecContext *avcodec_alloc_context(void);
+
+/** THIS FUNCTION IS NOT YET PART OF THE PUBLIC API!
+ * we WILL change its arguments and name a few times! */
+attribute_deprecated
+AVCodecContext *avcodec_alloc_context2(enum AVMediaType);
+#endif
+
+/**
+ * Allocate an AVCodecContext and set its fields to default values. The
+ * resulting struct can be deallocated by calling avcodec_close() on it followed
+ * by av_free().
+ *
+ * @param codec if non-NULL, allocate private data and initialize defaults
+ * for the given codec. It is illegal to then call avcodec_open2()
+ * with a different codec.
+ *
+ * @return An AVCodecContext filled with default values or NULL on failure.
+ * @see avcodec_get_context_defaults
+ */
+AVCodecContext *avcodec_alloc_context3(AVCodec *codec);
+
+/**
+ * Copy the settings of the source AVCodecContext into the destination
+ * AVCodecContext. The resulting destination codec context will be
+ * unopened, i.e. you are required to call avcodec_open2() before you
+ * can use this AVCodecContext to decode/encode video/audio data.
+ *
+ * @param dest target codec context, should be initialized with
+ * avcodec_alloc_context3(), but otherwise uninitialized
+ * @param src source codec context
+ * @return AVERROR() on error (e.g. memory allocation error), 0 on success
+ */
+int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src);
+
+/**
+ * Set the fields of the given AVFrame to default values.
+ *
+ * @param pic The AVFrame of which the fields should be set to default values.
+ */
+void avcodec_get_frame_defaults(AVFrame *pic);
+
+/**
+ * Allocate an AVFrame and set its fields to default values. The resulting
+ * struct can be deallocated by simply calling av_free().
+ *
+ * @return An AVFrame filled with default values or NULL on failure.
+ * @see avcodec_get_frame_defaults
+ */
+AVFrame *avcodec_alloc_frame(void);
+
+int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);
+void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);
+int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic);
+
+/**
+ * Return the amount of padding in pixels which the get_buffer callback must
+ * provide around the edge of the image for codecs which do not have the
+ * CODEC_FLAG_EMU_EDGE flag.
+ *
+ * @return Required padding in pixels.
+ */
+unsigned avcodec_get_edge_width(void);
+/**
+ * Modify width and height values so that they will result in a memory
+ * buffer that is acceptable for the codec if you do not use any horizontal
+ * padding.
+ *
+ * May only be used if a codec with CODEC_CAP_DR1 has been opened.
+ * If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased
+ * according to avcodec_get_edge_width() before.
+ */
+void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);
+/**
+ * Modify width and height values so that they will result in a memory
+ * buffer that is acceptable for the codec if you also ensure that all
+ * line sizes are a multiple of the respective linesize_align[i].
+ *
+ * May only be used if a codec with CODEC_CAP_DR1 has been opened.
+ * If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased
+ * according to avcodec_get_edge_width() before.
+ */
+void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
+ int linesize_align[AV_NUM_DATA_POINTERS]);
+
+enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat * fmt);
+
+#if FF_API_THREAD_INIT
+/**
+ * @deprecated Set s->thread_count before calling avcodec_open2() instead of calling this.
+ */
+attribute_deprecated
+int avcodec_thread_init(AVCodecContext *s, int thread_count);
+#endif
+
+int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size);
+int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count);
+//FIXME func typedef
+
+#if FF_API_AVCODEC_OPEN
+/**
+ * Initialize the AVCodecContext to use the given AVCodec. Prior to using this
+ * function the context has to be allocated.
+ *
+ * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
+ * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
+ * retrieving a codec.
+ *
+ * @warning This function is not thread safe!
+ *
+ * @code
+ * avcodec_register_all();
+ * codec = avcodec_find_decoder(CODEC_ID_H264);
+ * if (!codec)
+ * exit(1);
+ *
+ * context = avcodec_alloc_context3(codec);
+ *
+ * if (avcodec_open(context, codec) < 0)
+ * exit(1);
+ * @endcode
+ *
+ * @param avctx The context which will be set up to use the given codec.
+ * @param codec The codec to use within the context.
+ * @return zero on success, a negative value on error
+ * @see avcodec_alloc_context3, avcodec_find_decoder, avcodec_find_encoder, avcodec_close
+ *
+ * @deprecated use avcodec_open2
+ */
+attribute_deprecated
+int avcodec_open(AVCodecContext *avctx, AVCodec *codec);
+#endif
+
+/**
+ * Initialize the AVCodecContext to use the given AVCodec. Prior to using this
+ * function the context has to be allocated with avcodec_alloc_context3().
+ *
+ * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
+ * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
+ * retrieving a codec.
+ *
+ * @warning This function is not thread safe!
+ *
+ * @code
+ * avcodec_register_all();
+ * av_dict_set(&opts, "b", "2.5M", 0);
+ * codec = avcodec_find_decoder(CODEC_ID_H264);
+ * if (!codec)
+ * exit(1);
+ *
+ * context = avcodec_alloc_context3(codec);
+ *
+ * if (avcodec_open2(context, codec, opts) < 0)
+ * exit(1);
+ * @endcode
+ *
+ * @param avctx The context to initialize.
+ * @param codec The codec to open this context for. If a non-NULL codec has been
+ * previously passed to avcodec_alloc_context3() or
+ * avcodec_get_context_defaults3() for this context, then this
+ * parameter MUST be either NULL or equal to the previously passed
+ * codec.
+ * @param options A dictionary filled with AVCodecContext and codec-private options.
+ * On return this object will be filled with options that were not found.
+ *
+ * @return zero on success, a negative value on error
+ * @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(),
+ * av_dict_set(), av_opt_find().
+ */
+int avcodec_open2(AVCodecContext *avctx, AVCodec *codec, AVDictionary **options);
+
+#if FF_API_OLD_DECODE_AUDIO
+/**
+ * Wrapper function which calls avcodec_decode_audio4.
+ *
+ * @deprecated Use avcodec_decode_audio4 instead.
+ *
+ * Decode the audio frame of size avpkt->size from avpkt->data into samples.
+ * Some decoders may support multiple frames in a single AVPacket, such
+ * decoders would then just decode the first frame. In this case,
+ * avcodec_decode_audio3 has to be called again with an AVPacket that contains
+ * the remaining data in order to decode the second frame etc.
+ * If no frame could be output, frame_size_ptr is zero. Otherwise, it is the
+ * decompressed frame size in bytes.
+ *
+ * @warning You must set frame_size_ptr to the allocated size of the
+ * output buffer before calling avcodec_decode_audio3().
+ *
+ * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than
+ * the actual read bytes because some optimized bitstream readers read 32 or 64
+ * bits at once and could read over the end.
+ *
+ * @warning The end of the input buffer avpkt->data should be set to 0 to ensure that
+ * no overreading happens for damaged MPEG streams.
+ *
+ * @warning You must not provide a custom get_buffer() when using
+ * avcodec_decode_audio3(). Doing so will override it with
+ * avcodec_default_get_buffer. Use avcodec_decode_audio4() instead,
+ * which does allow the application to provide a custom get_buffer().
+ *
+ * @note You might have to align the input buffer avpkt->data and output buffer
+ * samples. The alignment requirements depend on the CPU: On some CPUs it isn't
+ * necessary at all, on others it won't work at all if not aligned and on others
+ * it will work but it will have an impact on performance.
+ *
+ * In practice, avpkt->data should have 4 byte alignment at minimum and
+ * samples should be 16 byte aligned unless the CPU doesn't need it
+ * (AltiVec and SSE do).
+ *
+ * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay
+ * between input and output, these need to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to return the remaining frames.
+ *
+ * @param avctx the codec context
+ * @param[out] samples the output buffer, sample type in avctx->sample_fmt
+ * If the sample format is planar, each channel plane will
+ * be the same size, with no padding between channels.
+ * @param[in,out] frame_size_ptr the output buffer size in bytes
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ * You can create such a packet with av_init_packet() and by then setting
+ * data and size; some decoders might in addition need other fields.
+ * All decoders are designed to use the least fields possible though.
+ * @return On error a negative value is returned, otherwise the number of bytes
+ * used or zero if no frame data was decompressed (used) from the input AVPacket.
+ */
+attribute_deprecated int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
+ int *frame_size_ptr,
+ AVPacket *avpkt);
+#endif
+
+/**
+ * Decode the audio frame of size avpkt->size from avpkt->data into frame.
+ *
+ * Some decoders may support multiple frames in a single AVPacket. Such
+ * decoders would then just decode the first frame. In this case,
+ * avcodec_decode_audio4 has to be called again with an AVPacket containing
+ * the remaining data in order to decode the second frame, etc...
+ * Even if no frames are returned, the packet needs to be fed to the decoder
+ * with remaining data until it is completely consumed or an error occurs.
+ *
+ * @warning The input buffer, avpkt->data must be FF_INPUT_BUFFER_PADDING_SIZE
+ * larger than the actual read bytes because some optimized bitstream
+ * readers read 32 or 64 bits at once and could read over the end.
+ *
+ * @note You might have to align the input buffer. The alignment requirements
+ * depend on the CPU and the decoder.
+ *
+ * @param avctx the codec context
+ * @param[out] frame The AVFrame in which to store decoded audio samples.
+ * Decoders request a buffer of a particular size by setting
+ * AVFrame.nb_samples prior to calling get_buffer(). The
+ * decoder may, however, only utilize part of the buffer by
+ * setting AVFrame.nb_samples to a smaller value in the
+ * output frame.
+ * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is
+ * non-zero.
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ * At least avpkt->data and avpkt->size should be set. Some
+ * decoders might also require additional fields to be set.
+ * @return A negative error code is returned if an error occurred during
+ * decoding, otherwise the number of bytes consumed from the input
+ * AVPacket is returned.
+ */
+int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame,
+ int *got_frame_ptr, AVPacket *avpkt);
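+
+/*
+ * A sketch of draining one AVPacket with avcodec_decode_audio4(). ctx and pkt
+ * are assumed to be an opened audio decoder context and a demuxed packet;
+ * error handling is trimmed.
+ *
+ * @code
+ * AVFrame *frame = avcodec_alloc_frame();
+ * int got_frame;
+ * while (pkt.size > 0) {
+ *     int len = avcodec_decode_audio4(ctx, frame, &got_frame, &pkt);
+ *     if (len < 0)
+ *         break; // decode error
+ *     if (got_frame) {
+ *         int plane_size = frame->nb_samples * av_get_bytes_per_sample(ctx->sample_fmt);
+ *         // for packed formats, frame->data[0] holds plane_size * ctx->channels bytes
+ *     }
+ *     pkt.data += len;
+ *     pkt.size -= len;
+ * }
+ * @endcode
+ */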
+
+/**
+ * Decode the video frame of size avpkt->size from avpkt->data into picture.
+ * Some decoders may support multiple frames in a single AVPacket; such
+ * decoders would then just decode the first frame.
+ *
+ * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than
+ * the actual read bytes because some optimized bitstream readers read 32 or 64
+ * bits at once and could read over the end.
+ *
+ * @warning The end of the input buffer buf should be set to 0 to ensure that
+ * no overreading happens for damaged MPEG streams.
+ *
+ * @note You might have to align the input buffer avpkt->data.
+ * The alignment requirements depend on the CPU: on some CPUs it isn't
+ * necessary at all, on others it won't work at all if not aligned and on others
+ * it will work but it will have an impact on performance.
+ *
+ * In practice, avpkt->data should have 4 byte alignment at minimum.
+ *
+ * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay
+ * between input and output, these need to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to return the remaining frames.
+ *
+ * @param avctx the codec context
+ * @param[out] picture The AVFrame in which the decoded video frame will be stored.
+ * Use avcodec_alloc_frame to get an AVFrame; the codec will
+ * allocate memory for the actual bitmap.
+ * With the default get/release_buffer(), the decoder frees/reuses the bitmap as it sees fit.
+ * With overridden get/release_buffer() (needs CODEC_CAP_DR1), the user decides into what buffer the decoder
+ * decodes, and the decoder tells the user once it does not need the data anymore;
+ * the user app can at this point free/reuse/keep the memory as it sees fit.
+ *
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ * You can create such a packet with av_init_packet() and by then setting
+ * data and size; some decoders might in addition need other fields like
+ * flags&AV_PKT_FLAG_KEY. All decoders are designed to use the least
+ * fields possible.
+ * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero.
+ * @return On error a negative value is returned, otherwise the number of bytes
+ * used or zero if no frame could be decompressed.
+ */
+int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
+ int *got_picture_ptr,
+ AVPacket *avpkt);
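+
+/*
+ * A sketch of decoding one video packet with avcodec_decode_video2(). ctx and
+ * pkt are assumed to be an opened video decoder context and a demuxed packet.
+ *
+ * @code
+ * AVFrame *picture = avcodec_alloc_frame();
+ * int got_picture;
+ * if (avcodec_decode_video2(ctx, picture, &got_picture, &pkt) < 0)
+ *     exit(1);
+ * if (got_picture) {
+ *     // picture->data[] / picture->linesize[] describe the decoded frame,
+ *     // in the pixel format given by ctx->pix_fmt
+ * }
+ * @endcode
+ */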
+
+/**
+ * Decode a subtitle message.
+ * Return a negative value on error, otherwise return the number of bytes used.
+ * If no subtitle could be decompressed, got_sub_ptr is zero.
+ * Otherwise, the subtitle is stored in *sub.
+ * Note that CODEC_CAP_DR1 is not available for subtitle codecs. This is for
+ * simplicity, because the performance difference is expected to be negligible
+ * and reusing a get_buffer written for video codecs would probably perform badly
+ * due to a potentially very different allocation pattern.
+ *
+ * @param avctx the codec context
+ * @param[out] sub The AVSubtitle in which the decoded subtitle will be stored, must be
+ freed with avsubtitle_free if *got_sub_ptr is set.
+ * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero.
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ */
+int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
+ int *got_sub_ptr,
+ AVPacket *avpkt);
+
+/**
+ * Free all allocated data in the given subtitle struct.
+ *
+ * @param sub AVSubtitle to free.
+ */
+void avsubtitle_free(AVSubtitle *sub);
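+
+/*
+ * A sketch of decoding a subtitle packet and releasing it. ctx and pkt are
+ * assumed to be an opened subtitle decoder context and a demuxed packet;
+ * render_rect is a placeholder for caller code.
+ *
+ * @code
+ * AVSubtitle sub;
+ * int got_sub;
+ * if (avcodec_decode_subtitle2(ctx, &sub, &got_sub, &pkt) >= 0 && got_sub) {
+ *     unsigned i;
+ *     for (i = 0; i < sub.num_rects; i++)
+ *         render_rect(sub.rects[i]);
+ *     avsubtitle_free(&sub);
+ * }
+ * @endcode
+ */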
+
+#if FF_API_OLD_ENCODE_AUDIO
+/**
+ * Encode an audio frame from samples into buf.
+ *
+ * @deprecated Use avcodec_encode_audio2 instead.
+ *
+ * @note The output buffer should be at least FF_MIN_BUFFER_SIZE bytes large.
+ * However, for codecs with avctx->frame_size equal to 0 (e.g. PCM) the user
+ * will know how much space is needed because it depends on the value passed
+ * in buf_size as described below. In that case a lower value can be used.
+ *
+ * @param avctx the codec context
+ * @param[out] buf the output buffer
+ * @param[in] buf_size the output buffer size
+ * @param[in] samples the input buffer containing the samples
+ * The number of samples read from this buffer is frame_size*channels,
+ * both of which are defined in avctx.
+ * For codecs which have avctx->frame_size equal to 0 (e.g. PCM) the number of
+ * samples read from samples is equal to:
+ * buf_size * 8 / (avctx->channels * av_get_bits_per_sample(avctx->codec_id))
+ * This also implies that av_get_bits_per_sample() must not return 0 for these
+ * codecs.
+ * @return On error a negative value is returned, on success zero or the number
+ * of bytes used to encode the data read from the input buffer.
+ */
+int attribute_deprecated avcodec_encode_audio(AVCodecContext *avctx,
+ uint8_t *buf, int buf_size,
+ const short *samples);
+#endif
+
+/**
+ * Encode a frame of audio.
+ *
+ * Takes input samples from frame and writes the next output packet, if
+ * available, to avpkt. The output packet does not necessarily contain data for
+ * the most recent frame, as encoders can delay, split, and combine input frames
+ * internally as needed.
+ *
+ * @param avctx codec context
+ * @param avpkt output AVPacket.
+ * The user can supply an output buffer by setting
+ * avpkt->data and avpkt->size prior to calling the
+ * function, but if the size of the user-provided data is not
+ * large enough, encoding will fail. All other AVPacket fields
+ * will be reset by the encoder using av_init_packet(). If
+ * avpkt->data is NULL, the encoder will allocate it.
+ * The encoder will set avpkt->size to the size of the
+ * output packet.
+ * @param[in] frame AVFrame containing the raw audio data to be encoded.
+ * May be NULL when flushing an encoder that has the
+ * CODEC_CAP_DELAY capability set.
+ * There are 2 codec capabilities that affect the allowed
+ * values of frame->nb_samples.
+ * If CODEC_CAP_SMALL_LAST_FRAME is set, then only the final
+ * frame may be smaller than avctx->frame_size, and all other
+ * frames must be equal to avctx->frame_size.
+ * If CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame
+ * can have any number of samples.
+ * If neither is set, frame->nb_samples must be equal to
+ * avctx->frame_size for all frames.
+ * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
+ * output packet is non-empty, and to 0 if it is
+ * empty. If the function returns an error, the
+ * packet can be assumed to be invalid, and the
+ * value of got_packet_ptr is undefined and should
+ * not be used.
+ * @return 0 on success, negative error code on failure
+ */
+int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr);
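+
+/*
+ * A sketch of encoding one audio frame with avcodec_encode_audio2(), letting
+ * the encoder allocate the output packet. frame is assumed to hold
+ * ctx->frame_size samples in ctx->sample_fmt; write_packet is a placeholder.
+ *
+ * @code
+ * AVPacket pkt;
+ * int got_packet;
+ * av_init_packet(&pkt);
+ * pkt.data = NULL; // encoder allocates the buffer
+ * pkt.size = 0;
+ * if (avcodec_encode_audio2(ctx, &pkt, frame, &got_packet) == 0 && got_packet) {
+ *     write_packet(pkt.data, pkt.size);
+ *     av_free_packet(&pkt);
+ * }
+ * @endcode
+ */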
+
+/**
+ * Fill audio frame data and linesize.
+ * AVFrame extended_data channel pointers are allocated if necessary for
+ * planar audio.
+ *
+ * @param frame the AVFrame
+ * frame->nb_samples must be set prior to calling the
+ * function. This function fills in frame->data,
+ * frame->extended_data, frame->linesize[0].
+ * @param nb_channels channel count
+ * @param sample_fmt sample format
+ * @param buf buffer to use for frame data
+ * @param buf_size size of buffer
+ * @param align plane size sample alignment
+ * @return 0 on success, negative error code on failure
+ */
+int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
+ enum AVSampleFormat sample_fmt, const uint8_t *buf,
+ int buf_size, int align);
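+
+/*
+ * A sketch of wrapping a caller-owned interleaved S16 buffer in an AVFrame
+ * with avcodec_fill_audio_frame() before encoding. align = 1 requests no
+ * extra plane alignment so the exact buf_size computed below is sufficient.
+ *
+ * @code
+ * AVFrame *frame = avcodec_alloc_frame();
+ * int buf_size   = ctx->frame_size * ctx->channels *
+ *                  av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
+ * uint8_t *buf   = av_malloc(buf_size);
+ * frame->nb_samples = ctx->frame_size;
+ * avcodec_fill_audio_frame(frame, ctx->channels, AV_SAMPLE_FMT_S16,
+ *                          buf, buf_size, 1);
+ * @endcode
+ */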
+
+/**
+ * Encode a video frame from pict into buf.
+ * The input picture should be
+ * stored using a specific format, namely avctx.pix_fmt.
+ *
+ * @param avctx the codec context
+ * @param[out] buf the output buffer for the bitstream of encoded frame
+ * @param[in] buf_size the size of the output buffer in bytes
+ * @param[in] pict the input picture to encode
+ * @return On error a negative value is returned, on success zero or the number
+ * of bytes used from the output buffer.
+ */
+int avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+ const AVFrame *pict);
+int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+ const AVSubtitle *sub);
+
+/**
+ * Close a given AVCodecContext and free all the data associated with it
+ * (but not the AVCodecContext itself).
+ *
+ * Calling this function on an AVCodecContext that hasn't been opened will free
+ * the codec-specific data allocated in avcodec_alloc_context3() /
+ * avcodec_get_context_defaults3() with a non-NULL codec. Subsequent calls will
+ * do nothing.
+ */
+int avcodec_close(AVCodecContext *avctx);
+
+/**
+ * Register all the codecs, parsers and bitstream filters which were enabled at
+ * configuration time. If you do not call this function you can select exactly
+ * which formats you want to support, by using the individual registration
+ * functions.
+ *
+ * @see avcodec_register
+ * @see av_register_codec_parser
+ * @see av_register_bitstream_filter
+ */
+void avcodec_register_all(void);
+
+/**
+ * Flush buffers; should be called when seeking or when switching to a different stream.
+ */
+void avcodec_flush_buffers(AVCodecContext *avctx);
+
+void avcodec_default_free_buffers(AVCodecContext *s);
+
+/* misc useful functions */
+
+#if FF_API_OLD_FF_PICT_TYPES
+/**
+ * Return a single letter to describe the given picture type pict_type.
+ *
+ * @param[in] pict_type the picture type
+ * @return A single character representing the picture type.
+ * @deprecated Use av_get_picture_type_char() instead.
+ */
+attribute_deprecated
+char av_get_pict_type_char(int pict_type);
+#endif
+
+/**
+ * Return codec bits per sample.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_bits_per_sample(enum CodecID codec_id);
+
+#if FF_API_OLD_SAMPLE_FMT
+/**
+ * @deprecated Use av_get_bytes_per_sample() instead.
+ */
+attribute_deprecated
+int av_get_bits_per_sample_format(enum AVSampleFormat sample_fmt);
+#endif
+
+/* frame parsing */
+typedef struct AVCodecParserContext {
+ void *priv_data;
+ struct AVCodecParser *parser;
+ int64_t frame_offset; /* offset of the current frame */
+ int64_t cur_offset; /* current offset
+ (incremented by each av_parser_parse()) */
+ int64_t next_frame_offset; /* offset of the next frame */
+ /* video info */
+ int pict_type; /* XXX: Put it back in AVCodecContext. */
+ /**
+ * This field is used for proper frame duration computation in lavf.
+ * It signals how much longer the frame duration of the current frame
+ * is compared to normal frame duration.
+ *
+ * frame_duration = (1 + repeat_pict) * time_base
+ *
+ * It is used by codecs like H.264 to display telecined material.
+ */
+ int repeat_pict; /* XXX: Put it back in AVCodecContext. */
+ int64_t pts; /* pts of the current frame */
+ int64_t dts; /* dts of the current frame */
+
+ /* private data */
+ int64_t last_pts;
+ int64_t last_dts;
+ int fetch_timestamp;
+
+#define AV_PARSER_PTS_NB 4
+ int cur_frame_start_index;
+ int64_t cur_frame_offset[AV_PARSER_PTS_NB];
+ int64_t cur_frame_pts[AV_PARSER_PTS_NB];
+ int64_t cur_frame_dts[AV_PARSER_PTS_NB];
+
+ int flags;
+#define PARSER_FLAG_COMPLETE_FRAMES 0x0001
+#define PARSER_FLAG_ONCE 0x0002
+/// Set if the parser has a valid file offset
+#define PARSER_FLAG_FETCHED_OFFSET 0x0004
+
+ int64_t offset; ///< byte offset from starting packet start
+ int64_t cur_frame_end[AV_PARSER_PTS_NB];
+
+ /**
+ * Set by parser to 1 for key frames and 0 for non-key frames.
+ * It is initialized to -1, so if the parser doesn't set this flag,
+ * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames
+ * will be used.
+ */
+ int key_frame;
+
+ /**
+ * Time difference in stream time base units from the pts of this
+ * packet to the point at which the output from the decoder has converged
+ * independent from the availability of previous frames. That is, the
+ * frames are virtually identical no matter if decoding started from
+ * the very first frame or from this keyframe.
+ * Is AV_NOPTS_VALUE if unknown.
+ * This field is not the display duration of the current frame.
+ * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY
+ * set.
+ *
+ * The purpose of this field is to allow seeking in streams that have no
+ * keyframes in the conventional sense. It corresponds to the
+ * recovery point SEI in H.264 and match_time_delta in NUT. It is also
+ * essential for some types of subtitle streams to ensure that all
+ * subtitles are correctly displayed after seeking.
+ */
+ int64_t convergence_duration;
+
+ // Timestamp generation support:
+ /**
+ * Synchronization point for start of timestamp generation.
+ *
+ * Set to >0 for sync point, 0 for no sync point and <0 for undefined
+ * (default).
+ *
+ * For example, this corresponds to presence of H.264 buffering period
+ * SEI message.
+ */
+ int dts_sync_point;
+
+ /**
+ * Offset of the current timestamp against last timestamp sync point in
+ * units of AVCodecContext.time_base.
+ *
+ * Set to INT_MIN when dts_sync_point unused. Otherwise, it must
+ * contain a valid timestamp offset.
+ *
+ * Note that the timestamp of a sync point usually has a nonzero
+ * dts_ref_dts_delta, which refers to the previous sync point. The offset of
+ * the next frame after a timestamp sync point will usually be 1.
+ *
+ * For example, this corresponds to H.264 cpb_removal_delay.
+ */
+ int dts_ref_dts_delta;
+
+ /**
+ * Presentation delay of current frame in units of AVCodecContext.time_base.
+ *
+ * Set to INT_MIN when dts_sync_point unused. Otherwise, it must
+ * contain valid non-negative timestamp delta (presentation time of a frame
+ * must not lie in the past).
+ *
+ * This delay represents the difference between decoding and presentation
+ * time of the frame.
+ *
+ * For example, this corresponds to H.264 dpb_output_delay.
+ */
+ int pts_dts_delta;
+
+ /**
+ * Position of the packet in file.
+ *
+ * Analogous to cur_frame_pts/dts
+ */
+ int64_t cur_frame_pos[AV_PARSER_PTS_NB];
+
+ /**
+ * Byte position of currently parsed frame in stream.
+ */
+ int64_t pos;
+
+ /**
+ * Previous frame byte position.
+ */
+ int64_t last_pos;
+} AVCodecParserContext;
+
+typedef struct AVCodecParser {
+ int codec_ids[5]; /* several codec IDs are permitted */
+ int priv_data_size;
+ int (*parser_init)(AVCodecParserContext *s);
+ int (*parser_parse)(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ const uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size);
+ void (*parser_close)(AVCodecParserContext *s);
+ int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size);
+ struct AVCodecParser *next;
+} AVCodecParser;
+
+AVCodecParser *av_parser_next(AVCodecParser *c);
+
+void av_register_codec_parser(AVCodecParser *parser);
+AVCodecParserContext *av_parser_init(int codec_id);
+
+/**
+ * Parse a packet.
+ *
+ * @param s parser context.
+ * @param avctx codec context.
+ * @param poutbuf set to pointer to parsed buffer or NULL if not yet finished.
+ * @param poutbuf_size set to size of parsed buffer or zero if not yet finished.
+ * @param buf input buffer.
+ * @param buf_size input length; to signal EOF, this should be 0 (so that the last frame can be output).
+ * @param pts input presentation timestamp.
+ * @param dts input decoding timestamp.
+ * @param pos input byte position in stream.
+ * @return the number of bytes of the input bitstream used.
+ *
+ * Example:
+ * @code
+ * while(in_len){
+ * len = av_parser_parse2(myparser, avctx, &data, &size,
+ * in_data, in_len,
+ * pts, dts, pos);
+ * in_data += len;
+ * in_len -= len;
+ *
+ * if(size)
+ * decode_frame(data, size);
+ * }
+ * @endcode
+ */
+int av_parser_parse2(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size,
+ int64_t pts, int64_t dts,
+ int64_t pos);
+
+int av_parser_change(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe);
+void av_parser_close(AVCodecParserContext *s);
+
+
+typedef struct AVBitStreamFilterContext {
+ void *priv_data;
+ struct AVBitStreamFilter *filter;
+ AVCodecParserContext *parser;
+ struct AVBitStreamFilterContext *next;
+} AVBitStreamFilterContext;
+
+
+typedef struct AVBitStreamFilter {
+ const char *name;
+ int priv_data_size;
+ int (*filter)(AVBitStreamFilterContext *bsfc,
+ AVCodecContext *avctx, const char *args,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe);
+ void (*close)(AVBitStreamFilterContext *bsfc);
+ struct AVBitStreamFilter *next;
+} AVBitStreamFilter;
+
+void av_register_bitstream_filter(AVBitStreamFilter *bsf);
+AVBitStreamFilterContext *av_bitstream_filter_init(const char *name);
+int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc,
+ AVCodecContext *avctx, const char *args,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe);
+void av_bitstream_filter_close(AVBitStreamFilterContext *bsf);
+
+AVBitStreamFilter *av_bitstream_filter_next(AVBitStreamFilter *f);
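+
+/*
+ * A minimal usage sketch for the bitstream filter API above. The filter name
+ * "h264_mp4toannexb" and the variables avctx, pkt_data, pkt_size and
+ * is_keyframe are illustrative assumptions; whether the output buffer must be
+ * freed depends on whether the filter allocated a new one.
+ *
+ *   AVBitStreamFilterContext *bsfc = av_bitstream_filter_init("h264_mp4toannexb");
+ *   if (bsfc) {
+ *       uint8_t *out      = NULL;
+ *       int      out_size = 0;
+ *       int ret = av_bitstream_filter_filter(bsfc, avctx, NULL,
+ *                                            &out, &out_size,
+ *                                            pkt_data, pkt_size, is_keyframe);
+ *       if (ret > 0 && out != pkt_data)
+ *           av_free(out);          // the filter allocated a new output buffer
+ *       av_bitstream_filter_close(bsfc);
+ *   }
+ */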
+
+/* memory */
+
+/**
+ * Reallocate the given block if it is not large enough, otherwise do nothing.
+ *
+ * @see av_realloc
+ */
+void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Allocate a buffer, reusing the given one if large enough.
+ *
+ * Contrary to av_fast_realloc(), the current buffer contents might not be
+ * preserved, and on error the old buffer is freed, so no special
+ * handling to avoid memory leaks is necessary.
+ *
+ * @param ptr pointer to a pointer to an already allocated buffer, overwritten with a pointer to the new buffer
+ * @param size size of the buffer *ptr points to
+ * @param min_size minimum size of the *ptr buffer after returning; *ptr will be NULL and
+ * *size 0 if an error occurred.
+ */
+void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size);
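+
+/*
+ * A minimal sketch of the intended calling pattern for av_fast_malloc(); the
+ * names scratch and needed_size are placeholders, and AVERROR()/av_freep()
+ * come from libavutil. av_fast_realloc() is used the same way except that it
+ * returns the (possibly moved) pointer and preserves the buffer contents.
+ *
+ *   uint8_t     *scratch      = NULL;
+ *   unsigned int scratch_size = 0;
+ *
+ *   av_fast_malloc(&scratch, &scratch_size, needed_size);
+ *   if (!scratch)
+ *       return AVERROR(ENOMEM);   // *scratch is NULL and scratch_size is 0 on error
+ *   // ... use up to needed_size bytes of scratch ...
+ *   av_freep(&scratch);
+ */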
+
+/**
+ * Copy image src to dst. Wraps av_picture_data_copy() above.
+ */
+void av_picture_copy(AVPicture *dst, const AVPicture *src,
+ enum PixelFormat pix_fmt, int width, int height);
+
+/**
+ * Crop image top and left side.
+ */
+int av_picture_crop(AVPicture *dst, const AVPicture *src,
+ enum PixelFormat pix_fmt, int top_band, int left_band);
+
+/**
+ * Pad image.
+ */
+int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum PixelFormat pix_fmt,
+ int padtop, int padbottom, int padleft, int padright, int *color);
+
+/**
+ * Encode extradata length to a buffer. Used by xiph codecs.
+ *
+ * @param s buffer to write to; must be at least (v/255+1) bytes long
+ * @param v size of extradata in bytes
+ * @return number of bytes written to the buffer.
+ */
+unsigned int av_xiphlacing(unsigned char *s, unsigned int v);
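+
+/*
+ * A small illustration of the lacing format produced above: the value is
+ * written as a run of 255-bytes followed by the remainder. The number 600 is
+ * an arbitrary example.
+ *
+ *   unsigned char lace[16];
+ *   unsigned int  n = av_xiphlacing(lace, 600);
+ *   // n == 3 and lace[] starts with { 255, 255, 90 }, since 600 = 255 + 255 + 90
+ */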
+
+/**
+ * Log a generic warning message about a missing feature. This function is
+ * intended to be used internally by Libav (libavcodec, libavformat, etc.)
+ * only, and would normally not be used by applications.
+ * @param[in] avc a pointer to an arbitrary struct of which the first field is
+ * a pointer to an AVClass struct
+ * @param[in] feature string containing the name of the missing feature
+ * @param[in] want_sample indicates whether samples exhibiting this feature are wanted.
+ * If want_sample is non-zero, additional verbiage will be added to the log
+ * message telling the user how to report samples to the development
+ * mailing list.
+ */
+void av_log_missing_feature(void *avc, const char *feature, int want_sample);
+
+/**
+ * Log a generic warning message asking for a sample. This function is
+ * intended to be used internally by Libav (libavcodec, libavformat, etc.)
+ * only, and would normally not be used by applications.
+ * @param[in] avc a pointer to an arbitrary struct of which the first field is
+ * a pointer to an AVClass struct
+ * @param[in] msg string containing an optional message, or NULL if no message
+ */
+void av_log_ask_for_sample(void *avc, const char *msg, ...) av_printf_format(2, 3);
+
+/**
+ * Register the hardware accelerator hwaccel.
+ */
+void av_register_hwaccel(AVHWAccel *hwaccel);
+
+/**
+ * If hwaccel is NULL, returns the first registered hardware accelerator;
+ * if hwaccel is non-NULL, returns the next registered hardware accelerator
+ * after hwaccel, or NULL if hwaccel is the last one.
+ */
+AVHWAccel *av_hwaccel_next(AVHWAccel *hwaccel);
+
+
+/**
+ * Lock operation used by lockmgr
+ */
+enum AVLockOp {
+ AV_LOCK_CREATE, ///< Create a mutex
+ AV_LOCK_OBTAIN, ///< Lock the mutex
+ AV_LOCK_RELEASE, ///< Unlock the mutex
+ AV_LOCK_DESTROY, ///< Free mutex resources
+};
+
+/**
+ * Register a user-provided lock manager supporting the operations
+ * specified by AVLockOp. mutex points to a (void *) where the
+ * lockmgr should store/get a pointer to a user-allocated mutex. It is
+ * NULL upon AV_LOCK_CREATE and non-NULL for all other ops.
+ *
+ * @param cb User defined callback. Note: Libav may invoke calls to this
+ * callback during the call to av_lockmgr_register().
+ * Thus, the application must be prepared to handle that.
+ * If cb is set to NULL the lockmgr will be unregistered.
+ * Also note that during unregistration the previously registered
+ * lockmgr callback may also be invoked.
+ */
+int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op));
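+
+/*
+ * A minimal sketch of a lock manager callback built on POSIX mutexes, following
+ * the contract described above (allocate on AV_LOCK_CREATE, free on
+ * AV_LOCK_DESTROY). It assumes <pthread.h>/<stdlib.h> and the usual convention
+ * of returning 0 on success and non-zero on failure.
+ *
+ *   static int lockmgr_cb(void **mutex, enum AVLockOp op)
+ *   {
+ *       switch (op) {
+ *       case AV_LOCK_CREATE:
+ *           *mutex = malloc(sizeof(pthread_mutex_t));
+ *           return *mutex ? pthread_mutex_init(*mutex, NULL) != 0 : 1;
+ *       case AV_LOCK_OBTAIN:
+ *           return pthread_mutex_lock(*mutex) != 0;
+ *       case AV_LOCK_RELEASE:
+ *           return pthread_mutex_unlock(*mutex) != 0;
+ *       case AV_LOCK_DESTROY:
+ *           pthread_mutex_destroy(*mutex);
+ *           free(*mutex);
+ *           *mutex = NULL;
+ *           return 0;
+ *       }
+ *       return 1;
+ *   }
+ *
+ *   // registered once at startup with: av_lockmgr_register(lockmgr_cb);
+ */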
+
+/**
+ * Get the type of the given codec.
+ */
+enum AVMediaType avcodec_get_type(enum CodecID codec_id);
+
+/**
+ * Get the AVClass for AVCodecContext. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass *avcodec_get_class(void);
+
+/**
+ * @return a positive value if s is open (i.e. avcodec_open2() was called on it
+ * with no corresponding avcodec_close()), 0 otherwise.
+ */
+int avcodec_is_open(AVCodecContext *s);
+
+#endif /* AVCODEC_AVCODEC_H */
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavcodec/avfft.h b/dom/media/platforms/ffmpeg/libav53/include/libavcodec/avfft.h
new file mode 100644
index 000000000..91fe2f429
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavcodec/avfft.h
@@ -0,0 +1,99 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AVFFT_H
+#define AVCODEC_AVFFT_H
+
+typedef float FFTSample;
+
+typedef struct FFTComplex {
+ FFTSample re, im;
+} FFTComplex;
+
+typedef struct FFTContext FFTContext;
+
+/**
+ * Set up a complex FFT.
+ * @param nbits log2 of the length of the input array
+ * @param inverse if 0 perform the forward transform, if 1 perform the inverse
+ */
+FFTContext *av_fft_init(int nbits, int inverse);
+
+/**
+ * Do the permutation needed BEFORE calling av_fft_calc().
+ */
+void av_fft_permute(FFTContext *s, FFTComplex *z);
+
+/**
+ * Do a complex FFT with the parameters defined in av_fft_init(). The
+ * input data must be permuted beforehand (see av_fft_permute()). No 1.0/sqrt(n) normalization is done.
+ */
+void av_fft_calc(FFTContext *s, FFTComplex *z);
+
+void av_fft_end(FFTContext *s);
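+
+/*
+ * A minimal sketch of a forward complex FFT using the functions above. The
+ * input is assumed to already be in an FFTComplex array of length 1 << nbits.
+ *
+ *   FFTComplex samples[1 << 10];             // filled with input data elsewhere
+ *   FFTContext *fft = av_fft_init(10, 0);    // 1024-point forward transform
+ *   if (fft) {
+ *       av_fft_permute(fft, samples);        // reorder the input in place
+ *       av_fft_calc(fft, samples);           // unnormalized FFT, in place
+ *       av_fft_end(fft);
+ *   }
+ */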
+
+FFTContext *av_mdct_init(int nbits, int inverse, double scale);
+void av_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input);
+void av_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input);
+void av_mdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input);
+void av_mdct_end(FFTContext *s);
+
+/* Real Discrete Fourier Transform */
+
+enum RDFTransformType {
+ DFT_R2C,
+ IDFT_C2R,
+ IDFT_R2C,
+ DFT_C2R,
+};
+
+typedef struct RDFTContext RDFTContext;
+
+/**
+ * Set up a real FFT.
+ * @param nbits log2 of the length of the input array
+ * @param trans the type of transform
+ */
+RDFTContext *av_rdft_init(int nbits, enum RDFTransformType trans);
+void av_rdft_calc(RDFTContext *s, FFTSample *data);
+void av_rdft_end(RDFTContext *s);
+
+/* Discrete Cosine Transform */
+
+typedef struct DCTContext DCTContext;
+
+enum DCTTransformType {
+ DCT_II = 0,
+ DCT_III,
+ DCT_I,
+ DST_I,
+};
+
+/**
+ * Set up DCT.
+ * @param nbits size of the input array:
+ * (1 << nbits) for DCT-II, DCT-III and DST-I
+ * (1 << nbits) + 1 for DCT-I
+ *
+ * @note the first element of the input of DST-I is ignored
+ */
+DCTContext *av_dct_init(int nbits, enum DCTTransformType type);
+void av_dct_calc(DCTContext *s, FFTSample *data);
+void av_dct_end (DCTContext *s);
+
+#endif /* AVCODEC_AVFFT_H */
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavcodec/dxva2.h b/dom/media/platforms/ffmpeg/libav53/include/libavcodec/dxva2.h
new file mode 100644
index 000000000..374ae039a
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavcodec/dxva2.h
@@ -0,0 +1,71 @@
+/*
+ * DXVA2 HW acceleration
+ *
+ * copyright (c) 2009 Laurent Aimar
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_DXVA_H
+#define AVCODEC_DXVA_H
+
+#include <stdint.h>
+
+#include <d3d9.h>
+#include <dxva2api.h>
+
+#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Workaround for DXVA2 and old UVD/UVD+ ATI video cards
+
+/**
+ * This structure is used to provide the necessary configurations and data
+ * to the DXVA2 Libav HWAccel implementation.
+ *
+ * The application must make it available as AVCodecContext.hwaccel_context.
+ */
+struct dxva_context {
+ /**
+ * DXVA2 decoder object
+ */
+ IDirectXVideoDecoder *decoder;
+
+ /**
+ * DXVA2 configuration used to create the decoder
+ */
+ const DXVA2_ConfigPictureDecode *cfg;
+
+ /**
+ * The number of surfaces in the surface array
+ */
+ unsigned surface_count;
+
+ /**
+ * The array of Direct3D surfaces used to create the decoder
+ */
+ LPDIRECT3DSURFACE9 *surface;
+
+ /**
+ * A bit field configuring the workarounds needed for using the decoder
+ */
+ uint64_t workaround;
+
+ /**
+ * Private to the Libav AVHWAccel implementation
+ */
+ unsigned report_id;
+};
+
+#endif /* AVCODEC_DXVA_H */
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavcodec/old_codec_ids.h b/dom/media/platforms/ffmpeg/libav53/include/libavcodec/old_codec_ids.h
new file mode 100644
index 000000000..ded4cc787
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavcodec/old_codec_ids.h
@@ -0,0 +1,398 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_OLD_CODEC_IDS_H
+#define AVCODEC_OLD_CODEC_IDS_H
+
+#include "libavutil/common.h"
+
+/*
+ * This header exists to prevent new codec IDs from being accidentally added to
+ * the deprecated list.
+ * Do not include it directly. It will be removed on the next major bump.
+ *
+ * Do not add new items to this list. Use the AVCodecID enum instead.
+ */
+
+ CODEC_ID_NONE = AV_CODEC_ID_NONE,
+
+ /* video codecs */
+ CODEC_ID_MPEG1VIDEO,
+ CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding
+ CODEC_ID_MPEG2VIDEO_XVMC,
+ CODEC_ID_H261,
+ CODEC_ID_H263,
+ CODEC_ID_RV10,
+ CODEC_ID_RV20,
+ CODEC_ID_MJPEG,
+ CODEC_ID_MJPEGB,
+ CODEC_ID_LJPEG,
+ CODEC_ID_SP5X,
+ CODEC_ID_JPEGLS,
+ CODEC_ID_MPEG4,
+ CODEC_ID_RAWVIDEO,
+ CODEC_ID_MSMPEG4V1,
+ CODEC_ID_MSMPEG4V2,
+ CODEC_ID_MSMPEG4V3,
+ CODEC_ID_WMV1,
+ CODEC_ID_WMV2,
+ CODEC_ID_H263P,
+ CODEC_ID_H263I,
+ CODEC_ID_FLV1,
+ CODEC_ID_SVQ1,
+ CODEC_ID_SVQ3,
+ CODEC_ID_DVVIDEO,
+ CODEC_ID_HUFFYUV,
+ CODEC_ID_CYUV,
+ CODEC_ID_H264,
+ CODEC_ID_INDEO3,
+ CODEC_ID_VP3,
+ CODEC_ID_THEORA,
+ CODEC_ID_ASV1,
+ CODEC_ID_ASV2,
+ CODEC_ID_FFV1,
+ CODEC_ID_4XM,
+ CODEC_ID_VCR1,
+ CODEC_ID_CLJR,
+ CODEC_ID_MDEC,
+ CODEC_ID_ROQ,
+ CODEC_ID_INTERPLAY_VIDEO,
+ CODEC_ID_XAN_WC3,
+ CODEC_ID_XAN_WC4,
+ CODEC_ID_RPZA,
+ CODEC_ID_CINEPAK,
+ CODEC_ID_WS_VQA,
+ CODEC_ID_MSRLE,
+ CODEC_ID_MSVIDEO1,
+ CODEC_ID_IDCIN,
+ CODEC_ID_8BPS,
+ CODEC_ID_SMC,
+ CODEC_ID_FLIC,
+ CODEC_ID_TRUEMOTION1,
+ CODEC_ID_VMDVIDEO,
+ CODEC_ID_MSZH,
+ CODEC_ID_ZLIB,
+ CODEC_ID_QTRLE,
+ CODEC_ID_SNOW,
+ CODEC_ID_TSCC,
+ CODEC_ID_ULTI,
+ CODEC_ID_QDRAW,
+ CODEC_ID_VIXL,
+ CODEC_ID_QPEG,
+ CODEC_ID_PNG,
+ CODEC_ID_PPM,
+ CODEC_ID_PBM,
+ CODEC_ID_PGM,
+ CODEC_ID_PGMYUV,
+ CODEC_ID_PAM,
+ CODEC_ID_FFVHUFF,
+ CODEC_ID_RV30,
+ CODEC_ID_RV40,
+ CODEC_ID_VC1,
+ CODEC_ID_WMV3,
+ CODEC_ID_LOCO,
+ CODEC_ID_WNV1,
+ CODEC_ID_AASC,
+ CODEC_ID_INDEO2,
+ CODEC_ID_FRAPS,
+ CODEC_ID_TRUEMOTION2,
+ CODEC_ID_BMP,
+ CODEC_ID_CSCD,
+ CODEC_ID_MMVIDEO,
+ CODEC_ID_ZMBV,
+ CODEC_ID_AVS,
+ CODEC_ID_SMACKVIDEO,
+ CODEC_ID_NUV,
+ CODEC_ID_KMVC,
+ CODEC_ID_FLASHSV,
+ CODEC_ID_CAVS,
+ CODEC_ID_JPEG2000,
+ CODEC_ID_VMNC,
+ CODEC_ID_VP5,
+ CODEC_ID_VP6,
+ CODEC_ID_VP6F,
+ CODEC_ID_TARGA,
+ CODEC_ID_DSICINVIDEO,
+ CODEC_ID_TIERTEXSEQVIDEO,
+ CODEC_ID_TIFF,
+ CODEC_ID_GIF,
+ CODEC_ID_DXA,
+ CODEC_ID_DNXHD,
+ CODEC_ID_THP,
+ CODEC_ID_SGI,
+ CODEC_ID_C93,
+ CODEC_ID_BETHSOFTVID,
+ CODEC_ID_PTX,
+ CODEC_ID_TXD,
+ CODEC_ID_VP6A,
+ CODEC_ID_AMV,
+ CODEC_ID_VB,
+ CODEC_ID_PCX,
+ CODEC_ID_SUNRAST,
+ CODEC_ID_INDEO4,
+ CODEC_ID_INDEO5,
+ CODEC_ID_MIMIC,
+ CODEC_ID_RL2,
+ CODEC_ID_ESCAPE124,
+ CODEC_ID_DIRAC,
+ CODEC_ID_BFI,
+ CODEC_ID_CMV,
+ CODEC_ID_MOTIONPIXELS,
+ CODEC_ID_TGV,
+ CODEC_ID_TGQ,
+ CODEC_ID_TQI,
+ CODEC_ID_AURA,
+ CODEC_ID_AURA2,
+ CODEC_ID_V210X,
+ CODEC_ID_TMV,
+ CODEC_ID_V210,
+ CODEC_ID_DPX,
+ CODEC_ID_MAD,
+ CODEC_ID_FRWU,
+ CODEC_ID_FLASHSV2,
+ CODEC_ID_CDGRAPHICS,
+ CODEC_ID_R210,
+ CODEC_ID_ANM,
+ CODEC_ID_BINKVIDEO,
+ CODEC_ID_IFF_ILBM,
+ CODEC_ID_IFF_BYTERUN1,
+ CODEC_ID_KGV1,
+ CODEC_ID_YOP,
+ CODEC_ID_VP8,
+ CODEC_ID_PICTOR,
+ CODEC_ID_ANSI,
+ CODEC_ID_A64_MULTI,
+ CODEC_ID_A64_MULTI5,
+ CODEC_ID_R10K,
+ CODEC_ID_MXPEG,
+ CODEC_ID_LAGARITH,
+ CODEC_ID_PRORES,
+ CODEC_ID_JV,
+ CODEC_ID_DFA,
+ CODEC_ID_WMV3IMAGE,
+ CODEC_ID_VC1IMAGE,
+ CODEC_ID_UTVIDEO,
+ CODEC_ID_BMV_VIDEO,
+ CODEC_ID_VBLE,
+ CODEC_ID_DXTORY,
+ CODEC_ID_V410,
+ CODEC_ID_XWD,
+ CODEC_ID_CDXL,
+ CODEC_ID_XBM,
+ CODEC_ID_ZEROCODEC,
+ CODEC_ID_MSS1,
+ CODEC_ID_MSA1,
+ CODEC_ID_TSCC2,
+ CODEC_ID_MTS2,
+ CODEC_ID_CLLC,
+ CODEC_ID_Y41P = MKBETAG('Y','4','1','P'),
+ CODEC_ID_ESCAPE130 = MKBETAG('E','1','3','0'),
+ CODEC_ID_EXR = MKBETAG('0','E','X','R'),
+ CODEC_ID_AVRP = MKBETAG('A','V','R','P'),
+
+ CODEC_ID_G2M = MKBETAG( 0 ,'G','2','M'),
+ CODEC_ID_AVUI = MKBETAG('A','V','U','I'),
+ CODEC_ID_AYUV = MKBETAG('A','Y','U','V'),
+ CODEC_ID_V308 = MKBETAG('V','3','0','8'),
+ CODEC_ID_V408 = MKBETAG('V','4','0','8'),
+ CODEC_ID_YUV4 = MKBETAG('Y','U','V','4'),
+ CODEC_ID_SANM = MKBETAG('S','A','N','M'),
+ CODEC_ID_PAF_VIDEO = MKBETAG('P','A','F','V'),
+
+ /* various PCM "codecs" */
+ CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
+ CODEC_ID_PCM_S16LE = 0x10000,
+ CODEC_ID_PCM_S16BE,
+ CODEC_ID_PCM_U16LE,
+ CODEC_ID_PCM_U16BE,
+ CODEC_ID_PCM_S8,
+ CODEC_ID_PCM_U8,
+ CODEC_ID_PCM_MULAW,
+ CODEC_ID_PCM_ALAW,
+ CODEC_ID_PCM_S32LE,
+ CODEC_ID_PCM_S32BE,
+ CODEC_ID_PCM_U32LE,
+ CODEC_ID_PCM_U32BE,
+ CODEC_ID_PCM_S24LE,
+ CODEC_ID_PCM_S24BE,
+ CODEC_ID_PCM_U24LE,
+ CODEC_ID_PCM_U24BE,
+ CODEC_ID_PCM_S24DAUD,
+ CODEC_ID_PCM_ZORK,
+ CODEC_ID_PCM_S16LE_PLANAR,
+ CODEC_ID_PCM_DVD,
+ CODEC_ID_PCM_F32BE,
+ CODEC_ID_PCM_F32LE,
+ CODEC_ID_PCM_F64BE,
+ CODEC_ID_PCM_F64LE,
+ CODEC_ID_PCM_BLURAY,
+ CODEC_ID_PCM_LXF,
+ CODEC_ID_S302M,
+ CODEC_ID_PCM_S8_PLANAR,
+
+ /* various ADPCM codecs */
+ CODEC_ID_ADPCM_IMA_QT = 0x11000,
+ CODEC_ID_ADPCM_IMA_WAV,
+ CODEC_ID_ADPCM_IMA_DK3,
+ CODEC_ID_ADPCM_IMA_DK4,
+ CODEC_ID_ADPCM_IMA_WS,
+ CODEC_ID_ADPCM_IMA_SMJPEG,
+ CODEC_ID_ADPCM_MS,
+ CODEC_ID_ADPCM_4XM,
+ CODEC_ID_ADPCM_XA,
+ CODEC_ID_ADPCM_ADX,
+ CODEC_ID_ADPCM_EA,
+ CODEC_ID_ADPCM_G726,
+ CODEC_ID_ADPCM_CT,
+ CODEC_ID_ADPCM_SWF,
+ CODEC_ID_ADPCM_YAMAHA,
+ CODEC_ID_ADPCM_SBPRO_4,
+ CODEC_ID_ADPCM_SBPRO_3,
+ CODEC_ID_ADPCM_SBPRO_2,
+ CODEC_ID_ADPCM_THP,
+ CODEC_ID_ADPCM_IMA_AMV,
+ CODEC_ID_ADPCM_EA_R1,
+ CODEC_ID_ADPCM_EA_R3,
+ CODEC_ID_ADPCM_EA_R2,
+ CODEC_ID_ADPCM_IMA_EA_SEAD,
+ CODEC_ID_ADPCM_IMA_EA_EACS,
+ CODEC_ID_ADPCM_EA_XAS,
+ CODEC_ID_ADPCM_EA_MAXIS_XA,
+ CODEC_ID_ADPCM_IMA_ISS,
+ CODEC_ID_ADPCM_G722,
+ CODEC_ID_ADPCM_IMA_APC,
+ CODEC_ID_VIMA = MKBETAG('V','I','M','A'),
+
+ /* AMR */
+ CODEC_ID_AMR_NB = 0x12000,
+ CODEC_ID_AMR_WB,
+
+ /* RealAudio codecs*/
+ CODEC_ID_RA_144 = 0x13000,
+ CODEC_ID_RA_288,
+
+ /* various DPCM codecs */
+ CODEC_ID_ROQ_DPCM = 0x14000,
+ CODEC_ID_INTERPLAY_DPCM,
+ CODEC_ID_XAN_DPCM,
+ CODEC_ID_SOL_DPCM,
+
+ /* audio codecs */
+ CODEC_ID_MP2 = 0x15000,
+ CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3
+ CODEC_ID_AAC,
+ CODEC_ID_AC3,
+ CODEC_ID_DTS,
+ CODEC_ID_VORBIS,
+ CODEC_ID_DVAUDIO,
+ CODEC_ID_WMAV1,
+ CODEC_ID_WMAV2,
+ CODEC_ID_MACE3,
+ CODEC_ID_MACE6,
+ CODEC_ID_VMDAUDIO,
+ CODEC_ID_FLAC,
+ CODEC_ID_MP3ADU,
+ CODEC_ID_MP3ON4,
+ CODEC_ID_SHORTEN,
+ CODEC_ID_ALAC,
+ CODEC_ID_WESTWOOD_SND1,
+ CODEC_ID_GSM, ///< as in Berlin toast format
+ CODEC_ID_QDM2,
+ CODEC_ID_COOK,
+ CODEC_ID_TRUESPEECH,
+ CODEC_ID_TTA,
+ CODEC_ID_SMACKAUDIO,
+ CODEC_ID_QCELP,
+ CODEC_ID_WAVPACK,
+ CODEC_ID_DSICINAUDIO,
+ CODEC_ID_IMC,
+ CODEC_ID_MUSEPACK7,
+ CODEC_ID_MLP,
+ CODEC_ID_GSM_MS, /* as found in WAV */
+ CODEC_ID_ATRAC3,
+ CODEC_ID_VOXWARE,
+ CODEC_ID_APE,
+ CODEC_ID_NELLYMOSER,
+ CODEC_ID_MUSEPACK8,
+ CODEC_ID_SPEEX,
+ CODEC_ID_WMAVOICE,
+ CODEC_ID_WMAPRO,
+ CODEC_ID_WMALOSSLESS,
+ CODEC_ID_ATRAC3P,
+ CODEC_ID_EAC3,
+ CODEC_ID_SIPR,
+ CODEC_ID_MP1,
+ CODEC_ID_TWINVQ,
+ CODEC_ID_TRUEHD,
+ CODEC_ID_MP4ALS,
+ CODEC_ID_ATRAC1,
+ CODEC_ID_BINKAUDIO_RDFT,
+ CODEC_ID_BINKAUDIO_DCT,
+ CODEC_ID_AAC_LATM,
+ CODEC_ID_QDMC,
+ CODEC_ID_CELT,
+ CODEC_ID_G723_1,
+ CODEC_ID_G729,
+ CODEC_ID_8SVX_EXP,
+ CODEC_ID_8SVX_FIB,
+ CODEC_ID_BMV_AUDIO,
+ CODEC_ID_RALF,
+ CODEC_ID_IAC,
+ CODEC_ID_ILBC,
+ CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'),
+ CODEC_ID_8SVX_RAW = MKBETAG('8','S','V','X'),
+ CODEC_ID_SONIC = MKBETAG('S','O','N','C'),
+ CODEC_ID_SONIC_LS = MKBETAG('S','O','N','L'),
+ CODEC_ID_PAF_AUDIO = MKBETAG('P','A','F','A'),
+ CODEC_ID_OPUS = MKBETAG('O','P','U','S'),
+
+ /* subtitle codecs */
+ CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
+ CODEC_ID_DVD_SUBTITLE = 0x17000,
+ CODEC_ID_DVB_SUBTITLE,
+ CODEC_ID_TEXT, ///< raw UTF-8 text
+ CODEC_ID_XSUB,
+ CODEC_ID_SSA,
+ CODEC_ID_MOV_TEXT,
+ CODEC_ID_HDMV_PGS_SUBTITLE,
+ CODEC_ID_DVB_TELETEXT,
+ CODEC_ID_SRT,
+ CODEC_ID_MICRODVD = MKBETAG('m','D','V','D'),
+ CODEC_ID_EIA_608 = MKBETAG('c','6','0','8'),
+ CODEC_ID_JACOSUB = MKBETAG('J','S','U','B'),
+ CODEC_ID_SAMI = MKBETAG('S','A','M','I'),
+ CODEC_ID_REALTEXT = MKBETAG('R','T','X','T'),
+ CODEC_ID_SUBVIEWER = MKBETAG('S','u','b','V'),
+
+ /* other specific kind of codecs (generally used for attachments) */
+ CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs.
+ CODEC_ID_TTF = 0x18000,
+ CODEC_ID_BINTEXT = MKBETAG('B','T','X','T'),
+ CODEC_ID_XBIN = MKBETAG('X','B','I','N'),
+ CODEC_ID_IDF = MKBETAG( 0 ,'I','D','F'),
+ CODEC_ID_OTF = MKBETAG( 0 ,'O','T','F'),
+
+ CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like CODEC_ID_NONE) but lavf should attempt to identify it
+
+ CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS
+ * stream (only used by libavformat) */
+ CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems
+ * stream (only used by libavformat) */
+ CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information.
+
+#endif /* AVCODEC_OLD_CODEC_IDS_H */
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavcodec/opt.h b/dom/media/platforms/ffmpeg/libav53/include/libavcodec/opt.h
new file mode 100644
index 000000000..2380e7433
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavcodec/opt.h
@@ -0,0 +1,34 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * This header is provided for compatibility only and will be removed
+ * on next major bump
+ */
+
+#ifndef AVCODEC_OPT_H
+#define AVCODEC_OPT_H
+
+#include "libavcodec/version.h"
+
+#if FF_API_OPT_H
+#include "libavutil/opt.h"
+#endif
+
+#endif /* AVCODEC_OPT_H */
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavcodec/vaapi.h b/dom/media/platforms/ffmpeg/libav53/include/libavcodec/vaapi.h
new file mode 100644
index 000000000..36fb386ac
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavcodec/vaapi.h
@@ -0,0 +1,167 @@
+/*
+ * Video Acceleration API (shared data between Libav and the video player)
+ * HW decode acceleration for MPEG-2, MPEG-4, H.264 and VC-1
+ *
+ * Copyright (C) 2008-2009 Splitted-Desktop Systems
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VAAPI_H
+#define AVCODEC_VAAPI_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup VAAPI_Decoding VA API Decoding
+ * @ingroup Decoder
+ * @{
+ */
+
+/**
+ * This structure is used to share data between the Libav library and
+ * the client video application.
+ * This shall be zero-allocated and available as
+ * AVCodecContext.hwaccel_context. All user members can be set once
+ * during initialization or through each AVCodecContext.get_buffer()
+ * function call. In any case, they must be valid prior to calling
+ * decoding functions.
+ */
+struct vaapi_context {
+ /**
+ * Window system dependent data
+ *
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ void *display;
+
+ /**
+ * Configuration ID
+ *
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ uint32_t config_id;
+
+ /**
+ * Context ID (video decode pipeline)
+ *
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ uint32_t context_id;
+
+ /**
+ * VAPictureParameterBuffer ID
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ uint32_t pic_param_buf_id;
+
+ /**
+ * VAIQMatrixBuffer ID
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ uint32_t iq_matrix_buf_id;
+
+ /**
+ * VABitPlaneBuffer ID (for VC-1 decoding)
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ uint32_t bitplane_buf_id;
+
+ /**
+ * Slice parameter/data buffer IDs
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ uint32_t *slice_buf_ids;
+
+ /**
+ * Number of effective slice buffer IDs to send to the HW
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ unsigned int n_slice_buf_ids;
+
+ /**
+ * Size of pre-allocated slice_buf_ids
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ unsigned int slice_buf_ids_alloc;
+
+ /**
+ * Pointer to VASliceParameterBuffers
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ void *slice_params;
+
+ /**
+ * Size of a VASliceParameterBuffer element
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ unsigned int slice_param_size;
+
+ /**
+ * Size of pre-allocated slice_params
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ unsigned int slice_params_alloc;
+
+ /**
+ * Number of slices currently filled in
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ unsigned int slice_count;
+
+ /**
+ * Pointer to slice data buffer base
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ const uint8_t *slice_data;
+
+ /**
+ * Current size of slice data
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ uint32_t slice_data_size;
+};
+
+/* @} */
+
+#endif /* AVCODEC_VAAPI_H */
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavcodec/vda.h b/dom/media/platforms/ffmpeg/libav53/include/libavcodec/vda.h
new file mode 100644
index 000000000..2cb51c5f5
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavcodec/vda.h
@@ -0,0 +1,144 @@
+/*
+ * VDA HW acceleration
+ *
+ * copyright (c) 2011 Sebastien Zwickert
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VDA_H
+#define AVCODEC_VDA_H
+
+#include <pthread.h>
+#include <stdint.h>
+
+// emmintrin.h is unable to compile with -std=c99 -Werror=missing-prototypes
+// http://openradar.appspot.com/8026390
+#undef __GNUC_STDC_INLINE__
+
+#define Picture QuickdrawPicture
+#include <VideoDecodeAcceleration/VDADecoder.h>
+#undef Picture
+
+/**
+ * This structure is used to store decoded frame information and data.
+ */
+typedef struct vda_frame {
+ /**
+ * The PTS of the frame.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by libavcodec.
+ */
+ int64_t pts;
+
+ /**
+ * The CoreVideo buffer that contains the decoded data.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by libavcodec.
+ */
+ CVPixelBufferRef cv_buffer;
+
+ /**
+ * A pointer to the next frame.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by libavcodec.
+ */
+ struct vda_frame *next_frame;
+} vda_frame;
+
+/**
+ * This structure is used to provide the necessary configurations and data
+ * to the VDA Libav HWAccel implementation.
+ *
+ * The application must make it available as AVCodecContext.hwaccel_context.
+ */
+struct vda_context {
+ /**
+ * VDA decoder object.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by libavcodec.
+ */
+ VDADecoder decoder;
+
+ /**
+ * VDA frames queue ordered by presentation timestamp.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by libavcodec.
+ */
+ vda_frame *queue;
+
+ /**
+ * Mutex for locking queue operations.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by libavcodec.
+ */
+ pthread_mutex_t queue_mutex;
+
+ /**
+ * The frame width.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by user.
+ */
+ int width;
+
+ /**
+ * The frame height.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by user.
+ */
+ int height;
+
+ /**
+ * The frame format.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by user.
+ */
+ int format;
+
+ /**
+ * The pixel format for output image buffers.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by user.
+ */
+ OSType cv_pix_fmt_type;
+};
+
+/** Create the video decoder. */
+int ff_vda_create_decoder(struct vda_context *vda_ctx,
+ uint8_t *extradata,
+ int extradata_size);
+
+/** Destroy the video decoder. */
+int ff_vda_destroy_decoder(struct vda_context *vda_ctx);
+
+/** Return the top frame of the queue. */
+vda_frame *ff_vda_queue_pop(struct vda_context *vda_ctx);
+
+/** Release the given frame. */
+void ff_vda_release_vda_frame(vda_frame *frame);
+
+#endif /* AVCODEC_VDA_H */
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavcodec/vdpau.h b/dom/media/platforms/ffmpeg/libav53/include/libavcodec/vdpau.h
new file mode 100644
index 000000000..6f1386067
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavcodec/vdpau.h
@@ -0,0 +1,88 @@
+/*
+ * The Video Decode and Presentation API for UNIX (VDPAU) is used for
+ * hardware-accelerated decoding of MPEG-1/2, H.264 and VC-1.
+ *
+ * Copyright (C) 2008 NVIDIA
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VDPAU_H
+#define AVCODEC_VDPAU_H
+
+/**
+ * @defgroup Decoder VDPAU Decoder and Renderer
+ *
+ * VDPAU hardware acceleration has two modules
+ * - VDPAU decoding
+ * - VDPAU presentation
+ *
+ * The VDPAU decoding module parses all headers using Libav
+ * parsing mechanisms and uses VDPAU for the actual decoding.
+ *
+ * As per the current implementation, the actual decoding
+ * and rendering (API calls) are done as part of the VDPAU
+ * presentation (vo_vdpau.c) module.
+ *
+ * @defgroup VDPAU_Decoding VDPAU Decoding
+ * @ingroup Decoder
+ * @{
+ */
+
+#include <vdpau/vdpau.h>
+#include <vdpau/vdpau_x11.h>
+
+/** @brief The videoSurface is used for rendering. */
+#define FF_VDPAU_STATE_USED_FOR_RENDER 1
+
+/**
+ * @brief The videoSurface is needed for reference/prediction.
+ * The codec manipulates this.
+ */
+#define FF_VDPAU_STATE_USED_FOR_REFERENCE 2
+
+/**
+ * @brief This structure is used as a callback between the Libav
+ * decoder (vd_) and presentation (vo_) module.
+ * It is used to define a video frame's surface, picture parameters,
+ * bitstream information, etc., which are passed between the Libav
+ * decoder and its clients.
+ */
+struct vdpau_render_state {
+ VdpVideoSurface surface; ///< Used as rendered surface, never changed.
+
+ int state; ///< Holds FF_VDPAU_STATE_* values.
+
+ /** picture parameter information for all supported codecs */
+ union VdpPictureInfo {
+ VdpPictureInfoH264 h264;
+ VdpPictureInfoMPEG1Or2 mpeg;
+ VdpPictureInfoVC1 vc1;
+ VdpPictureInfoMPEG4Part2 mpeg4;
+ } info;
+
+ /** Describe size/location of the compressed video data.
+ Set to 0 when freeing bitstream_buffers. */
+ int bitstream_buffers_allocated;
+ int bitstream_buffers_used;
+ /** The user is responsible for freeing this buffer using av_freep(). */
+ VdpBitstreamBuffer *bitstream_buffers;
+};
+
+/* @}*/
+
+#endif /* AVCODEC_VDPAU_H */
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavcodec/version.h b/dom/media/platforms/ffmpeg/libav53/include/libavcodec/version.h
new file mode 100644
index 000000000..77e16823f
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavcodec/version.h
@@ -0,0 +1,126 @@
+/*
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VERSION_H
+#define AVCODEC_VERSION_H
+
+#define LIBAVCODEC_VERSION_MAJOR 53
+#define LIBAVCODEC_VERSION_MINOR 35
+#define LIBAVCODEC_VERSION_MICRO 0
+
+#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
+ LIBAVCODEC_VERSION_MINOR, \
+ LIBAVCODEC_VERSION_MICRO)
+#define LIBAVCODEC_VERSION AV_VERSION(LIBAVCODEC_VERSION_MAJOR, \
+ LIBAVCODEC_VERSION_MINOR, \
+ LIBAVCODEC_VERSION_MICRO)
+#define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT
+
+#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION)
+
+/**
+ * Those FF_API_* defines are not part of public API.
+ * They may change, break or disappear at any time.
+ */
+#ifndef FF_API_PALETTE_CONTROL
+#define FF_API_PALETTE_CONTROL (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_OLD_SAMPLE_FMT
+#define FF_API_OLD_SAMPLE_FMT (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_OLD_AUDIOCONVERT
+#define FF_API_OLD_AUDIOCONVERT (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_ANTIALIAS_ALGO
+#define FF_API_ANTIALIAS_ALGO (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_REQUEST_CHANNELS
+#define FF_API_REQUEST_CHANNELS (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_OPT_H
+#define FF_API_OPT_H (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_THREAD_INIT
+#define FF_API_THREAD_INIT (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_OLD_FF_PICT_TYPES
+#define FF_API_OLD_FF_PICT_TYPES (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_FLAC_GLOBAL_OPTS
+#define FF_API_FLAC_GLOBAL_OPTS (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_GET_PIX_FMT_NAME
+#define FF_API_GET_PIX_FMT_NAME (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_ALLOC_CONTEXT
+#define FF_API_ALLOC_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_AVCODEC_OPEN
+#define FF_API_AVCODEC_OPEN (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_DRC_SCALE
+#define FF_API_DRC_SCALE (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_ER
+#define FF_API_ER (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_AVCODEC_INIT
+#define FF_API_AVCODEC_INIT (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_X264_GLOBAL_OPTS
+#define FF_API_X264_GLOBAL_OPTS (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_MPEGVIDEO_GLOBAL_OPTS
+#define FF_API_MPEGVIDEO_GLOBAL_OPTS (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_LAME_GLOBAL_OPTS
+#define FF_API_LAME_GLOBAL_OPTS (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_SNOW_GLOBAL_OPTS
+#define FF_API_SNOW_GLOBAL_OPTS (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_MJPEG_GLOBAL_OPTS
+#define FF_API_MJPEG_GLOBAL_OPTS (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_GET_ALPHA_INFO
+#define FF_API_GET_ALPHA_INFO (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_PARSE_FRAME
+#define FF_API_PARSE_FRAME (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_INTERNAL_CONTEXT
+#define FF_API_INTERNAL_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_TIFFENC_COMPLEVEL
+#define FF_API_TIFFENC_COMPLEVEL (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_DATA_POINTERS
+#define FF_API_DATA_POINTERS (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_OLD_DECODE_AUDIO
+#define FF_API_OLD_DECODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_AVFRAME_AGE
+#define FF_API_AVFRAME_AGE (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_OLD_ENCODE_AUDIO
+#define FF_API_OLD_ENCODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 54)
+#endif
+
+#endif /* AVCODEC_VERSION_H */
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavcodec/xvmc.h b/dom/media/platforms/ffmpeg/libav53/include/libavcodec/xvmc.h
new file mode 100644
index 000000000..1239015fc
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavcodec/xvmc.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2003 Ivan Kalvachev
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_XVMC_H
+#define AVCODEC_XVMC_H
+
+#include <X11/extensions/XvMC.h>
+
+#include "avcodec.h"
+
+#define AV_XVMC_ID 0x1DC711C0 /**< special value to ensure that regular pixel routines haven't corrupted the struct;
+ the number is 1337-speak for the letters IDCT MCo (motion compensation) */
+
+struct xvmc_pix_fmt {
+ /** The field contains the special constant value AV_XVMC_ID.
+ It is used as a test that the application correctly uses the API,
+ and that there is no corruption caused by pixel routines.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ int xvmc_id;
+
+ /** Pointer to the block array allocated by XvMCCreateBlocks().
+ The array has to be freed by XvMCDestroyBlocks().
+ Each group of 64 values represents one data block of differential
+ pixel information (in MoCo mode) or coefficients for IDCT.
+ - application - set the pointer during initialization
+ - libavcodec - fills coefficients/pixel data into the array
+ */
+ short* data_blocks;
+
+ /** Pointer to the macroblock description array allocated by
+ XvMCCreateMacroBlocks() and freed by XvMCDestroyMacroBlocks().
+ - application - set the pointer during initialization
+ - libavcodec - fills description data into the array
+ */
+ XvMCMacroBlock* mv_blocks;
+
+ /** Number of macroblock descriptions that can be stored in the mv_blocks
+ array.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ int allocated_mv_blocks;
+
+ /** Number of blocks that can be stored at once in the data_blocks array.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ int allocated_data_blocks;
+
+ /** Indicate that the hardware would interpret data_blocks as IDCT
+ coefficients and perform IDCT on them.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ int idct;
+
+ /** In MoCo mode it indicates that intra macroblocks are assumed to be in
+ unsigned format; same as the XVMC_INTRA_UNSIGNED flag.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ int unsigned_intra;
+
+ /** Pointer to the surface allocated by XvMCCreateSurface().
+ It has to be freed by XvMCDestroySurface() on application exit.
+ It identifies the frame and its state on the video hardware.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ XvMCSurface* p_surface;
+
+/** Set by the decoder before calling ff_draw_horiz_band(),
+ needed by the XvMCRenderSurface function. */
+//@{
+ /** Pointer to the surface used as past reference
+ - application - unchanged
+ - libavcodec - set
+ */
+ XvMCSurface* p_past_surface;
+
+ /** Pointer to the surface used as future reference
+ - application - unchanged
+ - libavcodec - set
+ */
+ XvMCSurface* p_future_surface;
+
+ /** top/bottom field or frame
+ - application - unchanged
+ - libavcodec - set
+ */
+ unsigned int picture_structure;
+
+ /** XVMC_SECOND_FIELD - 1st or 2nd field in the sequence
+ - application - unchanged
+ - libavcodec - set
+ */
+ unsigned int flags;
+//@}
+
+ /** Number of macroblock descriptions in the mv_blocks array
+ that have already been passed to the hardware.
+ - application - zeroes it on get_buffer().
+ A successful ff_draw_horiz_band() may increment it
+ with filled_mv_blocks_num or zero both.
+ - libavcodec - unchanged
+ */
+ int start_mv_blocks_num;
+
+ /** Number of new macroblock descriptions in the mv_blocks array (after
+ start_mv_blocks_num) that are filled by libavcodec and have to be
+ passed to the hardware.
+ - application - zeroes it on get_buffer() or after successful
+ ff_draw_horiz_band().
+ - libavcodec - increment with one of each stored MB
+ */
+ int filled_mv_blocks_num;
+
+ /** Number of the next free data block; one data block consists of
+ 64 short values in the data_blocks array.
+ All blocks before this one have already been claimed by placing their
+ position into the corresponding block description structure fields,
+ which are part of the mv_blocks array.
+ - application - zeroes it on get_buffer().
+ A successful ff_draw_horiz_band() may zero it together
+ with start_mv_blocks_num.
+ - libavcodec - each decoded macroblock increases it by the number
+ of coded blocks it contains.
+ */
+ int next_free_data_block_num;
+};
+
+#endif /* AVCODEC_XVMC_H */
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/adler32.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/adler32.h
new file mode 100644
index 000000000..a8ff6f9d4
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/adler32.h
@@ -0,0 +1,43 @@
+/*
+ * copyright (c) 2006 Mans Rullgard
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_ADLER32_H
+#define AVUTIL_ADLER32_H
+
+#include <stdint.h>
+#include "attributes.h"
+
+/**
+ * @ingroup lavu_crypto
+ * Calculate the Adler32 checksum of a buffer.
+ *
+ * Passing the return value to a subsequent av_adler32_update() call
+ * allows the checksum of multiple buffers to be calculated as though
+ * they were concatenated.
+ *
+ * @param adler initial checksum value
+ * @param buf pointer to input buffer
+ * @param len size of input buffer
+ * @return updated checksum
+ */
+unsigned long av_adler32_update(unsigned long adler, const uint8_t *buf,
+ unsigned int len) av_pure;
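+
+/*
+ * A short sketch of chaining checksums over two buffers, as described above.
+ * The conventional Adler-32 seed of 1 is assumed as the initial value; part1
+ * and part2 are placeholder buffers.
+ *
+ *   unsigned long sum = 1;
+ *   sum = av_adler32_update(sum, part1, part1_len);
+ *   sum = av_adler32_update(sum, part2, part2_len);
+ *   // sum now equals the checksum of part1 followed by part2
+ */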
+
+#endif /* AVUTIL_ADLER32_H */
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/aes.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/aes.h
new file mode 100644
index 000000000..cf7b46209
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/aes.h
@@ -0,0 +1,57 @@
+/*
+ * copyright (c) 2007 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AES_H
+#define AVUTIL_AES_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_aes AES
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+extern const int av_aes_size;
+
+struct AVAES;
+
+/**
+ * Initialize an AVAES context.
+ * @param key_bits 128, 192 or 256
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+int av_aes_init(struct AVAES *a, const uint8_t *key, int key_bits, int decrypt);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context.
+ * @param count number of 16 byte blocks
+ * @param dst destination array, can be equal to src
+ * @param src source array, can be equal to dst
+ * @param iv initialization vector for CBC mode, if NULL then ECB will be used
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_aes_crypt(struct AVAES *a, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt);
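+
+/*
+ * A minimal sketch of one-shot AES-128 ECB encryption with the API above. It
+ * assumes the opaque context is allocated with av_malloc() (libavutil/mem.h)
+ * using the exported av_aes_size; key, src, dst and nblocks are placeholders
+ * provided by the caller.
+ *
+ *   struct AVAES *aes = av_malloc(av_aes_size);
+ *   if (aes && av_aes_init(aes, key, 128, 0) >= 0)      // 0 = encrypt
+ *       av_aes_crypt(aes, dst, src, nblocks, NULL, 0);  // NULL iv selects ECB
+ *   av_free(aes);
+ */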
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_AES_H */
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/attributes.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/attributes.h
new file mode 100644
index 000000000..ef990a1d4
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/attributes.h
@@ -0,0 +1,136 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Macro definitions for various function/variable attributes
+ */
+
+#ifndef AVUTIL_ATTRIBUTES_H
+#define AVUTIL_ATTRIBUTES_H
+
+#ifdef __GNUC__
+# define AV_GCC_VERSION_AT_LEAST(x,y) (__GNUC__ > x || __GNUC__ == x && __GNUC_MINOR__ >= y)
+#else
+# define AV_GCC_VERSION_AT_LEAST(x,y) 0
+#endif
+
+#ifndef av_always_inline
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_always_inline __attribute__((always_inline)) inline
+#else
+# define av_always_inline inline
+#endif
+#endif
+
+#ifndef av_noinline
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_noinline __attribute__((noinline))
+#else
+# define av_noinline
+#endif
+#endif
+
+#ifndef av_pure
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_pure __attribute__((pure))
+#else
+# define av_pure
+#endif
+#endif
+
+#ifndef av_const
+#if AV_GCC_VERSION_AT_LEAST(2,6)
+# define av_const __attribute__((const))
+#else
+# define av_const
+#endif
+#endif
+
+#ifndef av_cold
+#if AV_GCC_VERSION_AT_LEAST(4,3)
+# define av_cold __attribute__((cold))
+#else
+# define av_cold
+#endif
+#endif
+
+#ifndef av_flatten
+#if AV_GCC_VERSION_AT_LEAST(4,1)
+# define av_flatten __attribute__((flatten))
+#else
+# define av_flatten
+#endif
+#endif
+
+#ifndef attribute_deprecated
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define attribute_deprecated __attribute__((deprecated))
+#else
+# define attribute_deprecated
+#endif
+#endif
+
+#ifndef av_unused
+#if defined(__GNUC__)
+# define av_unused __attribute__((unused))
+#else
+# define av_unused
+#endif
+#endif
+
+/**
+ * Mark a variable as used and prevent the compiler from optimizing it
+ * away. This is useful for variables accessed only from inline
+ * assembler without the compiler being aware.
+ */
+#ifndef av_used
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_used __attribute__((used))
+#else
+# define av_used
+#endif
+#endif
+
+#ifndef av_alias
+#if AV_GCC_VERSION_AT_LEAST(3,3)
+# define av_alias __attribute__((may_alias))
+#else
+# define av_alias
+#endif
+#endif
+
+#ifndef av_uninit
+#if defined(__GNUC__) && !defined(__ICC)
+# define av_uninit(x) x=x
+#else
+# define av_uninit(x) x
+#endif
+#endif
+
+#ifdef __GNUC__
+# define av_builtin_constant_p __builtin_constant_p
+# define av_printf_format(fmtpos, attrpos) __attribute__((__format__(__printf__, fmtpos, attrpos)))
+#else
+# define av_builtin_constant_p(x) 0
+# define av_printf_format(fmtpos, attrpos)
+#endif
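+
+/*
+ * A small sketch of how these annotations are typically applied; my_log(),
+ * lookup() and clip255() are hypothetical examples, not part of this header.
+ *
+ *   void my_log(const char *fmt, ...) av_printf_format(1, 2);
+ *   int lookup(const char *name) av_pure;  // result depends only on its arguments/memory
+ *   static av_always_inline int clip255(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }
+ */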
+
+#endif /* AVUTIL_ATTRIBUTES_H */
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/audio_fifo.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/audio_fifo.h
new file mode 100644
index 000000000..8c7638825
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/audio_fifo.h
@@ -0,0 +1,146 @@
+/*
+ * Audio FIFO
+ * Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Audio FIFO Buffer
+ */
+
+#ifndef AVUTIL_AUDIO_FIFO_H
+#define AVUTIL_AUDIO_FIFO_H
+
+#include "avutil.h"
+#include "fifo.h"
+#include "samplefmt.h"
+
+/**
+ * @addtogroup lavu_audio
+ * @{
+ */
+
+/**
+ * Context for an Audio FIFO Buffer.
+ *
+ * - Operates at the sample level rather than the byte level.
+ * - Supports multiple channels with either planar or packed sample format.
+ * - Automatic reallocation when writing to a full buffer.
+ */
+typedef struct AVAudioFifo AVAudioFifo;
+
+/**
+ * Free an AVAudioFifo.
+ *
+ * @param af AVAudioFifo to free
+ */
+void av_audio_fifo_free(AVAudioFifo *af);
+
+/**
+ * Allocate an AVAudioFifo.
+ *
+ * @param sample_fmt sample format
+ * @param channels number of channels
+ * @param nb_samples initial allocation size, in samples
+ * @return newly allocated AVAudioFifo, or NULL on error
+ */
+AVAudioFifo *av_audio_fifo_alloc(enum AVSampleFormat sample_fmt, int channels,
+ int nb_samples);
+
+/**
+ * Reallocate an AVAudioFifo.
+ *
+ * @param af AVAudioFifo to reallocate
+ * @param nb_samples new allocation size, in samples
+ * @return 0 if OK, or negative AVERROR code on failure
+ */
+int av_audio_fifo_realloc(AVAudioFifo *af, int nb_samples);
+
+/**
+ * Write data to an AVAudioFifo.
+ *
+ * The AVAudioFifo will be reallocated automatically if the available space
+ * is less than nb_samples.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param af AVAudioFifo to write to
+ * @param data audio data plane pointers
+ * @param nb_samples number of samples to write
+ * @return number of samples actually written, or negative AVERROR
+ * code on failure.
+ */
+int av_audio_fifo_write(AVAudioFifo *af, void **data, int nb_samples);
+
+/**
+ * Read data from an AVAudioFifo.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param af AVAudioFifo to read from
+ * @param data audio data plane pointers
+ * @param nb_samples number of samples to read
+ * @return number of samples actually read, or negative AVERROR code
+ * on failure.
+ */
+int av_audio_fifo_read(AVAudioFifo *af, void **data, int nb_samples);
+
+/**
+ * Drain data from an AVAudioFifo.
+ *
+ * Removes the data without reading it.
+ *
+ * @param af AVAudioFifo to drain
+ * @param nb_samples number of samples to drain
+ * @return 0 if OK, or negative AVERROR code on failure
+ */
+int av_audio_fifo_drain(AVAudioFifo *af, int nb_samples);
+
+/**
+ * Reset the AVAudioFifo buffer.
+ *
+ * This empties all data in the buffer.
+ *
+ * @param af AVAudioFifo to reset
+ */
+void av_audio_fifo_reset(AVAudioFifo *af);
+
+/**
+ * Get the current number of samples in the AVAudioFifo available for reading.
+ *
+ * @param af the AVAudioFifo to query
+ * @return number of samples available for reading
+ */
+int av_audio_fifo_size(AVAudioFifo *af);
+
+/**
+ * Get the current number of samples in the AVAudioFifo available for writing.
+ *
+ * @param af the AVAudioFifo to query
+ * @return number of samples available for writing
+ */
+int av_audio_fifo_space(AVAudioFifo *af);
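+
+/*
+ * A minimal round-trip sketch for the FIFO API above, assuming packed signed
+ * 16-bit stereo samples; input_samples, output_samples, nb_in_samples and
+ * frame_size are placeholders provided by the caller (a packed format uses a
+ * single data plane).
+ *
+ *   AVAudioFifo *fifo = av_audio_fifo_alloc(AV_SAMPLE_FMT_S16, 2, 1024);
+ *   if (fifo) {
+ *       void *in[1]  = { input_samples };
+ *       void *out[1] = { output_samples };
+ *
+ *       av_audio_fifo_write(fifo, in, nb_in_samples);   // grows automatically if needed
+ *       while (av_audio_fifo_size(fifo) >= frame_size)
+ *           av_audio_fifo_read(fifo, out, frame_size);
+ *       av_audio_fifo_free(fifo);
+ *   }
+ */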
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_AUDIO_FIFO_H */
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/audioconvert.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/audioconvert.h
new file mode 100644
index 000000000..00ed0ff7b
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/audioconvert.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2008 Peter Ross
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AUDIOCONVERT_H
+#define AVUTIL_AUDIOCONVERT_H
+
+#include <stdint.h>
+
+/**
+ * @file
+ * audio conversion routines
+ */
+
+/**
+ * @addtogroup lavu_audio
+ * @{
+ */
+
+/**
+ * @defgroup channel_masks Audio channel masks
+ * @{
+ */
+#define AV_CH_FRONT_LEFT 0x00000001
+#define AV_CH_FRONT_RIGHT 0x00000002
+#define AV_CH_FRONT_CENTER 0x00000004
+#define AV_CH_LOW_FREQUENCY 0x00000008
+#define AV_CH_BACK_LEFT 0x00000010
+#define AV_CH_BACK_RIGHT 0x00000020
+#define AV_CH_FRONT_LEFT_OF_CENTER 0x00000040
+#define AV_CH_FRONT_RIGHT_OF_CENTER 0x00000080
+#define AV_CH_BACK_CENTER 0x00000100
+#define AV_CH_SIDE_LEFT 0x00000200
+#define AV_CH_SIDE_RIGHT 0x00000400
+#define AV_CH_TOP_CENTER 0x00000800
+#define AV_CH_TOP_FRONT_LEFT 0x00001000
+#define AV_CH_TOP_FRONT_CENTER 0x00002000
+#define AV_CH_TOP_FRONT_RIGHT 0x00004000
+#define AV_CH_TOP_BACK_LEFT 0x00008000
+#define AV_CH_TOP_BACK_CENTER 0x00010000
+#define AV_CH_TOP_BACK_RIGHT 0x00020000
+#define AV_CH_STEREO_LEFT 0x20000000 ///< Stereo downmix.
+#define AV_CH_STEREO_RIGHT 0x40000000 ///< See AV_CH_STEREO_LEFT.
+#define AV_CH_WIDE_LEFT 0x0000000080000000ULL
+#define AV_CH_WIDE_RIGHT 0x0000000100000000ULL
+#define AV_CH_SURROUND_DIRECT_LEFT 0x0000000200000000ULL
+#define AV_CH_SURROUND_DIRECT_RIGHT 0x0000000400000000ULL
+
+/** Channel mask value used for AVCodecContext.request_channel_layout
+ to indicate that the user requests the channel order of the decoder output
+ to be the native codec channel order. */
+#define AV_CH_LAYOUT_NATIVE 0x8000000000000000ULL
+
+/**
+ * @}
+ * @defgroup channel_mask_c Audio channel convenience macros
+ * @{
+ */
+#define AV_CH_LAYOUT_MONO (AV_CH_FRONT_CENTER)
+#define AV_CH_LAYOUT_STEREO (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT)
+#define AV_CH_LAYOUT_2POINT1 (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_2_1 (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_SURROUND (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER)
+#define AV_CH_LAYOUT_3POINT1 (AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_4POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_4POINT1 (AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_2_2 (AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)
+#define AV_CH_LAYOUT_QUAD (AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_5POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)
+#define AV_CH_LAYOUT_5POINT1 (AV_CH_LAYOUT_5POINT0|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_5POINT0_BACK (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_5POINT1_BACK (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_6POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT0_FRONT (AV_CH_LAYOUT_2_2|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_HEXAGONAL (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1_FRONT (AV_CH_LAYOUT_6POINT0_FRONT|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_7POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_7POINT0_FRONT (AV_CH_LAYOUT_5POINT0|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_7POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_7POINT1_WIDE (AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_OCTAGONAL (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_STEREO_DOWNMIX (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT)
+
+/**
+ * @}
+ */
+
+/**
+ * Return a channel layout id that matches name, 0 if no match.
+ */
+uint64_t av_get_channel_layout(const char *name);
+
+/**
+ * Return a description of a channel layout.
+ * If nb_channels is <= 0, it is guessed from the channel_layout.
+ *
+ * @param buf buffer in which to store the channel layout string
+ * @param buf_size size in bytes of the buffer
+ */
+void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout);
+
+/**
+ * Return the number of channels in the channel layout.
+ */
+int av_get_channel_layout_nb_channels(uint64_t channel_layout);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_AUDIOCONVERT_H */
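
A minimal sketch of how the channel-layout helpers above fit together; the expected values in the comments follow from the masks defined in this header.

#include <stdio.h>
#include <libavutil/audioconvert.h>

int main(void)
{
    uint64_t layout = AV_CH_LAYOUT_5POINT1;
    char name[128];

    int channels = av_get_channel_layout_nb_channels(layout);       /* expected: 6 */
    av_get_channel_layout_string(name, sizeof(name), channels, layout);
    printf("%s: %d channels\n", name, channels);

    /* Parsing goes the other way, from a layout name to a mask. */
    uint64_t stereo = av_get_channel_layout("stereo");
    printf("stereo mask: 0x%llx\n", (unsigned long long)stereo);     /* expected: 0x3 */
    return 0;
}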
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/avassert.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/avassert.h
new file mode 100644
index 000000000..b223d26e8
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/avassert.h
@@ -0,0 +1,66 @@
+/*
+ * copyright (c) 2010 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * simple assert() macros that are a bit more flexible than ISO C assert().
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVUTIL_AVASSERT_H
+#define AVUTIL_AVASSERT_H
+
+#include <stdlib.h>
+#include "avutil.h"
+#include "log.h"
+
+/**
+ * assert() equivalent, that is always enabled.
+ */
+#define av_assert0(cond) do { \
+ if (!(cond)) { \
+ av_log(NULL, AV_LOG_FATAL, "Assertion %s failed at %s:%d\n", \
+ AV_STRINGIFY(cond), __FILE__, __LINE__); \
+ abort(); \
+ } \
+} while (0)
+
+
+/**
+ * assert() equivalent that does not lie in speed-critical code.
+ * These asserts can thus be enabled without fear of a speed loss.
+ */
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 0
+#define av_assert1(cond) av_assert0(cond)
+#else
+#define av_assert1(cond) ((void)0)
+#endif
+
+
+/**
+ * assert() equivalent that does lie in speed-critical code.
+ */
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1
+#define av_assert2(cond) av_assert0(cond)
+#else
+#define av_assert2(cond) ((void)0)
+#endif
+
+#endif /* AVUTIL_AVASSERT_H */
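
A short sketch of how the three assert levels differ in practice; ASSERT_LEVEL must be defined before the header is included for av_assert1()/av_assert2() to expand to real checks.

#define ASSERT_LEVEL 1            /* enables av_assert1(), leaves av_assert2() compiled out */
#include <libavutil/avassert.h>

static int checked_div(int a, int b)
{
    av_assert0(b != 0);           /* always active: logs via av_log() and aborts on failure */
    av_assert1(a >= 0);           /* active because ASSERT_LEVEL >= 1 */
    av_assert2(a < 1000000);      /* would need ASSERT_LEVEL > 1 */
    return a / b;
}

int main(void)
{
    return checked_div(10, 2) == 5 ? 0 : 1;
}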
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/avconfig.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/avconfig.h
new file mode 100644
index 000000000..f10aa6186
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/avconfig.h
@@ -0,0 +1,6 @@
+/* Generated by ffconf */
+#ifndef AVUTIL_AVCONFIG_H
+#define AVUTIL_AVCONFIG_H
+#define AV_HAVE_BIGENDIAN 0
+#define AV_HAVE_FAST_UNALIGNED 1
+#endif /* AVUTIL_AVCONFIG_H */
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/avstring.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/avstring.h
new file mode 100644
index 000000000..ed4e465cb
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/avstring.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2007 Mans Rullgard
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AVSTRING_H
+#define AVUTIL_AVSTRING_H
+
+#include <stddef.h>
+#include "attributes.h"
+
+/**
+ * @addtogroup lavu_string
+ * @{
+ */
+
+/**
+ * Return non-zero if pfx is a prefix of str. If it is, *ptr is set to
+ * the address of the first character in str after the prefix.
+ *
+ * @param str input string
+ * @param pfx prefix to test
+ * @param ptr updated if the prefix is matched inside str
+ * @return non-zero if the prefix matches, zero otherwise
+ */
+int av_strstart(const char *str, const char *pfx, const char **ptr);
+
+/**
+ * Return non-zero if pfx is a prefix of str independent of case. If
+ * it is, *ptr is set to the address of the first character in str
+ * after the prefix.
+ *
+ * @param str input string
+ * @param pfx prefix to test
+ * @param ptr updated if the prefix is matched inside str
+ * @return non-zero if the prefix matches, zero otherwise
+ */
+int av_stristart(const char *str, const char *pfx, const char **ptr);
+
+/**
+ * Locate the first case-independent occurrence in the string haystack
+ * of the string needle. A zero-length string needle is considered to
+ * match at the start of haystack.
+ *
+ * This function is a case-insensitive version of the standard strstr().
+ *
+ * @param haystack string to search in
+ * @param needle string to search for
+ * @return pointer to the located match within haystack
+ * or a null pointer if no match
+ */
+char *av_stristr(const char *haystack, const char *needle);
+
+/**
+ * Copy the string src to dst, but no more than size - 1 bytes, and
+ * null-terminate dst.
+ *
+ * This function is the same as BSD strlcpy().
+ *
+ * @param dst destination buffer
+ * @param src source string
+ * @param size size of destination buffer
+ * @return the length of src
+ *
+ * @warning since the return value is the length of src, src absolutely
+ * _must_ be a properly 0-terminated string, otherwise this will read beyond
+ * the end of the buffer and possibly crash.
+ */
+size_t av_strlcpy(char *dst, const char *src, size_t size);
+
+/**
+ * Append the string src to the string dst, but to a total length of
+ * no more than size - 1 bytes, and null-terminate dst.
+ *
+ * This function is similar to BSD strlcat(), but differs when
+ * size <= strlen(dst).
+ *
+ * @param dst destination buffer
+ * @param src source string
+ * @param size size of destination buffer
+ * @return the total length of src and dst
+ *
+ * @warning since the return value uses the lengths of src and dst, these
+ * absolutely _must_ be properly 0-terminated strings, otherwise this
+ * will read beyond the end of the buffer and possibly crash.
+ */
+size_t av_strlcat(char *dst, const char *src, size_t size);
+
+/**
+ * Append output to a string, according to a format. Never write out of
+ * the destination buffer, and always put a terminating 0 within
+ * the buffer.
+ * @param dst destination buffer (string to which the output is
+ * appended)
+ * @param size total size of the destination buffer
+ * @param fmt printf-compatible format string, specifying how the
+ * following parameters are used
+ * @return the length of the string that would have been generated
+ * if enough space had been available
+ */
+size_t av_strlcatf(char *dst, size_t size, const char *fmt, ...) av_printf_format(3, 4);
+
+/**
+ * Convert a number to an av_malloc()ed string.
+ */
+char *av_d2str(double d);
+
+/**
+ * Unescape the given string until a non-escaped terminating char,
+ * and return the token corresponding to the unescaped string.
+ *
+ * The normal \ and ' escaping is supported. Leading and trailing
+ * whitespace is removed, unless it is escaped with '\' or
+ * enclosed between ''.
+ *
+ * @param buf the buffer to parse, buf will be updated to point to the
+ * terminating char
+ * @param term a 0-terminated list of terminating chars
+ * @return the malloced unescaped string, which must be av_freed by
+ * the user, NULL in case of allocation failure
+ */
+char *av_get_token(const char **buf, const char *term);
+
+/**
+ * Locale-independent conversion of ASCII characters to uppercase.
+ */
+static inline int av_toupper(int c)
+{
+ if (c >= 'a' && c <= 'z')
+ c ^= 0x20;
+ return c;
+}
+
+/**
+ * Locale-independent conversion of ASCII characters to lowercase.
+ */
+static inline int av_tolower(int c)
+{
+ if (c >= 'A' && c <= 'Z')
+ c ^= 0x20;
+ return c;
+}
+
+/**
+ * Locale-independent case-insensitive compare.
+ * @note This means only ASCII-range characters are case-insensitive
+ */
+int av_strcasecmp(const char *a, const char *b);
+
+/**
+ * Locale-independent case-insensitive compare.
+ * @note This means only ASCII-range characters are case-insensitive
+ */
+int av_strncasecmp(const char *a, const char *b, size_t n);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_AVSTRING_H */
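
The string helpers above are small enough that a single sketch covers the common cases; the expected results are noted in comments.

#include <stdio.h>
#include <libavutil/avstring.h>

int main(void)
{
    const char *rest;
    if (av_strstart("rtsp://example.com/stream", "rtsp://", &rest))
        printf("after prefix: %s\n", rest);        /* "example.com/stream" */

    char buf[32];
    av_strlcpy(buf, "bitrate=", sizeof(buf));      /* always 0-terminated */
    av_strlcatf(buf, sizeof(buf), "%d", 128000);   /* bounded printf-style append */
    printf("%s\n", buf);                           /* "bitrate=128000" */

    printf("%d\n", av_strcasecmp("AAC", "aac"));   /* 0: ASCII-only case folding */
    printf("%c\n", (char)av_toupper('x'));         /* 'X', locale-independent */
    return 0;
}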
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/avutil.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/avutil.h
new file mode 100644
index 000000000..05e924837
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/avutil.h
@@ -0,0 +1,326 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AVUTIL_H
+#define AVUTIL_AVUTIL_H
+
+/**
+ * @file
+ * external API header
+ */
+
+/**
+ * @mainpage
+ *
+ * @section libav_intro Introduction
+ *
+ * This document describes the usage of the different libraries
+ * provided by Libav.
+ *
+ * @li @ref libavc "libavcodec" encoding/decoding library
+ * @li @subpage libavfilter graph based frame editing library
+ * @li @ref libavf "libavformat" I/O and muxing/demuxing library
+ * @li @ref lavd "libavdevice" special devices muxing/demuxing library
+ * @li @ref lavu "libavutil" common utility library
+ * @li @subpage libpostproc post processing library
+ * @li @subpage libswscale color conversion and scaling library
+ *
+ */
+
+/**
+ * @defgroup lavu Common utility functions
+ *
+ * @brief
+ * libavutil contains the code shared across all the other Libav
+ * libraries
+ *
+ * @note In order to use the functions provided by avutil you must include
+ * the specific header.
+ *
+ * @{
+ *
+ * @defgroup lavu_crypto Crypto and Hashing
+ *
+ * @{
+ * @}
+ *
+ * @defgroup lavu_math Maths
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_string String Manipulation
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_mem Memory Management
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_data Data Structures
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_audio Audio related
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_error Error Codes
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_misc Other
+ *
+ * @{
+ *
+ * @defgroup lavu_internal Internal
+ *
+ * Not exported functions, for internal usage only
+ *
+ * @{
+ *
+ * @}
+ */
+
+
+/**
+ * @defgroup preproc_misc Preprocessor String Macros
+ *
+ * String manipulation macros
+ *
+ * @{
+ */
+
+#define AV_STRINGIFY(s) AV_TOSTRING(s)
+#define AV_TOSTRING(s) #s
+
+#define AV_GLUE(a, b) a ## b
+#define AV_JOIN(a, b) AV_GLUE(a, b)
+
+#define AV_PRAGMA(s) _Pragma(#s)
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup version_utils Library Version Macros
+ *
+ * Useful to check and match library version in order to maintain
+ * backward compatibility.
+ *
+ * @{
+ */
+
+#define AV_VERSION_INT(a, b, c) (a<<16 | b<<8 | c)
+#define AV_VERSION_DOT(a, b, c) a ##.## b ##.## c
+#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c)
+
+/**
+ * @}
+ *
+ * @defgroup lavu_ver Version and Build diagnostics
+ *
+ * Macros and functions useful to check at compile time and at runtime
+ * which version of libavutil is in use.
+ *
+ * @{
+ */
+
+#define LIBAVUTIL_VERSION_MAJOR 51
+#define LIBAVUTIL_VERSION_MINOR 22
+#define LIBAVUTIL_VERSION_MICRO 1
+
+#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
+ LIBAVUTIL_VERSION_MINOR, \
+ LIBAVUTIL_VERSION_MICRO)
+#define LIBAVUTIL_VERSION AV_VERSION(LIBAVUTIL_VERSION_MAJOR, \
+ LIBAVUTIL_VERSION_MINOR, \
+ LIBAVUTIL_VERSION_MICRO)
+#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT
+
+#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION)
+
+/**
+ * @}
+ *
+ * @defgroup depr_guards Deprecation guards
+ * Those FF_API_* defines are not part of public API.
+ * They may change, break or disappear at any time.
+ *
+ * They are used mostly internally to mark code that will be removed
+ * on the next major version.
+ *
+ * @{
+ */
+#ifndef FF_API_GET_BITS_PER_SAMPLE_FMT
+#define FF_API_GET_BITS_PER_SAMPLE_FMT (LIBAVUTIL_VERSION_MAJOR < 52)
+#endif
+#ifndef FF_API_FIND_OPT
+#define FF_API_FIND_OPT (LIBAVUTIL_VERSION_MAJOR < 52)
+#endif
+#ifndef FF_API_AV_FIFO_PEEK
+#define FF_API_AV_FIFO_PEEK (LIBAVUTIL_VERSION_MAJOR < 52)
+#endif
+#ifndef FF_API_OLD_AVOPTIONS
+#define FF_API_OLD_AVOPTIONS (LIBAVUTIL_VERSION_MAJOR < 52)
+#endif
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavu_ver
+ * @{
+ */
+
+/**
+ * Return the LIBAVUTIL_VERSION_INT constant.
+ */
+unsigned avutil_version(void);
+
+/**
+ * Return the libavutil build-time configuration.
+ */
+const char *avutil_configuration(void);
+
+/**
+ * Return the libavutil license.
+ */
+const char *avutil_license(void);
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavu_media Media Type
+ * @brief Media Type
+ */
+
+enum AVMediaType {
+ AVMEDIA_TYPE_UNKNOWN = -1, ///< Usually treated as AVMEDIA_TYPE_DATA
+ AVMEDIA_TYPE_VIDEO,
+ AVMEDIA_TYPE_AUDIO,
+ AVMEDIA_TYPE_DATA, ///< Opaque data information usually continuous
+ AVMEDIA_TYPE_SUBTITLE,
+ AVMEDIA_TYPE_ATTACHMENT, ///< Opaque data information usually sparse
+ AVMEDIA_TYPE_NB
+};
+
+/**
+ * @defgroup lavu_const Constants
+ * @{
+ *
+ * @defgroup lavu_enc Encoding specific
+ *
+ * @note those definitions should move to avcodec
+ * @{
+ */
+
+#define FF_LAMBDA_SHIFT 7
+#define FF_LAMBDA_SCALE (1<<FF_LAMBDA_SHIFT)
+#define FF_QP2LAMBDA 118 ///< factor to convert from H.263 QP to lambda
+#define FF_LAMBDA_MAX (256*128-1)
+
+#define FF_QUALITY_SCALE FF_LAMBDA_SCALE //FIXME maybe remove
+
+/**
+ * @}
+ * @defgroup lavu_time Timestamp specific
+ *
+ * Libav internal timebase and timestamp definitions
+ *
+ * @{
+ */
+
+/**
+ * @brief Undefined timestamp value
+ *
+ * Usually reported by demuxers that work on containers that do not provide
+ * either pts or dts.
+ */
+
+#define AV_NOPTS_VALUE INT64_C(0x8000000000000000)
+
+/**
+ * Internal time base represented as integer
+ */
+
+#define AV_TIME_BASE 1000000
+
+/**
+ * Internal time base represented as fractional value
+ */
+
+#define AV_TIME_BASE_Q (AVRational){1, AV_TIME_BASE}
+
+/**
+ * @}
+ * @}
+ * @defgroup lavu_picture Image related
+ *
+ * AVPicture types, pixel formats and basic image planes manipulation.
+ *
+ * @{
+ */
+
+enum AVPictureType {
+ AV_PICTURE_TYPE_I = 1, ///< Intra
+ AV_PICTURE_TYPE_P, ///< Predicted
+ AV_PICTURE_TYPE_B, ///< Bi-dir predicted
+ AV_PICTURE_TYPE_S, ///< S(GMC)-VOP MPEG4
+ AV_PICTURE_TYPE_SI, ///< Switching Intra
+ AV_PICTURE_TYPE_SP, ///< Switching Predicted
+ AV_PICTURE_TYPE_BI, ///< BI type
+};
+
+/**
+ * Return a single letter to describe the given picture type
+ * pict_type.
+ *
+ * @param[in] pict_type the picture type @return a single character
+ * representing the picture type, '?' if pict_type is unknown
+ */
+char av_get_picture_type_char(enum AVPictureType pict_type);
+
+/**
+ * @}
+ */
+
+#include "common.h"
+#include "error.h"
+
+/**
+ * @}
+ * @}
+ */
+
+#endif /* AVUTIL_AVUTIL_H */
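
A minimal sketch of the version machinery above: comparing the run-time library against the compile-time constants is the usual sanity check when libavutil is loaded dynamically.

#include <stdio.h>
#include <libavutil/avutil.h>

int main(void)
{
    unsigned runtime = avutil_version();

    if ((runtime >> 16) != LIBAVUTIL_VERSION_MAJOR)
        fprintf(stderr, "warning: built against major %d, running %u\n",
                LIBAVUTIL_VERSION_MAJOR, runtime >> 16);

    printf("%s, configuration: %s\n", LIBAVUTIL_IDENT, avutil_configuration());
    printf("license: %s\n", avutil_license());
    printf("keyframe picture type: %c\n",
           av_get_picture_type_char(AV_PICTURE_TYPE_I));   /* expected: 'I' */
    return 0;
}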
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/base64.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/base64.h
new file mode 100644
index 000000000..4750cf5c7
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/base64.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2006 Ryan Martell. (rdm4@martellventures.com)
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_BASE64_H
+#define AVUTIL_BASE64_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_base64 Base64
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+
+/**
+ * Decode a base64-encoded string.
+ *
+ * @param out buffer for decoded data
+ * @param in null-terminated input string
+ * @param out_size size in bytes of the out buffer, must be at
+ * least 3/4 of the length of in
+ * @return number of bytes written, or a negative value in case of
+ * invalid input
+ */
+int av_base64_decode(uint8_t *out, const char *in, int out_size);
+
+/**
+ * Encode data to base64 and null-terminate.
+ *
+ * @param out buffer for encoded data
+ * @param out_size size in bytes of the output buffer, must be at
+ * least AV_BASE64_SIZE(in_size)
+ * @param in_size size in bytes of the 'in' buffer
+ * @return 'out' or NULL in case of error
+ */
+char *av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size);
+
+/**
+ * Calculate the output size needed to base64-encode x bytes.
+ */
+#define AV_BASE64_SIZE(x) (((x)+2) / 3 * 4 + 1)
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_BASE64_H */
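
A round-trip sketch for the base64 helpers; AV_BASE64_SIZE() sizes the encode buffer (including the terminating 0), and the decode buffer is oversized to satisfy the 3/4-of-input requirement documented above.

#include <stdio.h>
#include <stdint.h>
#include <libavutil/base64.h>

int main(void)
{
    const uint8_t raw[] = { 'h', 'e', 'l', 'l', 'o' };
    char encoded[AV_BASE64_SIZE(sizeof(raw))];   /* worst case, incl. terminating 0 */
    uint8_t decoded[16];                         /* >= 3/4 of strlen(encoded) */

    if (!av_base64_encode(encoded, sizeof(encoded), raw, sizeof(raw)))
        return 1;
    printf("encoded: %s\n", encoded);            /* expected: "aGVsbG8=" */

    int n = av_base64_decode(decoded, encoded, sizeof(decoded));
    printf("decoded %d bytes\n", n);             /* expected: 5 */
    return 0;
}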
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/blowfish.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/blowfish.h
new file mode 100644
index 000000000..0b004532d
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/blowfish.h
@@ -0,0 +1,77 @@
+/*
+ * Blowfish algorithm
+ * Copyright (c) 2012 Samuel Pitoiset
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_BLOWFISH_H
+#define AVUTIL_BLOWFISH_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_blowfish Blowfish
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+#define AV_BF_ROUNDS 16
+
+typedef struct AVBlowfish {
+ uint32_t p[AV_BF_ROUNDS + 2];
+ uint32_t s[4][256];
+} AVBlowfish;
+
+/**
+ * Initialize an AVBlowfish context.
+ *
+ * @param ctx an AVBlowfish context
+ * @param key a key
+ * @param key_len length of the key
+ */
+void av_blowfish_init(struct AVBlowfish *ctx, const uint8_t *key, int key_len);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context.
+ *
+ * @param ctx an AVBlowfish context
+ * @param xl left four-byte half of the input to be encrypted
+ * @param xr right four-byte half of the input to be encrypted
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_blowfish_crypt_ecb(struct AVBlowfish *ctx, uint32_t *xl, uint32_t *xr,
+ int decrypt);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context.
+ *
+ * @param ctx an AVBlowfish context
+ * @param dst destination array, can be equal to src
+ * @param src source array, can be equal to dst
+ * @param count number of 8 byte blocks
+ * @param iv initialization vector for CBC mode, if NULL ECB will be used
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_blowfish_crypt(struct AVBlowfish *ctx, uint8_t *dst, const uint8_t *src,
+ int count, uint8_t *iv, int decrypt);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_BLOWFISH_H */
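
A minimal encrypt/decrypt round trip on a single 64-bit block, matching the ECB entry point declared above.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <libavutil/blowfish.h>

int main(void)
{
    AVBlowfish ctx;
    static const uint8_t key[] = "0123456789abcdef";
    av_blowfish_init(&ctx, key, (int)strlen((const char *)key));

    uint32_t xl = 0x11111111, xr = 0x22222222;
    av_blowfish_crypt_ecb(&ctx, &xl, &xr, 0);   /* encrypt in place */
    printf("ciphertext: %08x %08x\n", xl, xr);

    av_blowfish_crypt_ecb(&ctx, &xl, &xr, 1);   /* decrypt back */
    printf("plaintext:  %08x %08x\n", xl, xr);  /* expected: 11111111 22222222 */
    return 0;
}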
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/bprint.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/bprint.h
new file mode 100644
index 000000000..c09b61f20
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/bprint.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2012 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_BPRINT_H
+#define AVUTIL_BPRINT_H
+
+#include "attributes.h"
+
+/**
+ * Define a structure with extra padding to a fixed size.
+ * This helps ensure binary compatibility with future versions.
+ */
+#define FF_PAD_STRUCTURE(size, ...) \
+ __VA_ARGS__ \
+ char reserved_padding[size - sizeof(struct { __VA_ARGS__ })];
+
+/**
+ * Buffer to print data progressively
+ *
+ * The string buffer grows as necessary and is always 0-terminated.
+ * The content of the string is never accessed, and thus is
+ * encoding-agnostic and can even hold binary data.
+ *
+ * Small buffers are kept in the structure itself, and thus require no
+ * memory allocation at all (unless the contents of the buffer are needed
+ * after the structure goes out of scope). This is almost as lightweight as
+ * declaring a local "char buf[512]".
+ *
+ * The length of the string can go beyond the allocated size: the buffer is
+ * then truncated, but the functions still keep account of the actual total
+ * length.
+ *
+ * In other words, buf->len can be greater than buf->size and records the
+ * total length of what would have been written to the buffer if there had been
+ * enough memory.
+ *
+ * Append operations do not need to be tested for failure: if a memory
+ * allocation fails, data stops being appended to the buffer, but the length
+ * is still updated. This situation can be tested with
+ * av_bprint_is_complete().
+ *
+ * The size_max field determines several possible behaviours:
+ *
+ * size_max = -1 (= UINT_MAX) or any large value will let the buffer be
+ * reallocated as necessary, with an amortized linear cost.
+ *
+ * size_max = 0 prevents writing anything to the buffer: only the total
+ * length is computed. The write operations can then possibly be repeated in
+ * a buffer with exactly the necessary size
+ * (using size_init = size_max = len + 1).
+ *
+ * size_max = 1 is automatically replaced by the exact size available in the
+ * structure itself, thus ensuring no dynamic memory allocation. The
+ * internal buffer is large enough to hold a reasonable paragraph of text,
+ * such as the current paragraph.
+ */
+typedef struct AVBPrint {
+ FF_PAD_STRUCTURE(1024,
+ char *str; /** string so far */
+ unsigned len; /** length so far */
+ unsigned size; /** allocated memory */
+ unsigned size_max; /** maximum allocated memory */
+ char reserved_internal_buffer[1];
+ )
+} AVBPrint;
+
+/**
+ * Convenience macros for special values for av_bprint_init() size_max
+ * parameter.
+ */
+#define AV_BPRINT_SIZE_UNLIMITED ((unsigned)-1)
+#define AV_BPRINT_SIZE_AUTOMATIC 1
+#define AV_BPRINT_SIZE_COUNT_ONLY 0
+
+/**
+ * Init a print buffer.
+ *
+ * @param buf buffer to init
+ * @param size_init initial size (including the final 0)
+ * @param size_max maximum size;
+ * 0 means do not write anything, just count the length;
+ * 1 is replaced by the maximum value for automatic storage;
+ * any large value means that the internal buffer will be
+ * reallocated as needed up to that limit; -1 is converted to
+ * UINT_MAX, the largest limit possible.
+ * Check also AV_BPRINT_SIZE_* macros.
+ */
+void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max);
+
+/**
+ * Init a print buffer using a pre-existing buffer.
+ *
+ * The buffer will not be reallocated.
+ *
+ * @param buf buffer structure to init
+ * @param buffer byte buffer to use for the string data
+ * @param size size of buffer
+ */
+void av_bprint_init_for_buffer(AVBPrint *buf, char *buffer, unsigned size);
+
+/**
+ * Append a formatted string to a print buffer.
+ */
+void av_bprintf(AVBPrint *buf, const char *fmt, ...) av_printf_format(2, 3);
+
+/**
+ * Append char c n times to a print buffer.
+ */
+void av_bprint_chars(AVBPrint *buf, char c, unsigned n);
+
+/**
+ * Allocate bytes in the buffer for external use.
+ *
+ * @param[in] buf buffer structure
+ * @param[in] size required size
+ * @param[out] mem pointer to the memory area
+ * @param[out] actual_size size of the memory area after allocation;
+ * can be larger or smaller than size
+ */
+void av_bprint_get_buffer(AVBPrint *buf, unsigned size,
+ unsigned char **mem, unsigned *actual_size);
+
+/**
+ * Reset the string to "" but keep internal allocated data.
+ */
+void av_bprint_clear(AVBPrint *buf);
+
+/**
+ * Test if the print buffer is complete (not truncated).
+ *
+ * It may have been truncated due to a memory allocation failure
+ * or the size_max limit (compare size and size_max if necessary).
+ */
+static inline int av_bprint_is_complete(AVBPrint *buf)
+{
+ return buf->len < buf->size;
+}
+
+/**
+ * Finalize a print buffer.
+ *
+ * The print buffer can no longer be used afterwards,
+ * but the len and size fields are still valid.
+ *
+ * @param[out] ret_str if not NULL, used to return a permanent copy of the
+ * buffer contents, or NULL if memory allocation fails;
+ * if NULL, the buffer is discarded and freed
+ * @return 0 for success or error code (probably AVERROR(ENOMEM))
+ */
+int av_bprint_finalize(AVBPrint *buf, char **ret_str);
+
+#endif /* AVUTIL_BPRINT_H */
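
A minimal sketch of the AVBPrint lifecycle described above; av_free() from mem.h (not part of this header) is assumed for releasing the finalized string.

#include <stdio.h>
#include <libavutil/bprint.h>
#include <libavutil/mem.h>      /* av_free(), assumed from elsewhere in this tree */

int main(void)
{
    AVBPrint bp;
    char *result = NULL;

    av_bprint_init(&bp, 0, AV_BPRINT_SIZE_UNLIMITED);   /* reallocate as needed */
    av_bprintf(&bp, "resolution=%dx%d", 1920, 1080);
    av_bprint_chars(&bp, '.', 3);

    if (!av_bprint_is_complete(&bp))
        fprintf(stderr, "output truncated (allocation failure)\n");

    if (av_bprint_finalize(&bp, &result) >= 0 && result) {
        printf("%s\n", result);                         /* "resolution=1920x1080..." */
        av_free(result);
    }
    return 0;
}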
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/bswap.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/bswap.h
new file mode 100644
index 000000000..8a350e1cd
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/bswap.h
@@ -0,0 +1,109 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * byte swapping routines
+ */
+
+#ifndef AVUTIL_BSWAP_H
+#define AVUTIL_BSWAP_H
+
+#include <stdint.h>
+#include "libavutil/avconfig.h"
+#include "attributes.h"
+
+#ifdef HAVE_AV_CONFIG_H
+
+#include "config.h"
+
+#if ARCH_ARM
+# include "arm/bswap.h"
+#elif ARCH_AVR32
+# include "avr32/bswap.h"
+#elif ARCH_BFIN
+# include "bfin/bswap.h"
+#elif ARCH_SH4
+# include "sh4/bswap.h"
+#elif ARCH_X86
+# include "x86/bswap.h"
+#endif
+
+#endif /* HAVE_AV_CONFIG_H */
+
+#define AV_BSWAP16C(x) (((x) << 8 & 0xff00) | ((x) >> 8 & 0x00ff))
+#define AV_BSWAP32C(x) (AV_BSWAP16C(x) << 16 | AV_BSWAP16C((x) >> 16))
+#define AV_BSWAP64C(x) (AV_BSWAP32C(x) << 32 | AV_BSWAP32C((x) >> 32))
+
+#define AV_BSWAPC(s, x) AV_BSWAP##s##C(x)
+
+#ifndef av_bswap16
+static av_always_inline av_const uint16_t av_bswap16(uint16_t x)
+{
+ x= (x>>8) | (x<<8);
+ return x;
+}
+#endif
+
+#ifndef av_bswap32
+static av_always_inline av_const uint32_t av_bswap32(uint32_t x)
+{
+ return AV_BSWAP32C(x);
+}
+#endif
+
+#ifndef av_bswap64
+static inline uint64_t av_const av_bswap64(uint64_t x)
+{
+ return (uint64_t)av_bswap32(x) << 32 | av_bswap32(x >> 32);
+}
+#endif
+
+// be2ne ... big-endian to native-endian
+// le2ne ... little-endian to native-endian
+
+#if AV_HAVE_BIGENDIAN
+#define av_be2ne16(x) (x)
+#define av_be2ne32(x) (x)
+#define av_be2ne64(x) (x)
+#define av_le2ne16(x) av_bswap16(x)
+#define av_le2ne32(x) av_bswap32(x)
+#define av_le2ne64(x) av_bswap64(x)
+#define AV_BE2NEC(s, x) (x)
+#define AV_LE2NEC(s, x) AV_BSWAPC(s, x)
+#else
+#define av_be2ne16(x) av_bswap16(x)
+#define av_be2ne32(x) av_bswap32(x)
+#define av_be2ne64(x) av_bswap64(x)
+#define av_le2ne16(x) (x)
+#define av_le2ne32(x) (x)
+#define av_le2ne64(x) (x)
+#define AV_BE2NEC(s, x) AV_BSWAPC(s, x)
+#define AV_LE2NEC(s, x) (x)
+#endif
+
+#define AV_BE2NE16C(x) AV_BE2NEC(16, x)
+#define AV_BE2NE32C(x) AV_BE2NEC(32, x)
+#define AV_BE2NE64C(x) AV_BE2NEC(64, x)
+#define AV_LE2NE16C(x) AV_LE2NEC(16, x)
+#define AV_LE2NE32C(x) AV_LE2NEC(32, x)
+#define AV_LE2NE64C(x) AV_LE2NEC(64, x)
+
+#endif /* AVUTIL_BSWAP_H */
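
A tiny sketch of the byte-swapping helpers; the be2ne/le2ne wrappers compile to either a swap or a no-op depending on AV_HAVE_BIGENDIAN from avconfig.h.

#include <stdio.h>
#include <stdint.h>
#include <libavutil/bswap.h>

int main(void)
{
    uint32_t on_disk = 0x6d6f6f76;                 /* 'moov' read as a big-endian word */

    printf("swapped:      0x%08x\n", av_bswap32(on_disk));
    printf("be -> native: 0x%08x\n", av_be2ne32(on_disk));   /* no-op on big-endian hosts */
    printf("constant:     0x%08x\n", (uint32_t)AV_BSWAP32C(0x11223344u)); /* 0x44332211 */
    return 0;
}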
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/common.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/common.h
new file mode 100644
index 000000000..c99d85847
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/common.h
@@ -0,0 +1,398 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * common internal and external API header
+ */
+
+#ifndef AVUTIL_COMMON_H
+#define AVUTIL_COMMON_H
+
+#include <ctype.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "attributes.h"
+#include "libavutil/avconfig.h"
+
+#if AV_HAVE_BIGENDIAN
+# define AV_NE(be, le) (be)
+#else
+# define AV_NE(be, le) (le)
+#endif
+
+//rounded division & shift
+#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b))
+/* assume b>0 */
+#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))
+#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))
+#define FFSIGN(a) ((a) > 0 ? 1 : -1)
+
+#define FFMAX(a,b) ((a) > (b) ? (a) : (b))
+#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c)
+#define FFMIN(a,b) ((a) > (b) ? (b) : (a))
+#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c)
+
+#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0)
+#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))
+#define FFALIGN(x, a) (((x)+(a)-1)&~((a)-1))
+
+/* misc math functions */
+extern const uint8_t ff_log2_tab[256];
+
+extern const uint8_t av_reverse[256];
+
+static av_always_inline av_const int av_log2_c(unsigned int v)
+{
+ int n = 0;
+ if (v & 0xffff0000) {
+ v >>= 16;
+ n += 16;
+ }
+ if (v & 0xff00) {
+ v >>= 8;
+ n += 8;
+ }
+ n += ff_log2_tab[v];
+
+ return n;
+}
+
+static av_always_inline av_const int av_log2_16bit_c(unsigned int v)
+{
+ int n = 0;
+ if (v & 0xff00) {
+ v >>= 8;
+ n += 8;
+ }
+ n += ff_log2_tab[v];
+
+ return n;
+}
+
+#ifdef HAVE_AV_CONFIG_H
+# include "config.h"
+# include "intmath.h"
+#endif
+
+/* Pull in unguarded fallback defines at the end of this file. */
+#include "common.h"
+
+/**
+ * Clip a signed integer value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const int av_clip_c(int a, int amin, int amax)
+{
+ if (a < amin) return amin;
+ else if (a > amax) return amax;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the 0-255 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const uint8_t av_clip_uint8_c(int a)
+{
+ if (a&(~0xFF)) return (-a)>>31;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the -128,127 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int8_t av_clip_int8_c(int a)
+{
+ if ((a+0x80) & ~0xFF) return (a>>31) ^ 0x7F;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the 0-65535 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const uint16_t av_clip_uint16_c(int a)
+{
+ if (a&(~0xFFFF)) return (-a)>>31;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the -32768,32767 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int16_t av_clip_int16_c(int a)
+{
+ if ((a+0x8000) & ~0xFFFF) return (a>>31) ^ 0x7FFF;
+ else return a;
+}
+
+/**
+ * Clip a signed 64-bit integer value into the -2147483648,2147483647 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int32_t av_clipl_int32_c(int64_t a)
+{
+ if ((a+0x80000000u) & ~UINT64_C(0xFFFFFFFF)) return (a>>63) ^ 0x7FFFFFFF;
+ else return a;
+}
+
+/**
+ * Clip a signed integer to an unsigned power of two range.
+ * @param a value to clip
+ * @param p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
+{
+ if (a & ~((1<<p) - 1)) return -a >> 31 & ((1<<p) - 1);
+ else return a;
+}
+
+/**
+ * Clip a float value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const float av_clipf_c(float a, float amin, float amax)
+{
+ if (a < amin) return amin;
+ else if (a > amax) return amax;
+ else return a;
+}
+
+/** Compute ceil(log2(x)).
+ * @param x value used to compute ceil(log2(x))
+ * @return computed ceiling of log2(x)
+ */
+static av_always_inline av_const int av_ceil_log2_c(int x)
+{
+ return av_log2((x - 1) << 1);
+}
+
+/**
+ * Count number of bits set to one in x
+ * @param x value to count bits of
+ * @return the number of bits set to one in x
+ */
+static av_always_inline av_const int av_popcount_c(uint32_t x)
+{
+ x -= (x >> 1) & 0x55555555;
+ x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+ x = (x + (x >> 4)) & 0x0F0F0F0F;
+ x += x >> 8;
+ return (x + (x >> 16)) & 0x3F;
+}
+
+/**
+ * Count number of bits set to one in x
+ * @param x value to count bits of
+ * @return the number of bits set to one in x
+ */
+static av_always_inline av_const int av_popcount64_c(uint64_t x)
+{
+ return av_popcount(x) + av_popcount(x >> 32);
+}
+
+#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))
+#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24))
+
+/**
+ * Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.
+ *
+ * @param val Output value, must be an lvalue of type uint32_t.
+ * @param GET_BYTE Expression reading one byte from the input.
+ * Evaluated up to 7 times (4 for the currently
+ * assigned Unicode range). With a memory buffer
+ * input, this could be *ptr++.
+ * @param ERROR Expression to be evaluated on invalid input,
+ * typically a goto statement.
+ */
+#define GET_UTF8(val, GET_BYTE, ERROR)\
+ val= GET_BYTE;\
+ {\
+ int ones= 7 - av_log2(val ^ 255);\
+ if(ones==1)\
+ ERROR\
+ val&= 127>>ones;\
+ while(--ones > 0){\
+ int tmp= GET_BYTE - 128;\
+ if(tmp>>6)\
+ ERROR\
+ val= (val<<6) + tmp;\
+ }\
+ }
+
+/**
+ * Convert a UTF-16 character (2 or 4 bytes) to its 32-bit UCS-4 encoded form.
+ *
+ * @param val Output value, must be an lvalue of type uint32_t.
+ * @param GET_16BIT Expression returning two bytes of UTF-16 data converted
+ * to native byte order. Evaluated one or two times.
+ * @param ERROR Expression to be evaluated on invalid input,
+ * typically a goto statement.
+ */
+#define GET_UTF16(val, GET_16BIT, ERROR)\
+ val = GET_16BIT;\
+ {\
+ unsigned int hi = val - 0xD800;\
+ if (hi < 0x800) {\
+ val = GET_16BIT - 0xDC00;\
+ if (val > 0x3FFU || hi > 0x3FFU)\
+ ERROR\
+ val += (hi<<10) + 0x10000;\
+ }\
+ }\
+
+/**
+ * @def PUT_UTF8(val, tmp, PUT_BYTE)
+ * Convert a 32-bit Unicode character to its UTF-8 encoded form (up to 4 bytes long).
+ * @param val is an input-only argument and should be of type uint32_t. It holds
+ * a UCS-4 encoded Unicode character that is to be converted to UTF-8. If
+ * val is given as a function it is executed only once.
+ * @param tmp is a temporary variable and should be of type uint8_t. It
+ * represents an intermediate value during conversion that is to be
+ * output by PUT_BYTE.
+ * @param PUT_BYTE writes the converted UTF-8 bytes to any proper destination.
+ * It could be a function or a statement, and uses tmp as the input byte.
+ * For example, PUT_BYTE could be "*output++ = tmp;" PUT_BYTE will be
+ * executed up to 4 times for values in the valid UTF-8 range and up to
+ * 7 times in the general case, depending on the length of the converted
+ * Unicode character.
+ */
+#define PUT_UTF8(val, tmp, PUT_BYTE)\
+ {\
+ int bytes, shift;\
+ uint32_t in = val;\
+ if (in < 0x80) {\
+ tmp = in;\
+ PUT_BYTE\
+ } else {\
+ bytes = (av_log2(in) + 4) / 5;\
+ shift = (bytes - 1) * 6;\
+ tmp = (256 - (256 >> bytes)) | (in >> shift);\
+ PUT_BYTE\
+ while (shift >= 6) {\
+ shift -= 6;\
+ tmp = 0x80 | ((in >> shift) & 0x3f);\
+ PUT_BYTE\
+ }\
+ }\
+ }
+
+/**
+ * @def PUT_UTF16(val, tmp, PUT_16BIT)
+ * Convert a 32-bit Unicode character to its UTF-16 encoded form (2 or 4 bytes).
+ * @param val is an input-only argument and should be of type uint32_t. It holds
+ * a UCS-4 encoded Unicode character that is to be converted to UTF-16. If
+ * val is given as a function it is executed only once.
+ * @param tmp is a temporary variable and should be of type uint16_t. It
+ * represents an intermediate value during conversion that is to be
+ * output by PUT_16BIT.
+ * @param PUT_16BIT writes the converted UTF-16 data to any proper destination
+ * in desired endianness. It could be a function or a statement, and uses tmp
+ * as the input. For example, PUT_16BIT could be "*output++ = tmp;"
+ * PUT_16BIT will be executed 1 or 2 times depending on the input character.
+ */
+#define PUT_UTF16(val, tmp, PUT_16BIT)\
+ {\
+ uint32_t in = val;\
+ if (in < 0x10000) {\
+ tmp = in;\
+ PUT_16BIT\
+ } else {\
+ tmp = 0xD800 | ((in - 0x10000) >> 10);\
+ PUT_16BIT\
+ tmp = 0xDC00 | ((in - 0x10000) & 0x3FF);\
+ PUT_16BIT\
+ }\
+ }\
+
+
+
+#include "mem.h"
+
+#ifdef HAVE_AV_CONFIG_H
+# include "internal.h"
+#endif /* HAVE_AV_CONFIG_H */
+
+#endif /* AVUTIL_COMMON_H */
+
+/*
+ * The following definitions are outside the multiple inclusion guard
+ * to ensure they are immediately available in intmath.h.
+ */
+
+#ifndef av_log2
+# define av_log2 av_log2_c
+#endif
+#ifndef av_log2_16bit
+# define av_log2_16bit av_log2_16bit_c
+#endif
+#ifndef av_ceil_log2
+# define av_ceil_log2 av_ceil_log2_c
+#endif
+#ifndef av_clip
+# define av_clip av_clip_c
+#endif
+#ifndef av_clip_uint8
+# define av_clip_uint8 av_clip_uint8_c
+#endif
+#ifndef av_clip_int8
+# define av_clip_int8 av_clip_int8_c
+#endif
+#ifndef av_clip_uint16
+# define av_clip_uint16 av_clip_uint16_c
+#endif
+#ifndef av_clip_int16
+# define av_clip_int16 av_clip_int16_c
+#endif
+#ifndef av_clipl_int32
+# define av_clipl_int32 av_clipl_int32_c
+#endif
+#ifndef av_clip_uintp2
+# define av_clip_uintp2 av_clip_uintp2_c
+#endif
+#ifndef av_clipf
+# define av_clipf av_clipf_c
+#endif
+#ifndef av_popcount
+# define av_popcount av_popcount_c
+#endif
+#ifndef av_popcount64
+# define av_popcount64 av_popcount64_c
+#endif
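
A few of the arithmetic helpers above in action; expected results are given in comments. The header is assumed to be included through the libavutil include path used by the rest of this tree.

#include <stdio.h>
#include <libavutil/common.h>

int main(void)
{
    printf("%d\n", av_clip(300, 0, 255));           /* 255 */
    printf("%d\n", av_clip_uint8(-17));             /* 0 */
    printf("%d\n", av_log2(4096));                  /* 12 */
    printf("%d\n", av_ceil_log2(100));              /* 7 */
    printf("%d\n", av_popcount(0xF0F0F0F0u));       /* 16 */
    printf("0x%08x\n", MKTAG('m', 'o', 'o', 'v'));  /* FourCC packed little-endian */
    return 0;
}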
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/cpu.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/cpu.h
new file mode 100644
index 000000000..df7bf4421
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/cpu.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CPU_H
+#define AVUTIL_CPU_H
+
+#define AV_CPU_FLAG_FORCE 0x80000000 /* force usage of selected flags (OR) */
+
+ /* lower 16 bits - CPU features */
+#define AV_CPU_FLAG_MMX 0x0001 ///< standard MMX
+#define AV_CPU_FLAG_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext
+#define AV_CPU_FLAG_3DNOW 0x0004 ///< AMD 3DNOW
+#define AV_CPU_FLAG_SSE 0x0008 ///< SSE functions
+#define AV_CPU_FLAG_SSE2 0x0010 ///< PIV SSE2 functions
+#define AV_CPU_FLAG_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster
+#define AV_CPU_FLAG_3DNOWEXT 0x0020 ///< AMD 3DNowExt
+#define AV_CPU_FLAG_SSE3 0x0040 ///< Prescott SSE3 functions
+#define AV_CPU_FLAG_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster
+#define AV_CPU_FLAG_SSSE3 0x0080 ///< Conroe SSSE3 functions
+#define AV_CPU_FLAG_ATOM 0x10000000 ///< Atom processor, some SSSE3 instructions are slower
+#define AV_CPU_FLAG_SSE4 0x0100 ///< Penryn SSE4.1 functions
+#define AV_CPU_FLAG_SSE42 0x0200 ///< Nehalem SSE4.2 functions
+#define AV_CPU_FLAG_AVX 0x4000 ///< AVX functions: requires OS support even if YMM registers aren't used
+#define AV_CPU_FLAG_XOP 0x0400 ///< Bulldozer XOP functions
+#define AV_CPU_FLAG_FMA4 0x0800 ///< Bulldozer FMA4 functions
+#define AV_CPU_FLAG_IWMMXT 0x0100 ///< XScale IWMMXT
+#define AV_CPU_FLAG_ALTIVEC 0x0001 ///< standard
+
+/**
+ * Return the flags which specify extensions supported by the CPU.
+ */
+int av_get_cpu_flags(void);
+
+/* The following CPU-specific functions shall not be called directly. */
+int ff_get_cpu_flags_arm(void);
+int ff_get_cpu_flags_ppc(void);
+int ff_get_cpu_flags_x86(void);
+
+#endif /* AVUTIL_CPU_H */
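
A sketch of the intended use of av_get_cpu_flags(): branch to optimized code paths only when the corresponding flag is reported.

#include <stdio.h>
#include <libavutil/cpu.h>

int main(void)
{
    int flags = av_get_cpu_flags();

    if (flags & AV_CPU_FLAG_FORCE)
        printf("flags were forced rather than detected\n");
    if (flags & AV_CPU_FLAG_SSE2)
        printf("SSE2 available\n");
    if (flags & AV_CPU_FLAG_AVX)
        printf("AVX available (OS support already verified)\n");
    return 0;
}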
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/crc.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/crc.h
new file mode 100644
index 000000000..a93411941
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/crc.h
@@ -0,0 +1,44 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CRC_H
+#define AVUTIL_CRC_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include "attributes.h"
+
+typedef uint32_t AVCRC;
+
+typedef enum {
+ AV_CRC_8_ATM,
+ AV_CRC_16_ANSI,
+ AV_CRC_16_CCITT,
+ AV_CRC_32_IEEE,
+ AV_CRC_32_IEEE_LE, /**< reversed bit-order version of AV_CRC_32_IEEE */
+ AV_CRC_MAX, /**< Not part of public API! Do not use outside libavutil. */
+}AVCRCId;
+
+int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size);
+const AVCRC *av_crc_get_table(AVCRCId crc_id);
+uint32_t av_crc(const AVCRC *ctx, uint32_t start_crc, const uint8_t *buffer, size_t length) av_pure;
+
+#endif /* AVUTIL_CRC_H */
+
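
A sketch of the usual zlib-style CRC-32 computation built from the primitives above: start from all ones with the bit-reversed IEEE table and invert the result. The expected value for the standard check string is an assumption noted in the comment.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <libavutil/crc.h>

int main(void)
{
    const char *check = "123456789";
    const AVCRC *table = av_crc_get_table(AV_CRC_32_IEEE_LE);

    uint32_t crc = av_crc(table, 0xFFFFFFFF,
                          (const uint8_t *)check, strlen(check)) ^ 0xFFFFFFFF;
    printf("crc32: 0x%08x\n", crc);   /* expected: 0xcbf43926 */
    return 0;
}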
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/dict.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/dict.h
new file mode 100644
index 000000000..6e28b6140
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/dict.h
@@ -0,0 +1,121 @@
+/*
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Public dictionary API.
+ */
+
+#ifndef AVUTIL_DICT_H
+#define AVUTIL_DICT_H
+
+/**
+ * @addtogroup lavu_dict AVDictionary
+ * @ingroup lavu_data
+ *
+ * @brief Simple key:value store
+ *
+ * @{
+ * Dictionaries are used for storing key:value pairs. To create
+ * an AVDictionary, simply pass an address of a NULL pointer to
+ * av_dict_set(). NULL can be used as an empty dictionary wherever
+ * a pointer to an AVDictionary is required.
+ * Use av_dict_get() to retrieve an entry or iterate over all
+ * entries and finally av_dict_free() to free the dictionary
+ * and all its contents.
+ *
+ * @code
+ * AVDictionary *d = NULL; // "create" an empty dictionary
+ * av_dict_set(&d, "foo", "bar", 0); // add an entry
+ *
+ * char *k = av_strdup("key"); // if your strings are already allocated,
+ * char *v = av_strdup("value"); // you can avoid copying them like this
+ * av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
+ *
+ * AVDictionaryEntry *t = NULL;
+ * while (t = av_dict_get(d, "", t, AV_DICT_IGNORE_SUFFIX)) {
+ * <....> // iterate over all entries in d
+ * }
+ *
+ * av_dict_free(&d);
+ * @endcode
+ *
+ */
+
+#define AV_DICT_MATCH_CASE 1
+#define AV_DICT_IGNORE_SUFFIX 2
+#define AV_DICT_DONT_STRDUP_KEY 4 /**< Take ownership of a key that's been
+ allocated with av_malloc() and children. */
+#define AV_DICT_DONT_STRDUP_VAL 8 /**< Take ownership of a value that's been
+ allocated with av_malloc() and children. */
+#define AV_DICT_DONT_OVERWRITE 16 ///< Don't overwrite existing entries.
+#define AV_DICT_APPEND 32 /**< If the entry already exists, append to it. Note that no
+ delimiter is added, the strings are simply concatenated. */
+
+typedef struct {
+ char *key;
+ char *value;
+} AVDictionaryEntry;
+
+typedef struct AVDictionary AVDictionary;
+
+/**
+ * Get a dictionary entry with matching key.
+ *
+ * @param prev Set to the previous matching element to find the next.
+ * If set to NULL the first matching element is returned.
+ * @param flags A combination of AV_DICT_* matching flags (e.g. AV_DICT_MATCH_CASE, AV_DICT_IGNORE_SUFFIX).
+ * @return Found entry or NULL, changing key or value leads to undefined behavior.
+ */
+AVDictionaryEntry *
+av_dict_get(AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags);
+
+/**
+ * Set the given entry in *pm, overwriting an existing entry.
+ *
+ * @param pm pointer to a pointer to a dictionary struct. If *pm is NULL
+ * a dictionary struct is allocated and put in *pm.
+ * @param key entry key to add to *pm (will be av_strduped depending on flags)
+ * @param value entry value to add to *pm (will be av_strduped depending on flags).
+ * Passing a NULL value will cause an existing tag to be deleted.
+ * @return >= 0 on success otherwise an error code <0
+ */
+int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags);
+
+/**
+ * Copy entries from one AVDictionary struct into another.
+ * @param dst pointer to a pointer to a AVDictionary struct. If *dst is NULL,
+ * this function will allocate a struct for you and put it in *dst
+ * @param src pointer to source AVDictionary struct
+ * @param flags flags to use when setting entries in *dst
+ * @note metadata is read using the AV_DICT_IGNORE_SUFFIX flag
+ */
+void av_dict_copy(AVDictionary **dst, AVDictionary *src, int flags);
+
+/**
+ * Free all the memory allocated for an AVDictionary struct
+ * and all keys and values.
+ */
+void av_dict_free(AVDictionary **m);
+
+/**
+ * @}
+ */
+
+#endif // AVUTIL_DICT_H
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/error.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/error.h
new file mode 100644
index 000000000..11bcc5c4c
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/error.h
@@ -0,0 +1,81 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * error code definitions
+ */
+
+#ifndef AVUTIL_ERROR_H
+#define AVUTIL_ERROR_H
+
+#include <errno.h>
+#include "avutil.h"
+
+/**
+ * @addtogroup lavu_error
+ *
+ * @{
+ */
+
+
+/* error handling */
+#if EDOM > 0
+#define AVERROR(e) (-(e)) ///< Returns a negative error code from a POSIX error code, to return from library functions.
+#define AVUNERROR(e) (-(e)) ///< Returns a POSIX error code from a library function error return value.
+#else
+/* Some platforms have E* and errno already negated. */
+#define AVERROR(e) (e)
+#define AVUNERROR(e) (e)
+#endif
+
+#define AVERROR_BSF_NOT_FOUND (-MKTAG(0xF8,'B','S','F')) ///< Bitstream filter not found
+#define AVERROR_DECODER_NOT_FOUND (-MKTAG(0xF8,'D','E','C')) ///< Decoder not found
+#define AVERROR_DEMUXER_NOT_FOUND (-MKTAG(0xF8,'D','E','M')) ///< Demuxer not found
+#define AVERROR_ENCODER_NOT_FOUND (-MKTAG(0xF8,'E','N','C')) ///< Encoder not found
+#define AVERROR_EOF (-MKTAG( 'E','O','F',' ')) ///< End of file
+#define AVERROR_EXIT (-MKTAG( 'E','X','I','T')) ///< Immediate exit was requested; the called function should not be restarted
+#define AVERROR_FILTER_NOT_FOUND (-MKTAG(0xF8,'F','I','L')) ///< Filter not found
+#define AVERROR_INVALIDDATA (-MKTAG( 'I','N','D','A')) ///< Invalid data found when processing input
+#define AVERROR_MUXER_NOT_FOUND (-MKTAG(0xF8,'M','U','X')) ///< Muxer not found
+#define AVERROR_OPTION_NOT_FOUND (-MKTAG(0xF8,'O','P','T')) ///< Option not found
+#define AVERROR_PATCHWELCOME (-MKTAG( 'P','A','W','E')) ///< Not yet implemented in Libav, patches welcome
+#define AVERROR_PROTOCOL_NOT_FOUND (-MKTAG(0xF8,'P','R','O')) ///< Protocol not found
+#define AVERROR_STREAM_NOT_FOUND (-MKTAG(0xF8,'S','T','R')) ///< Stream not found
+#define AVERROR_BUG (-MKTAG( 'B','U','G',' ')) ///< Bug detected, please report the issue
+#define AVERROR_UNKNOWN (-MKTAG( 'U','N','K','N')) ///< Unknown error, typically from an external library
+
+/**
+ * Put a description of the AVERROR code errnum in errbuf.
+ * In case of failure the global variable errno is set to indicate the
+ * error. Even in case of failure av_strerror() will write a generic
+ * error message for the provided errnum to errbuf.
+ *
+ * @param errnum error code to describe
+ * @param errbuf buffer to which description is written
+ * @param errbuf_size the size in bytes of errbuf
+ * @return 0 on success, a negative value if a description for errnum
+ * cannot be found
+ */
+int av_strerror(int errnum, char *errbuf, size_t errbuf_size);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_ERROR_H */
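As a quick illustration of the error conventions above, the sketch below (editorial, not part of the patch) builds an error code with AVERROR() and turns it back into a message with av_strerror():

    #include <errno.h>
    #include <stdio.h>
    #include "libavutil/error.h"

    static void report(int err)                 /* err: a negative AVERROR code */
    {
        char msg[128];

        if (av_strerror(err, msg, sizeof(msg)) < 0)
            snprintf(msg, sizeof(msg), "unknown error %d", err);
        fprintf(stderr, "operation failed: %s\n", msg);
    }

    /* e.g. report(AVERROR(EINVAL)); or report(AVERROR_EOF); */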
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/eval.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/eval.h
new file mode 100644
index 000000000..ccb29e7a3
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/eval.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * simple arithmetic expression evaluator
+ */
+
+#ifndef AVUTIL_EVAL_H
+#define AVUTIL_EVAL_H
+
+#include "avutil.h"
+
+typedef struct AVExpr AVExpr;
+
+/**
+ * Parse and evaluate an expression.
+ * Note, this is significantly slower than av_expr_eval().
+ *
+ * @param res a pointer to a double where the result value of the
+ * expression is stored, or NAN in case of error
+ * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)"
+ * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0}
+ * @param const_values a zero terminated array of values for the identifiers from const_names
+ * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers
+ * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument
+ * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers
+ * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments
+ * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2
+ * @param log_ctx parent logging context
+ * @return 0 in case of success, a negative value corresponding to an
+ * AVERROR code otherwise
+ */
+int av_expr_parse_and_eval(double *res, const char *s,
+ const char * const *const_names, const double *const_values,
+ const char * const *func1_names, double (* const *funcs1)(void *, double),
+ const char * const *func2_names, double (* const *funcs2)(void *, double, double),
+ void *opaque, int log_offset, void *log_ctx);
+
+/**
+ * Parse an expression.
+ *
+ * @param expr a pointer where an AVExpr containing the parsed
+ * expression is stored in case of successful parsing, or NULL otherwise.
+ * The pointed-to AVExpr must be freed with av_expr_free() by the user
+ * when it is no longer needed.
+ * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)"
+ * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0}
+ * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers
+ * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument
+ * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers
+ * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments
+ * @param log_ctx parent logging context
+ * @return 0 in case of success, a negative value corresponding to an
+ * AVERROR code otherwise
+ */
+int av_expr_parse(AVExpr **expr, const char *s,
+ const char * const *const_names,
+ const char * const *func1_names, double (* const *funcs1)(void *, double),
+ const char * const *func2_names, double (* const *funcs2)(void *, double, double),
+ int log_offset, void *log_ctx);
+
+/**
+ * Evaluate a previously parsed expression.
+ *
+ * @param const_values a zero terminated array of values for the identifiers from av_expr_parse() const_names
+ * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2
+ * @return the value of the expression
+ */
+double av_expr_eval(AVExpr *e, const double *const_values, void *opaque);
+
+/**
+ * Free a parsed expression previously created with av_expr_parse().
+ */
+void av_expr_free(AVExpr *e);
+
+/**
+ * Parse the string in numstr and return its value as a double. If
+ * the string is empty, contains only whitespace, or does not contain
+ * an initial substring that has the expected syntax for a
+ * floating-point number, no conversion is performed. In this case,
+ * zero is returned and the value stored in tail is the value
+ * of numstr.
+ *
+ * @param numstr a string representing a number, may contain one of
+ * the International System number postfixes, for example 'K', 'M',
+ * 'G'. If 'i' is appended after the postfix, powers of 2 are used
+ * instead of powers of 10. The 'B' postfix multiplies the value for
+ * 8, and can be appended after another postfix or used alone. This
+ * allows using for example 'KB', 'MiB', 'G' and 'B' as postfix.
+ * @param tail if non-NULL puts here the pointer to the char next
+ * after the last parsed character
+ */
+double av_strtod(const char *numstr, char **tail);
+
+#endif /* AVUTIL_EVAL_H */
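A small sketch of the one-shot evaluator declared above (editorial, not part of the patch); the constant names and the expression string are arbitrary examples:

    #include <stdio.h>
    #include "libavutil/eval.h"

    static void eval_example(void)
    {
        static const char * const names[]  = { "PI", "E", NULL };
        static const double       values[] = { 3.141592653589793, 2.718281828459045, 0 };
        double result;

        int ret = av_expr_parse_and_eval(&result, "PI*E",
                                         names, values,
                                         NULL, NULL, NULL, NULL, /* no funcs1/funcs2 */
                                         NULL, 0, NULL);         /* opaque, log_offset, log_ctx */
        if (ret < 0)
            fprintf(stderr, "could not evaluate expression\n");
        else
            printf("PI*E = %f\n", result);
    }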
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/fifo.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/fifo.h
new file mode 100644
index 000000000..f10623930
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/fifo.h
@@ -0,0 +1,141 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * a very simple circular buffer FIFO implementation
+ */
+
+#ifndef AVUTIL_FIFO_H
+#define AVUTIL_FIFO_H
+
+#include <stdint.h>
+#include "avutil.h"
+
+typedef struct AVFifoBuffer {
+ uint8_t *buffer;
+ uint8_t *rptr, *wptr, *end;
+ uint32_t rndx, wndx;
+} AVFifoBuffer;
+
+/**
+ * Initialize an AVFifoBuffer.
+ * @param size size of the FIFO in bytes
+ * @return AVFifoBuffer or NULL in case of memory allocation failure
+ */
+AVFifoBuffer *av_fifo_alloc(unsigned int size);
+
+/**
+ * Free an AVFifoBuffer.
+ * @param f AVFifoBuffer to free
+ */
+void av_fifo_free(AVFifoBuffer *f);
+
+/**
+ * Reset the AVFifoBuffer to the state right after av_fifo_alloc(); in particular, it is emptied.
+ * @param f AVFifoBuffer to reset
+ */
+void av_fifo_reset(AVFifoBuffer *f);
+
+/**
+ * Return the amount of data in bytes in the AVFifoBuffer, that is the
+ * amount of data you can read from it.
+ * @param f AVFifoBuffer to read from
+ * @return size
+ */
+int av_fifo_size(AVFifoBuffer *f);
+
+/**
+ * Return the amount of space in bytes in the AVFifoBuffer, that is the
+ * amount of data you can write into it.
+ * @param f AVFifoBuffer to write into
+ * @return size
+ */
+int av_fifo_space(AVFifoBuffer *f);
+
+/**
+ * Feed data from an AVFifoBuffer to a user-supplied callback.
+ * @param f AVFifoBuffer to read from
+ * @param buf_size number of bytes to read
+ * @param func generic read function
+ * @param dest data destination
+ */
+int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int));
+
+/**
+ * Feed data from a user-supplied callback to an AVFifoBuffer.
+ * @param f AVFifoBuffer to write to
+ * @param src data source; non-const since it may be used as a
+ * modifiable context by the function defined in func
+ * @param size number of bytes to write
+ * @param func generic write function; the first parameter is src,
+ * the second is dest_buf, the third is dest_buf_size.
+ * func must return the number of bytes written to dest_buf, or <= 0 to
+ * indicate no more data available to write.
+ * If func is NULL, src is interpreted as a simple byte array for source data.
+ * @return the number of bytes written to the FIFO
+ */
+int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int));
+
+/**
+ * Resize an AVFifoBuffer.
+ * @param f AVFifoBuffer to resize
+ * @param size new AVFifoBuffer size in bytes
+ * @return <0 for failure, >=0 otherwise
+ */
+int av_fifo_realloc2(AVFifoBuffer *f, unsigned int size);
+
+/**
+ * Read and discard the specified amount of data from an AVFifoBuffer.
+ * @param f AVFifoBuffer to read from
+ * @param size amount of data to read in bytes
+ */
+void av_fifo_drain(AVFifoBuffer *f, int size);
+
+/**
+ * Return a pointer to the data stored in a FIFO buffer at a certain offset.
+ * The FIFO buffer is not modified.
+ *
+ * @param f AVFifoBuffer to peek at, f must be non-NULL
+ * @param offs an offset in bytes; its absolute value must be less
+ *             than the used buffer size or the returned pointer will
+ *             point outside the buffer data.
+ * The used buffer size can be checked with av_fifo_size().
+ */
+static inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs)
+{
+ uint8_t *ptr = f->rptr + offs;
+ if (ptr >= f->end)
+ ptr = f->buffer + (ptr - f->end);
+ else if (ptr < f->buffer)
+ ptr = f->end - (f->buffer - ptr);
+ return ptr;
+}
+
+#if FF_API_AV_FIFO_PEEK
+/**
+ * @deprecated Use av_fifo_peek2() instead.
+ */
+attribute_deprecated
+static inline uint8_t av_fifo_peek(AVFifoBuffer *f, int offs)
+{
+ return *av_fifo_peek2(f, offs);
+}
+#endif
+
+#endif /* AVUTIL_FIFO_H */
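The FIFO calls above compose as in the sketch below (editorial, not part of the patch). Passing NULL as the callback to av_fifo_generic_read so that dest is treated as a plain byte buffer is an assumption here; the header only documents the callback form explicitly for av_fifo_generic_write.

    #include <stdint.h>
    #include "libavutil/fifo.h"

    static void fifo_roundtrip(void)
    {
        AVFifoBuffer *fifo = av_fifo_alloc(1024);
        uint8_t in[4] = { 1, 2, 3, 4 }, out[4];

        if (!fifo)
            return;
        av_fifo_generic_write(fifo, in, sizeof(in), NULL);      /* NULL func: src is a byte array */
        if (av_fifo_size(fifo) >= (int)sizeof(out))
            av_fifo_generic_read(fifo, out, sizeof(out), NULL); /* assumed: NULL func copies into out */
        av_fifo_free(fifo);
    }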
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/file.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/file.h
new file mode 100644
index 000000000..c481c37f9
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/file.h
@@ -0,0 +1,52 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_FILE_H
+#define AVUTIL_FILE_H
+
+#include "avutil.h"
+
+/**
+ * @file
+ * Misc file utilities.
+ */
+
+/**
+ * Read the file with name filename, and put its content in a newly
+ * allocated buffer or map it with mmap() when available.
+ * In case of success set *bufptr to the read or mmapped buffer, and
+ * *size to the size in bytes of the buffer in *bufptr.
+ * The returned buffer must be released with av_file_unmap().
+ *
+ * @param log_offset loglevel offset used for logging
+ * @param log_ctx context used for logging
+ * @return a non negative number in case of success, a negative value
+ * corresponding to an AVERROR error code in case of failure
+ */
+int av_file_map(const char *filename, uint8_t **bufptr, size_t *size,
+ int log_offset, void *log_ctx);
+
+/**
+ * Unmap or free the buffer bufptr created by av_file_map().
+ *
+ * @param size size in bytes of bufptr, must be the same as returned
+ * by av_file_map()
+ */
+void av_file_unmap(uint8_t *bufptr, size_t size);
+
+#endif /* AVUTIL_FILE_H */
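A short sketch of the mapping helpers above (editorial, not part of the patch; the file name is hypothetical):

    #include <stddef.h>
    #include <stdint.h>
    #include "libavutil/file.h"

    static void inspect_file(void)
    {
        uint8_t *buf;
        size_t   size;

        if (av_file_map("extradata.bin", &buf, &size, 0, NULL) >= 0) {
            /* ... read buf[0 .. size-1] here ... */
            av_file_unmap(buf, size);
        }
    }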
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/imgutils.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/imgutils.h
new file mode 100644
index 000000000..3815a49ae
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/imgutils.h
@@ -0,0 +1,138 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_IMGUTILS_H
+#define AVUTIL_IMGUTILS_H
+
+/**
+ * @file
+ * misc image utilities
+ *
+ * @addtogroup lavu_picture
+ * @{
+ */
+
+#include "avutil.h"
+#include "pixdesc.h"
+
+/**
+ * Compute the max pixel step for each plane of an image with a
+ * format described by pixdesc.
+ *
+ * The pixel step is the distance in bytes between the first byte of
+ * the group of bytes which describe a pixel component and the first
+ * byte of the successive group in the same plane for the same
+ * component.
+ *
+ * @param max_pixsteps an array which is filled with the max pixel step
+ * for each plane. Since a plane may contain different pixel
+ * components, the computed max_pixsteps[plane] is relative to the
+ * component in the plane with the max pixel step.
+ * @param max_pixstep_comps an array which is filled with the component
+ * for each plane which has the max pixel step. May be NULL.
+ */
+void av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4],
+ const AVPixFmtDescriptor *pixdesc);
+
+/**
+ * Compute the size of an image line with format pix_fmt and width
+ * width for the plane plane.
+ *
+ * @return the computed size in bytes
+ */
+int av_image_get_linesize(enum PixelFormat pix_fmt, int width, int plane);
+
+/**
+ * Fill plane linesizes for an image with pixel format pix_fmt and
+ * width width.
+ *
+ * @param linesizes array to be filled with the linesize for each plane
+ * @return >= 0 in case of success, a negative error code otherwise
+ */
+int av_image_fill_linesizes(int linesizes[4], enum PixelFormat pix_fmt, int width);
+
+/**
+ * Fill plane data pointers for an image with pixel format pix_fmt and
+ * height height.
+ *
+ * @param data pointers array to be filled with the pointer for each image plane
+ * @param ptr the pointer to a buffer which will contain the image
+ * @param linesizes the array containing the linesize for each
+ * plane, should be filled by av_image_fill_linesizes()
+ * @return the size in bytes required for the image buffer, a negative
+ * error code in case of failure
+ */
+int av_image_fill_pointers(uint8_t *data[4], enum PixelFormat pix_fmt, int height,
+ uint8_t *ptr, const int linesizes[4]);
+
+/**
+ * Allocate an image with size w and h and pixel format pix_fmt, and
+ * fill pointers and linesizes accordingly.
+ * The allocated image buffer has to be freed by using
+ * av_freep(&pointers[0]).
+ *
+ * @param align the value to use for buffer size alignment
+ * @return the size in bytes required for the image buffer, a negative
+ * error code in case of failure
+ */
+int av_image_alloc(uint8_t *pointers[4], int linesizes[4],
+ int w, int h, enum PixelFormat pix_fmt, int align);
+
+/**
+ * Copy image plane from src to dst.
+ * That is, copy "height" number of lines of "bytewidth" bytes each.
+ * The first byte of each successive line is separated by *_linesize
+ * bytes.
+ *
+ * @param dst_linesize linesize for the image plane in dst
+ * @param src_linesize linesize for the image plane in src
+ */
+void av_image_copy_plane(uint8_t *dst, int dst_linesize,
+ const uint8_t *src, int src_linesize,
+ int bytewidth, int height);
+
+/**
+ * Copy image in src_data to dst_data.
+ *
+ * @param dst_linesizes linesizes for the image in dst_data
+ * @param src_linesizes linesizes for the image in src_data
+ */
+void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4],
+ const uint8_t *src_data[4], const int src_linesizes[4],
+ enum PixelFormat pix_fmt, int width, int height);
+
+/**
+ * Check if the given dimension of an image is valid, meaning that all
+ * bytes of the image can be addressed with a signed int.
+ *
+ * @param w the width of the picture
+ * @param h the height of the picture
+ * @param log_offset the offset to sum to the log level for logging with log_ctx
+ * @param log_ctx the parent logging context, it may be NULL
+ * @return >= 0 if valid, a negative error code otherwise
+ */
+int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx);
+
+int ff_set_systematic_pal2(uint32_t pal[256], enum PixelFormat pix_fmt);
+
+/**
+ * @}
+ */
+
+
+#endif /* AVUTIL_IMGUTILS_H */
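To make the allocation contract above concrete, a minimal sketch (not part of the patch) that allocates one YUV 4:2:0 image and releases it with the single av_freep() call the documentation prescribes:

    #include <stdint.h>
    #include "libavutil/imgutils.h"
    #include "libavutil/mem.h"

    static void alloc_yuv420p_frame(void)
    {
        uint8_t *data[4];
        int      linesize[4];

        int size = av_image_alloc(data, linesize, 1280, 720, PIX_FMT_YUV420P, 16);
        if (size >= 0) {
            /* data[0..2] point at the Y/U/V planes, linesize[] holds their strides */
            av_freep(&data[0]);
        }
    }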
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/intfloat.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/intfloat.h
new file mode 100644
index 000000000..9db624a6c
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/intfloat.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2011 Mans Rullgard
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_INTFLOAT_H
+#define AVUTIL_INTFLOAT_H
+
+#include <stdint.h>
+#include "attributes.h"
+
+union av_intfloat32 {
+ uint32_t i;
+ float f;
+};
+
+union av_intfloat64 {
+ uint64_t i;
+ double f;
+};
+
+/**
+ * Reinterpret a 32-bit integer as a float.
+ */
+static av_always_inline float av_int2float(uint32_t i)
+{
+ union av_intfloat32 v = { .i = i };
+ return v.f;
+}
+
+/**
+ * Reinterpret a float as a 32-bit integer.
+ */
+static av_always_inline uint32_t av_float2int(float f)
+{
+ union av_intfloat32 v = { .f = f };
+ return v.i;
+}
+
+/**
+ * Reinterpret a 64-bit integer as a double.
+ */
+static av_always_inline double av_int2double(uint64_t i)
+{
+ union av_intfloat64 v = { .i = i };
+ return v.f;
+}
+
+/**
+ * Reinterpret a double as a 64-bit integer.
+ */
+static av_always_inline uint64_t av_double2int(double f)
+{
+ union av_intfloat64 v = { .f = f };
+ return v.i;
+}
+
+#endif /* AVUTIL_INTFLOAT_H */
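The punning helpers above round-trip exactly, for example (editorial sketch, not part of the patch):

    #include <stdint.h>
    #include "libavutil/intfloat.h"

    static void intfloat_roundtrip(void)
    {
        uint32_t bits = av_float2int(1.5f);  /* 0x3FC00000 for IEEE-754 single precision */
        float    f    = av_int2float(bits);  /* back to 1.5f, bit for bit */
        (void)f;
    }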
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/intfloat_readwrite.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/intfloat_readwrite.h
new file mode 100644
index 000000000..f093b92cd
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/intfloat_readwrite.h
@@ -0,0 +1,40 @@
+/*
+ * copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_INTFLOAT_READWRITE_H
+#define AVUTIL_INTFLOAT_READWRITE_H
+
+#include <stdint.h>
+#include "attributes.h"
+
+/* IEEE 80 bits extended float */
+typedef struct AVExtFloat {
+ uint8_t exponent[2];
+ uint8_t mantissa[8];
+} AVExtFloat;
+
+attribute_deprecated double av_int2dbl(int64_t v) av_const;
+attribute_deprecated float av_int2flt(int32_t v) av_const;
+attribute_deprecated double av_ext2dbl(const AVExtFloat ext) av_const;
+attribute_deprecated int64_t av_dbl2int(double d) av_const;
+attribute_deprecated int32_t av_flt2int(float d) av_const;
+attribute_deprecated AVExtFloat av_dbl2ext(double d) av_const;
+
+#endif /* AVUTIL_INTFLOAT_READWRITE_H */
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/intreadwrite.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/intreadwrite.h
new file mode 100644
index 000000000..01eb27804
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/intreadwrite.h
@@ -0,0 +1,522 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_INTREADWRITE_H
+#define AVUTIL_INTREADWRITE_H
+
+#include <stdint.h>
+#include "libavutil/avconfig.h"
+#include "attributes.h"
+#include "bswap.h"
+
+typedef union {
+ uint64_t u64;
+ uint32_t u32[2];
+ uint16_t u16[4];
+ uint8_t u8 [8];
+ double f64;
+ float f32[2];
+} av_alias av_alias64;
+
+typedef union {
+ uint32_t u32;
+ uint16_t u16[2];
+ uint8_t u8 [4];
+ float f32;
+} av_alias av_alias32;
+
+typedef union {
+ uint16_t u16;
+ uint8_t u8 [2];
+} av_alias av_alias16;
+
+/*
+ * Arch-specific headers can provide any combination of
+ * AV_[RW][BLN](16|24|32|64) and AV_(COPY|SWAP|ZERO)(64|128) macros.
+ * Preprocessor symbols must be defined, even if these are implemented
+ * as inline functions.
+ */
+
+#ifdef HAVE_AV_CONFIG_H
+
+#include "config.h"
+
+#if ARCH_ARM
+# include "arm/intreadwrite.h"
+#elif ARCH_AVR32
+# include "avr32/intreadwrite.h"
+#elif ARCH_MIPS
+# include "mips/intreadwrite.h"
+#elif ARCH_PPC
+# include "ppc/intreadwrite.h"
+#elif ARCH_TOMI
+# include "tomi/intreadwrite.h"
+#elif ARCH_X86
+# include "x86/intreadwrite.h"
+#endif
+
+#endif /* HAVE_AV_CONFIG_H */
+
+/*
+ * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers.
+ */
+
+#if AV_HAVE_BIGENDIAN
+
+# if defined(AV_RN16) && !defined(AV_RB16)
+# define AV_RB16(p) AV_RN16(p)
+# elif !defined(AV_RN16) && defined(AV_RB16)
+# define AV_RN16(p) AV_RB16(p)
+# endif
+
+# if defined(AV_WN16) && !defined(AV_WB16)
+# define AV_WB16(p, v) AV_WN16(p, v)
+# elif !defined(AV_WN16) && defined(AV_WB16)
+# define AV_WN16(p, v) AV_WB16(p, v)
+# endif
+
+# if defined(AV_RN24) && !defined(AV_RB24)
+# define AV_RB24(p) AV_RN24(p)
+# elif !defined(AV_RN24) && defined(AV_RB24)
+# define AV_RN24(p) AV_RB24(p)
+# endif
+
+# if defined(AV_WN24) && !defined(AV_WB24)
+# define AV_WB24(p, v) AV_WN24(p, v)
+# elif !defined(AV_WN24) && defined(AV_WB24)
+# define AV_WN24(p, v) AV_WB24(p, v)
+# endif
+
+# if defined(AV_RN32) && !defined(AV_RB32)
+# define AV_RB32(p) AV_RN32(p)
+# elif !defined(AV_RN32) && defined(AV_RB32)
+# define AV_RN32(p) AV_RB32(p)
+# endif
+
+# if defined(AV_WN32) && !defined(AV_WB32)
+# define AV_WB32(p, v) AV_WN32(p, v)
+# elif !defined(AV_WN32) && defined(AV_WB32)
+# define AV_WN32(p, v) AV_WB32(p, v)
+# endif
+
+# if defined(AV_RN64) && !defined(AV_RB64)
+# define AV_RB64(p) AV_RN64(p)
+# elif !defined(AV_RN64) && defined(AV_RB64)
+# define AV_RN64(p) AV_RB64(p)
+# endif
+
+# if defined(AV_WN64) && !defined(AV_WB64)
+# define AV_WB64(p, v) AV_WN64(p, v)
+# elif !defined(AV_WN64) && defined(AV_WB64)
+# define AV_WN64(p, v) AV_WB64(p, v)
+# endif
+
+#else /* AV_HAVE_BIGENDIAN */
+
+# if defined(AV_RN16) && !defined(AV_RL16)
+# define AV_RL16(p) AV_RN16(p)
+# elif !defined(AV_RN16) && defined(AV_RL16)
+# define AV_RN16(p) AV_RL16(p)
+# endif
+
+# if defined(AV_WN16) && !defined(AV_WL16)
+# define AV_WL16(p, v) AV_WN16(p, v)
+# elif !defined(AV_WN16) && defined(AV_WL16)
+# define AV_WN16(p, v) AV_WL16(p, v)
+# endif
+
+# if defined(AV_RN24) && !defined(AV_RL24)
+# define AV_RL24(p) AV_RN24(p)
+# elif !defined(AV_RN24) && defined(AV_RL24)
+# define AV_RN24(p) AV_RL24(p)
+# endif
+
+# if defined(AV_WN24) && !defined(AV_WL24)
+# define AV_WL24(p, v) AV_WN24(p, v)
+# elif !defined(AV_WN24) && defined(AV_WL24)
+# define AV_WN24(p, v) AV_WL24(p, v)
+# endif
+
+# if defined(AV_RN32) && !defined(AV_RL32)
+# define AV_RL32(p) AV_RN32(p)
+# elif !defined(AV_RN32) && defined(AV_RL32)
+# define AV_RN32(p) AV_RL32(p)
+# endif
+
+# if defined(AV_WN32) && !defined(AV_WL32)
+# define AV_WL32(p, v) AV_WN32(p, v)
+# elif !defined(AV_WN32) && defined(AV_WL32)
+# define AV_WN32(p, v) AV_WL32(p, v)
+# endif
+
+# if defined(AV_RN64) && !defined(AV_RL64)
+# define AV_RL64(p) AV_RN64(p)
+# elif !defined(AV_RN64) && defined(AV_RL64)
+# define AV_RN64(p) AV_RL64(p)
+# endif
+
+# if defined(AV_WN64) && !defined(AV_WL64)
+# define AV_WL64(p, v) AV_WN64(p, v)
+# elif !defined(AV_WN64) && defined(AV_WL64)
+# define AV_WN64(p, v) AV_WL64(p, v)
+# endif
+
+#endif /* !AV_HAVE_BIGENDIAN */
+
+/*
+ * Define AV_[RW]N helper macros to simplify definitions not provided
+ * by per-arch headers.
+ */
+
+#if defined(__GNUC__) && !defined(__TI_COMPILER_VERSION__)
+
+union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias;
+union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias;
+union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias;
+
+# define AV_RN(s, p) (((const union unaligned_##s *) (p))->l)
+# define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v))
+
+#elif defined(__DECC)
+
+# define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p)))
+# define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v))
+
+#elif AV_HAVE_FAST_UNALIGNED
+
+# define AV_RN(s, p) (((const av_alias##s*)(p))->u##s)
+# define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v))
+
+#else
+
+#ifndef AV_RB16
+# define AV_RB16(x) \
+ ((((const uint8_t*)(x))[0] << 8) | \
+ ((const uint8_t*)(x))[1])
+#endif
+#ifndef AV_WB16
+# define AV_WB16(p, d) do { \
+ ((uint8_t*)(p))[1] = (d); \
+ ((uint8_t*)(p))[0] = (d)>>8; \
+ } while(0)
+#endif
+
+#ifndef AV_RL16
+# define AV_RL16(x) \
+ ((((const uint8_t*)(x))[1] << 8) | \
+ ((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL16
+# define AV_WL16(p, d) do { \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ } while(0)
+#endif
+
+#ifndef AV_RB32
+# define AV_RB32(x) \
+ (((uint32_t)((const uint8_t*)(x))[0] << 24) | \
+ (((const uint8_t*)(x))[1] << 16) | \
+ (((const uint8_t*)(x))[2] << 8) | \
+ ((const uint8_t*)(x))[3])
+#endif
+#ifndef AV_WB32
+# define AV_WB32(p, d) do { \
+ ((uint8_t*)(p))[3] = (d); \
+ ((uint8_t*)(p))[2] = (d)>>8; \
+ ((uint8_t*)(p))[1] = (d)>>16; \
+ ((uint8_t*)(p))[0] = (d)>>24; \
+ } while(0)
+#endif
+
+#ifndef AV_RL32
+# define AV_RL32(x) \
+ (((uint32_t)((const uint8_t*)(x))[3] << 24) | \
+ (((const uint8_t*)(x))[2] << 16) | \
+ (((const uint8_t*)(x))[1] << 8) | \
+ ((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL32
+# define AV_WL32(p, d) do { \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[2] = (d)>>16; \
+ ((uint8_t*)(p))[3] = (d)>>24; \
+ } while(0)
+#endif
+
+#ifndef AV_RB64
+# define AV_RB64(x) \
+ (((uint64_t)((const uint8_t*)(x))[0] << 56) | \
+ ((uint64_t)((const uint8_t*)(x))[1] << 48) | \
+ ((uint64_t)((const uint8_t*)(x))[2] << 40) | \
+ ((uint64_t)((const uint8_t*)(x))[3] << 32) | \
+ ((uint64_t)((const uint8_t*)(x))[4] << 24) | \
+ ((uint64_t)((const uint8_t*)(x))[5] << 16) | \
+ ((uint64_t)((const uint8_t*)(x))[6] << 8) | \
+ (uint64_t)((const uint8_t*)(x))[7])
+#endif
+#ifndef AV_WB64
+# define AV_WB64(p, d) do { \
+ ((uint8_t*)(p))[7] = (d); \
+ ((uint8_t*)(p))[6] = (d)>>8; \
+ ((uint8_t*)(p))[5] = (d)>>16; \
+ ((uint8_t*)(p))[4] = (d)>>24; \
+ ((uint8_t*)(p))[3] = (d)>>32; \
+ ((uint8_t*)(p))[2] = (d)>>40; \
+ ((uint8_t*)(p))[1] = (d)>>48; \
+ ((uint8_t*)(p))[0] = (d)>>56; \
+ } while(0)
+#endif
+
+#ifndef AV_RL64
+# define AV_RL64(x) \
+ (((uint64_t)((const uint8_t*)(x))[7] << 56) | \
+ ((uint64_t)((const uint8_t*)(x))[6] << 48) | \
+ ((uint64_t)((const uint8_t*)(x))[5] << 40) | \
+ ((uint64_t)((const uint8_t*)(x))[4] << 32) | \
+ ((uint64_t)((const uint8_t*)(x))[3] << 24) | \
+ ((uint64_t)((const uint8_t*)(x))[2] << 16) | \
+ ((uint64_t)((const uint8_t*)(x))[1] << 8) | \
+ (uint64_t)((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL64
+# define AV_WL64(p, d) do { \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[2] = (d)>>16; \
+ ((uint8_t*)(p))[3] = (d)>>24; \
+ ((uint8_t*)(p))[4] = (d)>>32; \
+ ((uint8_t*)(p))[5] = (d)>>40; \
+ ((uint8_t*)(p))[6] = (d)>>48; \
+ ((uint8_t*)(p))[7] = (d)>>56; \
+ } while(0)
+#endif
+
+#if AV_HAVE_BIGENDIAN
+# define AV_RN(s, p) AV_RB##s(p)
+# define AV_WN(s, p, v) AV_WB##s(p, v)
+#else
+# define AV_RN(s, p) AV_RL##s(p)
+# define AV_WN(s, p, v) AV_WL##s(p, v)
+#endif
+
+#endif /* HAVE_FAST_UNALIGNED */
+
+#ifndef AV_RN16
+# define AV_RN16(p) AV_RN(16, p)
+#endif
+
+#ifndef AV_RN32
+# define AV_RN32(p) AV_RN(32, p)
+#endif
+
+#ifndef AV_RN64
+# define AV_RN64(p) AV_RN(64, p)
+#endif
+
+#ifndef AV_WN16
+# define AV_WN16(p, v) AV_WN(16, p, v)
+#endif
+
+#ifndef AV_WN32
+# define AV_WN32(p, v) AV_WN(32, p, v)
+#endif
+
+#ifndef AV_WN64
+# define AV_WN64(p, v) AV_WN(64, p, v)
+#endif
+
+#if AV_HAVE_BIGENDIAN
+# define AV_RB(s, p) AV_RN##s(p)
+# define AV_WB(s, p, v) AV_WN##s(p, v)
+# define AV_RL(s, p) av_bswap##s(AV_RN##s(p))
+# define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v))
+#else
+# define AV_RB(s, p) av_bswap##s(AV_RN##s(p))
+# define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v))
+# define AV_RL(s, p) AV_RN##s(p)
+# define AV_WL(s, p, v) AV_WN##s(p, v)
+#endif
+
+#define AV_RB8(x) (((const uint8_t*)(x))[0])
+#define AV_WB8(p, d) do { ((uint8_t*)(p))[0] = (d); } while(0)
+
+#define AV_RL8(x) AV_RB8(x)
+#define AV_WL8(p, d) AV_WB8(p, d)
+
+#ifndef AV_RB16
+# define AV_RB16(p) AV_RB(16, p)
+#endif
+#ifndef AV_WB16
+# define AV_WB16(p, v) AV_WB(16, p, v)
+#endif
+
+#ifndef AV_RL16
+# define AV_RL16(p) AV_RL(16, p)
+#endif
+#ifndef AV_WL16
+# define AV_WL16(p, v) AV_WL(16, p, v)
+#endif
+
+#ifndef AV_RB32
+# define AV_RB32(p) AV_RB(32, p)
+#endif
+#ifndef AV_WB32
+# define AV_WB32(p, v) AV_WB(32, p, v)
+#endif
+
+#ifndef AV_RL32
+# define AV_RL32(p) AV_RL(32, p)
+#endif
+#ifndef AV_WL32
+# define AV_WL32(p, v) AV_WL(32, p, v)
+#endif
+
+#ifndef AV_RB64
+# define AV_RB64(p) AV_RB(64, p)
+#endif
+#ifndef AV_WB64
+# define AV_WB64(p, v) AV_WB(64, p, v)
+#endif
+
+#ifndef AV_RL64
+# define AV_RL64(p) AV_RL(64, p)
+#endif
+#ifndef AV_WL64
+# define AV_WL64(p, v) AV_WL(64, p, v)
+#endif
+
+#ifndef AV_RB24
+# define AV_RB24(x) \
+ ((((const uint8_t*)(x))[0] << 16) | \
+ (((const uint8_t*)(x))[1] << 8) | \
+ ((const uint8_t*)(x))[2])
+#endif
+#ifndef AV_WB24
+# define AV_WB24(p, d) do { \
+ ((uint8_t*)(p))[2] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[0] = (d)>>16; \
+ } while(0)
+#endif
+
+#ifndef AV_RL24
+# define AV_RL24(x) \
+ ((((const uint8_t*)(x))[2] << 16) | \
+ (((const uint8_t*)(x))[1] << 8) | \
+ ((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL24
+# define AV_WL24(p, d) do { \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[2] = (d)>>16; \
+ } while(0)
+#endif
+
+/*
+ * The AV_[RW]NA macros access naturally aligned data
+ * in a type-safe way.
+ */
+
+#define AV_RNA(s, p) (((const av_alias##s*)(p))->u##s)
+#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v))
+
+#ifndef AV_RN16A
+# define AV_RN16A(p) AV_RNA(16, p)
+#endif
+
+#ifndef AV_RN32A
+# define AV_RN32A(p) AV_RNA(32, p)
+#endif
+
+#ifndef AV_RN64A
+# define AV_RN64A(p) AV_RNA(64, p)
+#endif
+
+#ifndef AV_WN16A
+# define AV_WN16A(p, v) AV_WNA(16, p, v)
+#endif
+
+#ifndef AV_WN32A
+# define AV_WN32A(p, v) AV_WNA(32, p, v)
+#endif
+
+#ifndef AV_WN64A
+# define AV_WN64A(p, v) AV_WNA(64, p, v)
+#endif
+
+/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be
+ * naturally aligned. They may be implemented using MMX,
+ * so emms_c() must be called before using any float code
+ * afterwards.
+ */
+
+#define AV_COPY(n, d, s) \
+ (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n)
+
+#ifndef AV_COPY16
+# define AV_COPY16(d, s) AV_COPY(16, d, s)
+#endif
+
+#ifndef AV_COPY32
+# define AV_COPY32(d, s) AV_COPY(32, d, s)
+#endif
+
+#ifndef AV_COPY64
+# define AV_COPY64(d, s) AV_COPY(64, d, s)
+#endif
+
+#ifndef AV_COPY128
+# define AV_COPY128(d, s) \
+ do { \
+ AV_COPY64(d, s); \
+ AV_COPY64((char*)(d)+8, (char*)(s)+8); \
+ } while(0)
+#endif
+
+#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b))
+
+#ifndef AV_SWAP64
+# define AV_SWAP64(a, b) AV_SWAP(64, a, b)
+#endif
+
+#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0)
+
+#ifndef AV_ZERO16
+# define AV_ZERO16(d) AV_ZERO(16, d)
+#endif
+
+#ifndef AV_ZERO32
+# define AV_ZERO32(d) AV_ZERO(32, d)
+#endif
+
+#ifndef AV_ZERO64
+# define AV_ZERO64(d) AV_ZERO(64, d)
+#endif
+
+#ifndef AV_ZERO128
+# define AV_ZERO128(d) \
+ do { \
+ AV_ZERO64(d); \
+ AV_ZERO64((char*)(d)+8); \
+ } while(0)
+#endif
+
+#endif /* AVUTIL_INTREADWRITE_H */
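A brief sketch of the byte-order macros defined above (editorial, not part of the patch; the buffer contents are arbitrary):

    #include <stdint.h>
    #include "libavutil/intreadwrite.h"

    static void readwrite_example(void)
    {
        uint8_t hdr[6] = { 0x00, 0x00, 0x01, 0x00, 0x00, 0x00 };

        uint32_t len = AV_RB32(hdr);   /* 256: bytes interpreted most-significant first */
        AV_WL16(hdr + 4, 0xABCD);      /* stores 0xCD then 0xAB */
        (void)len;
    }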
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/lfg.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/lfg.h
new file mode 100644
index 000000000..904d00a66
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/lfg.h
@@ -0,0 +1,62 @@
+/*
+ * Lagged Fibonacci PRNG
+ * Copyright (c) 2008 Michael Niedermayer
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_LFG_H
+#define AVUTIL_LFG_H
+
+typedef struct {
+ unsigned int state[64];
+ int index;
+} AVLFG;
+
+void av_lfg_init(AVLFG *c, unsigned int seed);
+
+/**
+ * Get the next random unsigned 32-bit number using an ALFG.
+ *
+ * Please also consider a simple LCG like state = state*1664525+1013904223;
+ * it may be good enough and faster for your specific use case.
+ */
+static inline unsigned int av_lfg_get(AVLFG *c){
+ c->state[c->index & 63] = c->state[(c->index-24) & 63] + c->state[(c->index-55) & 63];
+ return c->state[c->index++ & 63];
+}
+
+/**
+ * Get the next random unsigned 32-bit number using a MLFG.
+ *
+ * Please also consider av_lfg_get() above, it is faster.
+ */
+static inline unsigned int av_mlfg_get(AVLFG *c){
+ unsigned int a= c->state[(c->index-55) & 63];
+ unsigned int b= c->state[(c->index-24) & 63];
+ return c->state[c->index++ & 63] = 2*a*b+a+b;
+}
+
+/**
+ * Get the next two numbers generated by a Box-Muller Gaussian
+ * generator using the random numbers issued by lfg.
+ *
+ * @param out array where the two generated numbers are placed
+ */
+void av_bmg_get(AVLFG *lfg, double out[2]);
+
+#endif /* AVUTIL_LFG_H */
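Typical use of the generator above (editorial sketch, not part of the patch; the seed is arbitrary):

    #include <stdio.h>
    #include "libavutil/lfg.h"

    static void lfg_example(void)
    {
        AVLFG rng;
        int   i;

        av_lfg_init(&rng, 0xDEADBEEF);           /* any 32-bit seed */
        for (i = 0; i < 4; i++)
            printf("%u\n", av_lfg_get(&rng));    /* uniform 32-bit values */
    }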
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/log.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/log.h
new file mode 100644
index 000000000..0678e1a3b
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/log.h
@@ -0,0 +1,172 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_LOG_H
+#define AVUTIL_LOG_H
+
+#include <stdarg.h>
+#include "avutil.h"
+#include "attributes.h"
+
+/**
+ * Describe the class of an AVClass context structure. That is an
+ * arbitrary struct of which the first field is a pointer to an
+ * AVClass struct (e.g. AVCodecContext, AVFormatContext etc.).
+ */
+typedef struct AVClass {
+ /**
+ * The name of the class; usually it is the same name as the
+ * context structure type to which the AVClass is associated.
+ */
+ const char* class_name;
+
+ /**
+ * A pointer to a function which returns the name of a context
+ * instance ctx associated with the class.
+ */
+ const char* (*item_name)(void* ctx);
+
+ /**
+     * a pointer to the first option specified in the class, if any, or NULL
+ *
+ * @see av_set_default_options()
+ */
+ const struct AVOption *option;
+
+ /**
+ * LIBAVUTIL_VERSION with which this structure was created.
+ * This is used to allow fields to be added without requiring major
+ * version bumps everywhere.
+ */
+
+ int version;
+
+ /**
+ * Offset in the structure where log_level_offset is stored.
+ * 0 means there is no such variable
+ */
+ int log_level_offset_offset;
+
+ /**
+     * Offset in the structure where a pointer to the parent context for logging is stored.
+     * For example, a decoder that uses eval.c could pass its AVCodecContext to eval as such
+     * a parent context. An av_log() implementation could then display the parent context.
+     * It can be NULL, of course.
+ */
+ int parent_log_context_offset;
+
+ /**
+ * Return next AVOptions-enabled child or NULL
+ */
+ void* (*child_next)(void *obj, void *prev);
+
+ /**
+ * Return an AVClass corresponding to next potential
+ * AVOptions-enabled child.
+ *
+ * The difference between child_next and this is that
+ * child_next iterates over _already existing_ objects, while
+ * child_class_next iterates over _all possible_ children.
+ */
+ const struct AVClass* (*child_class_next)(const struct AVClass *prev);
+} AVClass;
+
+/* av_log API */
+
+#define AV_LOG_QUIET -8
+
+/**
+ * Something went really wrong and we will crash now.
+ */
+#define AV_LOG_PANIC 0
+
+/**
+ * Something went wrong and recovery is not possible.
+ * For example, no header was found for a format which depends
+ * on headers or an illegal combination of parameters is used.
+ */
+#define AV_LOG_FATAL 8
+
+/**
+ * Something went wrong and cannot losslessly be recovered.
+ * However, not all future data is affected.
+ */
+#define AV_LOG_ERROR 16
+
+/**
+ * Something somehow does not look correct. This may or may not
+ * lead to problems. An example would be the use of '-vstrict -2'.
+ */
+#define AV_LOG_WARNING 24
+
+#define AV_LOG_INFO 32
+#define AV_LOG_VERBOSE 40
+
+/**
+ * Stuff which is only useful for libav* developers.
+ */
+#define AV_LOG_DEBUG 48
+
+/**
+ * Send the specified message to the log if the level is less than or equal
+ * to the current av_log_level. By default, all logging messages are sent to
+ * stderr. This behavior can be altered by setting a different av_vlog callback
+ * function.
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct.
+ * @param level The importance level of the message, lower values signifying
+ * higher importance.
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ * @see av_vlog
+ */
+void av_log(void *avcl, int level, const char *fmt, ...) av_printf_format(3, 4);
+
+void av_vlog(void *avcl, int level, const char *fmt, va_list);
+int av_log_get_level(void);
+void av_log_set_level(int);
+void av_log_set_callback(void (*)(void*, int, const char*, va_list));
+void av_log_default_callback(void* ptr, int level, const char* fmt, va_list vl);
+const char* av_default_item_name(void* ctx);
+
+/**
+ * av_dlog macros
+ * Useful to print debug messages that shouldn't get compiled in normally.
+ */
+
+#ifdef DEBUG
+# define av_dlog(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__)
+#else
+# define av_dlog(pctx, ...)
+#endif
+
+/**
+ * Skip repeated messages. This requires the user app to use av_log() instead of
+ * (f)printf, as the two would otherwise interfere and, with some bad luck, lead to
+ * "Last message repeated x times" messages below the (f)printf messages.
+ * Also, to receive the final "last repeated" line, if any, the user app must
+ * call av_log(NULL, AV_LOG_QUIET, ""); at the end.
+ */
+#define AV_LOG_SKIP_REPEATED 1
+void av_log_set_flags(int arg);
+
+#endif /* AVUTIL_LOG_H */
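The logging entry points above are typically used as in this sketch (editorial, not part of the patch; the message text is made up):

    #include "libavutil/log.h"

    static void log_example(void)
    {
        av_log_set_level(AV_LOG_VERBOSE);
        av_log_set_flags(AV_LOG_SKIP_REPEATED);
        av_log(NULL, AV_LOG_WARNING, "missing extradata, using defaults (%d)\n", 0);
    }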
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/lzo.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/lzo.h
new file mode 100644
index 000000000..1b774a53b
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/lzo.h
@@ -0,0 +1,77 @@
+/*
+ * LZO 1x decompression
+ * copyright (c) 2006 Reimar Doeffinger
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_LZO_H
+#define AVUTIL_LZO_H
+
+/**
+ * @defgroup lavu_lzo LZO
+ * @ingroup lavu_crypto
+ *
+ * @{
+ */
+
+#include <stdint.h>
+
+/** @name Error flags returned by av_lzo1x_decode
+ * @{ */
+/// end of the input buffer reached before decoding finished
+#define AV_LZO_INPUT_DEPLETED 1
+/// decoded data did not fit into output buffer
+#define AV_LZO_OUTPUT_FULL 2
+/// a reference to previously decoded data was wrong
+#define AV_LZO_INVALID_BACKPTR 4
+/// a non-specific error in the compressed bitstream
+#define AV_LZO_ERROR 8
+/** @} */
+
+#define AV_LZO_INPUT_PADDING 8
+#define AV_LZO_OUTPUT_PADDING 12
+
+/**
+ * @brief Decodes LZO 1x compressed data.
+ * @param out output buffer
+ * @param outlen size of output buffer, number of bytes left are returned here
+ * @param in input buffer
+ * @param inlen size of input buffer, number of bytes left are returned here
+ * @return 0 on success, otherwise a combination of the error flags above
+ *
+ * Make sure all buffers are appropriately padded: in must provide
+ * AV_LZO_INPUT_PADDING and out must provide AV_LZO_OUTPUT_PADDING additional bytes.
+ */
+int av_lzo1x_decode(void *out, int *outlen, const void *in, int *inlen);
+
+/**
+ * @brief deliberately overlapping memcpy implementation
+ * @param dst destination buffer; must be padded with 12 additional bytes
+ * @param back how many bytes back we start (the initial size of the overlapping window)
+ * @param cnt number of bytes to copy, must be >= 0
+ *
+ * cnt > back is valid; this will copy the bytes we just copied,
+ * thus creating a repeating pattern with a period length of back.
+ */
+void av_memcpy_backptr(uint8_t *dst, int back, int cnt);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_LZO_H */
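The padding requirements above matter in practice; the sketch below (editorial, not part of the patch, with a hypothetical compressed payload) shows how the in/out lengths are passed by pointer and updated:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include "libavutil/lzo.h"

    static void lzo_example(const uint8_t *payload, int payload_len)
    {
        uint8_t in[64 + AV_LZO_INPUT_PADDING];    /* compressed data + mandatory padding */
        uint8_t out[256 + AV_LZO_OUTPUT_PADDING]; /* decoded data + mandatory padding */
        int inlen = payload_len, outlen = 256;
        int ret;

        if (payload_len > 64)
            return;
        memcpy(in, payload, payload_len);
        ret = av_lzo1x_decode(out, &outlen, in, &inlen);
        if (ret != 0)                             /* combination of the AV_LZO_* flags above */
            fprintf(stderr, "lzo error flags: 0x%x\n", ret);
    }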
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/mathematics.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/mathematics.h
new file mode 100644
index 000000000..0b072ebe6
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/mathematics.h
@@ -0,0 +1,122 @@
+/*
+ * copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_MATHEMATICS_H
+#define AVUTIL_MATHEMATICS_H
+
+#include <stdint.h>
+#include <math.h>
+#include "attributes.h"
+#include "rational.h"
+
+#ifndef M_E
+#define M_E 2.7182818284590452354 /* e */
+#endif
+#ifndef M_LN2
+#define M_LN2 0.69314718055994530942 /* log_e 2 */
+#endif
+#ifndef M_LN10
+#define M_LN10 2.30258509299404568402 /* log_e 10 */
+#endif
+#ifndef M_LOG2_10
+#define M_LOG2_10 3.32192809488736234787 /* log_2 10 */
+#endif
+#ifndef M_PHI
+#define M_PHI 1.61803398874989484820 /* phi / golden ratio */
+#endif
+#ifndef M_PI
+#define M_PI 3.14159265358979323846 /* pi */
+#endif
+#ifndef M_SQRT1_2
+#define M_SQRT1_2 0.70710678118654752440 /* 1/sqrt(2) */
+#endif
+#ifndef M_SQRT2
+#define M_SQRT2 1.41421356237309504880 /* sqrt(2) */
+#endif
+#ifndef NAN
+#define NAN (0.0/0.0)
+#endif
+#ifndef INFINITY
+#define INFINITY (1.0/0.0)
+#endif
+
+/**
+ * @addtogroup lavu_math
+ * @{
+ */
+
+
+enum AVRounding {
+ AV_ROUND_ZERO = 0, ///< Round toward zero.
+ AV_ROUND_INF = 1, ///< Round away from zero.
+ AV_ROUND_DOWN = 2, ///< Round toward -infinity.
+ AV_ROUND_UP = 3, ///< Round toward +infinity.
+ AV_ROUND_NEAR_INF = 5, ///< Round to nearest and halfway cases away from zero.
+};
+
+/**
+ * Return the greatest common divisor of a and b.
+ * If both a and b are 0, or either or both are < 0, then the behavior is
+ * undefined.
+ */
+int64_t av_const av_gcd(int64_t a, int64_t b);
+
+/**
+ * Rescale a 64-bit integer with rounding to nearest.
+ * A simple a*b/c isn't possible as it can overflow.
+ */
+int64_t av_rescale(int64_t a, int64_t b, int64_t c) av_const;
+
+/**
+ * Rescale a 64-bit integer with specified rounding.
+ * A simple a*b/c isn't possible as it can overflow.
+ */
+int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding) av_const;
+
+/**
+ * Rescale a 64-bit integer by 2 rational numbers.
+ */
+int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const;
+
+/**
+ * Compare two timestamps, each in its own time base.
+ * The result of the function is undefined if one of the timestamps
+ * is outside the int64_t range when represented in the other's time base.
+ * @return -1 if ts_a is before ts_b, 1 if ts_a is after ts_b or 0 if they represent the same position
+ */
+int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b);
+
+/**
+ * Compare 2 integers modulo mod.
+ * That is, we compare integers a and b for which only the least
+ * significant log2(mod) bits are known.
+ *
+ * @param mod must be a power of 2
+ * @return a negative value if a is smaller than b
+ * a positive value if a is greater than b
+ * 0 if a equals b
+ */
+int64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_MATHEMATICS_H */
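As a worked example of the rescaling helpers above (editorial sketch, not part of the patch), converting a 90 kHz MPEG timestamp to microseconds:

    #include <stdint.h>
    #include "libavutil/mathematics.h"

    static void rescale_example(void)
    {
        AVRational tb_90k = { 1, 90000 };
        AVRational tb_us  = { 1, 1000000 };
        int64_t pts_90k   = 450000;                               /* 5 seconds of 90 kHz ticks */
        int64_t pts_us    = av_rescale_q(pts_90k, tb_90k, tb_us); /* 5000000 microseconds */
        (void)pts_us;
    }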
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/md5.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/md5.h
new file mode 100644
index 000000000..1412ee240
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/md5.h
@@ -0,0 +1,46 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_MD5_H
+#define AVUTIL_MD5_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_md5 MD5
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+extern const int av_md5_size;
+
+struct AVMD5;
+
+void av_md5_init(struct AVMD5 *ctx);
+void av_md5_update(struct AVMD5 *ctx, const uint8_t *src, const int len);
+void av_md5_final(struct AVMD5 *ctx, uint8_t *dst);
+void av_md5_sum(uint8_t *dst, const uint8_t *src, const int len);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_MD5_H */
+
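The one-shot helper above covers the common case, for example (editorial sketch, not part of the patch):

    #include <stdint.h>
    #include "libavutil/md5.h"

    static void md5_example(void)
    {
        static const uint8_t data[] = "hello";
        uint8_t digest[16];

        av_md5_sum(digest, data, sizeof(data) - 1);  /* 16-byte MD5 of "hello" */
    }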
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/mem.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/mem.h
new file mode 100644
index 000000000..cd8490b2d
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/mem.h
@@ -0,0 +1,136 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * memory handling functions
+ */
+
+#ifndef AVUTIL_MEM_H
+#define AVUTIL_MEM_H
+
+#include "attributes.h"
+#include "avutil.h"
+
+/**
+ * @addtogroup lavu_mem
+ * @{
+ */
+
+
+#if defined(__ICC) && _ICC < 1200 || defined(__SUNPRO_C)
+ #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_CONST(n,t,v) const t __attribute__ ((aligned (n))) v
+#elif defined(__TI_COMPILER_VERSION__)
+ #define DECLARE_ALIGNED(n,t,v) \
+ AV_PRAGMA(DATA_ALIGN(v,n)) \
+ t __attribute__((aligned(n))) v
+ #define DECLARE_ASM_CONST(n,t,v) \
+ AV_PRAGMA(DATA_ALIGN(v,n)) \
+ static const t __attribute__((aligned(n))) v
+#elif defined(__GNUC__)
+ #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (n))) v
+#elif defined(_MSC_VER)
+ #define DECLARE_ALIGNED(n,t,v) __declspec(align(n)) t v
+ #define DECLARE_ASM_CONST(n,t,v) __declspec(align(n)) static const t v
+#else
+ #define DECLARE_ALIGNED(n,t,v) t v
+ #define DECLARE_ASM_CONST(n,t,v) static const t v
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+ #define av_malloc_attrib __attribute__((__malloc__))
+#else
+ #define av_malloc_attrib
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4,3)
+ #define av_alloc_size(n) __attribute__((alloc_size(n)))
+#else
+ #define av_alloc_size(n)
+#endif
+
+/**
+ * Allocate a block of size bytes with alignment suitable for all
+ * memory accesses (including vectors if available on the CPU).
+ * @param size Size in bytes for the memory block to be allocated.
+ * @return Pointer to the allocated block, NULL if the block cannot
+ * be allocated.
+ * @see av_mallocz()
+ */
+void *av_malloc(size_t size) av_malloc_attrib av_alloc_size(1);
+
+/**
+ * Allocate or reallocate a block of memory.
+ * If ptr is NULL and size > 0, allocate a new block. If
+ * size is zero, free the memory block pointed to by ptr.
+ * @param ptr Pointer to a memory block already allocated with
+ * av_malloc(z)() or av_realloc() or NULL.
+ * @param size Size in bytes for the memory block to be allocated or
+ * reallocated.
+ * @return Pointer to a newly reallocated block or NULL if the block
+ * cannot be reallocated or the function is used to free the memory block.
+ * @see av_fast_realloc()
+ */
+void *av_realloc(void *ptr, size_t size) av_alloc_size(2);
+
+/**
+ * Free a memory block which has been allocated with av_malloc(z)() or
+ * av_realloc().
+ * @param ptr Pointer to the memory block which should be freed.
+ * @note ptr = NULL is explicitly allowed.
+ * @note It is recommended that you use av_freep() instead.
+ * @see av_freep()
+ */
+void av_free(void *ptr);
+
+/**
+ * Allocate a block of size bytes with alignment suitable for all
+ * memory accesses (including vectors if available on the CPU) and
+ * zero all the bytes of the block.
+ * @param size Size in bytes for the memory block to be allocated.
+ * @return Pointer to the allocated block, NULL if it cannot be allocated.
+ * @see av_malloc()
+ */
+void *av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1);
+
+/**
+ * Duplicate the string s.
+ * @param s string to be duplicated
+ * @return Pointer to a newly allocated string containing a
+ * copy of s or NULL if the string cannot be allocated.
+ */
+char *av_strdup(const char *s) av_malloc_attrib;
+
+/**
+ * Free a memory block which has been allocated with av_malloc(z)() or
+ * av_realloc() and set the pointer pointing to it to NULL.
+ * @param ptr Pointer to the pointer to the memory block which should
+ * be freed.
+ * @see av_free()
+ */
+void av_freep(void *ptr);
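+
+/**
+ * A minimal usage sketch (illustrative only, with a hypothetical 1024-byte
+ * buffer): the usual pairing of av_mallocz() with av_freep(), which frees
+ * the block and also resets the pointer so it cannot be double-freed.
+ * @code
+ * char *buf = av_mallocz(1024);   // zero-initialized block
+ * if (!buf)
+ *     return;                     // allocation failed
+ * // ... use buf ...
+ * av_freep(&buf);                 // frees the block and sets buf to NULL
+ * @endcode
+ */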
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_MEM_H */
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/old_pix_fmts.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/old_pix_fmts.h
new file mode 100644
index 000000000..57b699220
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/old_pix_fmts.h
@@ -0,0 +1,171 @@
+/*
+ * copyright (c) 2006-2012 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_OLD_PIX_FMTS_H
+#define AVUTIL_OLD_PIX_FMTS_H
+
+/*
+ * This header exists to prevent new pixel formats from being accidentally added
+ * to the deprecated list.
+ * Do not include it directly. It will be removed at the next major bump.
+ *
+ * Do not add new items to this list. Use the AVPixelFormat enum instead.
+ */
+ PIX_FMT_NONE = AV_PIX_FMT_NONE,
+ PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
+ PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
+ PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB...
+ PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR...
+ PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+ PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
+ PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
+ PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
+ PIX_FMT_GRAY8, ///< Y , 8bpp
+ PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
+ PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
+ PIX_FMT_PAL8, ///< 8 bit with PIX_FMT_RGB32 palette
+ PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range
+ PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range
+ PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range
+ PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing
+ PIX_FMT_XVMC_MPEG2_IDCT,
+ PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
+ PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
+ PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
+ PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
+ PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
+ PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
+ PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
+ PIX_FMT_NV21, ///< as above, but U and V bytes are swapped
+
+ PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
+ PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
+ PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
+ PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
+
+ PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian
+ PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian
+ PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
+ PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range
+ PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
+ PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
+ PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian
+
+ PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
+ PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
+ PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0
+ PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0
+
+ PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
+ PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
+ PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1
+ PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1
+
+ PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
+ PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
+ PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+
+ PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer
+
+ PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0
+ PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0
+ PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1
+ PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1
+ PIX_FMT_GRAY8A, ///< 8bit gray, 8bit alpha
+ PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
+ PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian
+
+ //The following 10 formats have the disadvantage of needing one format for each bit depth;
+ //if you want to support multiple bit depths, using PIX_FMT_YUV420P16* with the bpp stored
+ //separately is better.
+ PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ PIX_FMT_VDA_VLD, ///< hardware decoding through VDA
+
+#ifdef AV_PIX_FMT_ABI_GIT_MASTER
+ PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+ PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+ PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+ PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+#endif
+ PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp
+ PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big endian
+ PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little endian
+ PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big endian
+ PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little endian
+ PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big endian
+ PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little endian
+
+#ifndef AV_PIX_FMT_ABI_GIT_MASTER
+ PIX_FMT_RGBA64BE=0x123, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+ PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+ PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+ PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+#endif
+ PIX_FMT_0RGB=0x123+4, ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB...
+ PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGB0RGB0...
+ PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR...
+ PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGR0BGR0...
+ PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
+ PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
+
+ PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big endian
+ PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little endian
+ PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big endian
+ PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little endian
+
+ PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
+#endif /* AVUTIL_OLD_PIX_FMTS_H */
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/opt.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/opt.h
new file mode 100644
index 000000000..19549408e
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/opt.h
@@ -0,0 +1,591 @@
+/*
+ * AVOptions
+ * copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_OPT_H
+#define AVUTIL_OPT_H
+
+/**
+ * @file
+ * AVOptions
+ */
+
+#include "rational.h"
+#include "avutil.h"
+#include "dict.h"
+#include "log.h"
+
+/**
+ * @defgroup avoptions AVOptions
+ * @ingroup lavu_data
+ * @{
+ * AVOptions provide a generic system to declare options on arbitrary structs
+ * ("objects"). An option can have a help text, a type and a range of possible
+ * values. Options may then be enumerated, read and written to.
+ *
+ * @section avoptions_implement Implementing AVOptions
+ * This section describes how to add AVOptions capabilities to a struct.
+ *
+ * All AVOptions-related information is stored in an AVClass. Therefore
+ * the first member of the struct must be a pointer to an AVClass describing it.
+ * The option field of the AVClass must be set to a NULL-terminated static array
+ * of AVOptions. Each AVOption must have a non-empty name, a type, a default
+ * value and for number-type AVOptions also a range of allowed values. It must
+ * also declare an offset in bytes from the start of the struct, where the field
+ * associated with this AVOption is located. Other fields in the AVOption struct
+ * should also be set when applicable, but are not required.
+ *
+ * The following example illustrates an AVOptions-enabled struct:
+ * @code
+ * typedef struct test_struct {
+ * AVClass *class;
+ * int int_opt;
+ * char *str_opt;
+ * uint8_t *bin_opt;
+ * int bin_len;
+ * } test_struct;
+ *
+ * static const AVOption options[] = {
+ * { "test_int", "This is a test option of int type.", offsetof(test_struct, int_opt),
+ * AV_OPT_TYPE_INT, { -1 }, INT_MIN, INT_MAX },
+ * { "test_str", "This is a test option of string type.", offsetof(test_struct, str_opt),
+ * AV_OPT_TYPE_STRING },
+ * { "test_bin", "This is a test option of binary type.", offsetof(test_struct, bin_opt),
+ * AV_OPT_TYPE_BINARY },
+ * { NULL },
+ * };
+ *
+ * static const AVClass test_class = {
+ * .class_name = "test class",
+ * .item_name = av_default_item_name,
+ * .option = options,
+ * .version = LIBAVUTIL_VERSION_INT,
+ * };
+ * @endcode
+ *
+ * Next, when allocating your struct, you must ensure that the AVClass pointer
+ * is set to the correct value. Then, av_opt_set_defaults() must be called to
+ * initialize defaults. After that the struct is ready to be used with the
+ * AVOptions API.
+ *
+ * When cleaning up, you may use the av_opt_free() function to automatically
+ * free all the allocated string and binary options.
+ *
+ * Continuing with the above example:
+ *
+ * @code
+ * test_struct *alloc_test_struct(void)
+ * {
+ * test_struct *ret = av_malloc(sizeof(*ret));
+ * ret->class = &test_class;
+ * av_opt_set_defaults(ret);
+ * return ret;
+ * }
+ * void free_test_struct(test_struct **foo)
+ * {
+ * av_opt_free(*foo);
+ * av_freep(foo);
+ * }
+ * @endcode
+ *
+ * @subsection avoptions_implement_nesting Nesting
+ * It may happen that an AVOptions-enabled struct contains another
+ * AVOptions-enabled struct as a member (e.g. AVCodecContext in
+ * libavcodec exports generic options, while its priv_data field exports
+ * codec-specific options). In such a case, it is possible to set up the
+ * parent struct to export a child's options. To do that, simply
+ * implement AVClass.child_next() and AVClass.child_class_next() in the
+ * parent struct's AVClass.
+ * Assuming that the test_struct from above now also contains a
+ * child_struct field:
+ *
+ * @code
+ * typedef struct child_struct {
+ * AVClass *class;
+ * int flags_opt;
+ * } child_struct;
+ * static const AVOption child_opts[] = {
+ * { "test_flags", "This is a test option of flags type.",
+ * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { 0 }, INT_MIN, INT_MAX },
+ * { NULL },
+ * };
+ * static const AVClass child_class = {
+ * .class_name = "child class",
+ * .item_name = av_default_item_name,
+ * .option = child_opts,
+ * .version = LIBAVUTIL_VERSION_INT,
+ * };
+ *
+ * void *child_next(void *obj, void *prev)
+ * {
+ * test_struct *t = obj;
+ * if (!prev && t->child_struct)
+ * return t->child_struct;
+ * return NULL;
+ * }
+ * const AVClass *child_class_next(const AVClass *prev)
+ * {
+ * return prev ? NULL : &child_class;
+ * }
+ * @endcode
+ * Putting child_next() and child_class_next() as defined above into
+ * test_class will now make child_struct's options accessible through
+ * test_struct (again, proper setup as described above needs to be done on
+ * child_struct right after it is created).
+ *
+ * From the above example it might not be clear why both child_next()
+ * and child_class_next() are needed. The distinction is that child_next()
+ * iterates over actually existing objects, while child_class_next()
+ * iterates over all possible child classes. E.g. if an AVCodecContext
+ * was initialized to use a codec which has private options, then its
+ * child_next() will return AVCodecContext.priv_data and finish
+ * iterating. OTOH child_class_next() on AVCodecContext.av_class will
+ * iterate over all available codecs with private options.
+ *
+ * @subsection avoptions_implement_named_constants Named constants
+ * It is possible to create named constants for options. Simply set the unit
+ * field of the option to which the constants should apply to a string and
+ * create the constants themselves as options of type AV_OPT_TYPE_CONST
+ * with their unit field set to the same string.
+ * Their default_val field should contain the value of the named
+ * constant.
+ * For example, to add some named constants for the test_flags option
+ * above, put the following into the child_opts array:
+ * @code
+ * { "test_flags", "This is a test option of flags type.",
+ * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { 0 }, INT_MIN, INT_MAX, "test_unit" },
+ * { "flag1", "This is a flag with value 16", 0, AV_OPT_TYPE_CONST, { 16 }, 0, 0, "test_unit" },
+ * @endcode
+ *
+ * @section avoptions_use Using AVOptions
+ * This section deals with accessing options in an AVOptions-enabled struct.
+ * Such structs in Libav are e.g. AVCodecContext in libavcodec or
+ * AVFormatContext in libavformat.
+ *
+ * @subsection avoptions_use_examine Examining AVOptions
+ * The basic functions for examining options are av_opt_next(), which iterates
+ * over all options defined for one object, and av_opt_find(), which searches
+ * for an option with the given name.
+ *
+ * The situation is more complicated with nesting. An AVOptions-enabled struct
+ * may have AVOptions-enabled children. Passing the AV_OPT_SEARCH_CHILDREN flag
+ * to av_opt_find() will make the function search children recursively.
+ *
+ * For enumerating there are basically two cases. The first is when you want to
+ * get all options that may potentially exist on the struct and its children
+ * (e.g. when constructing documentation). In that case you should call
+ * av_opt_child_class_next() recursively on the parent struct's AVClass. The
+ * second case is when you have an already initialized struct with all its
+ * children and you want to get all options that can actually be written to or read
+ * from it. In that case you should call av_opt_child_next() recursively (and
+ * av_opt_next() on each result).
+ *
+ * @subsection avoptions_use_get_set Reading and writing AVOptions
+ * When setting options, you often have a string read directly from the
+ * user. In such a case, simply passing it to av_opt_set() is enough. For
+ * non-string type options, av_opt_set() will parse the string according to the
+ * option type.
+ *
+ * Similarly av_opt_get() will read any option type and convert it to a string
+ * which will be returned. Do not forget that the string is allocated, so you
+ * have to free it with av_free().
+ *
+ * In some cases it may be more convenient to put all options into an
+ * AVDictionary and call av_opt_set_dict() on it. A specific case of this
+ * are the format/codec open functions in lavf/lavc which take a dictionary
+ * filled with options as a parameter. This makes it possible to set some options
+ * that cannot be set otherwise, since e.g. the input file format is not known
+ * before the file is actually opened.
+ */
+
+enum AVOptionType{
+ AV_OPT_TYPE_FLAGS,
+ AV_OPT_TYPE_INT,
+ AV_OPT_TYPE_INT64,
+ AV_OPT_TYPE_DOUBLE,
+ AV_OPT_TYPE_FLOAT,
+ AV_OPT_TYPE_STRING,
+ AV_OPT_TYPE_RATIONAL,
+ AV_OPT_TYPE_BINARY, ///< offset must point to a pointer immediately followed by an int for the length
+ AV_OPT_TYPE_CONST = 128,
+#if FF_API_OLD_AVOPTIONS
+ FF_OPT_TYPE_FLAGS = 0,
+ FF_OPT_TYPE_INT,
+ FF_OPT_TYPE_INT64,
+ FF_OPT_TYPE_DOUBLE,
+ FF_OPT_TYPE_FLOAT,
+ FF_OPT_TYPE_STRING,
+ FF_OPT_TYPE_RATIONAL,
+ FF_OPT_TYPE_BINARY, ///< offset must point to a pointer immediately followed by an int for the length
+ FF_OPT_TYPE_CONST=128,
+#endif
+};
+
+/**
+ * AVOption
+ */
+typedef struct AVOption {
+ const char *name;
+
+ /**
+ * short English help text
+ * @todo What about other languages?
+ */
+ const char *help;
+
+ /**
+ * The offset relative to the context structure where the option
+ * value is stored. It should be 0 for named constants.
+ */
+ int offset;
+ enum AVOptionType type;
+
+ /**
+ * the default value for scalar options
+ */
+ union {
+ double dbl;
+ const char *str;
+ /* TODO those are unused now */
+ int64_t i64;
+ AVRational q;
+ } default_val;
+ double min; ///< minimum valid value for the option
+ double max; ///< maximum valid value for the option
+
+ int flags;
+#define AV_OPT_FLAG_ENCODING_PARAM 1 ///< a generic parameter which can be set by the user for muxing or encoding
+#define AV_OPT_FLAG_DECODING_PARAM 2 ///< a generic parameter which can be set by the user for demuxing or decoding
+#define AV_OPT_FLAG_METADATA 4 ///< some data extracted or inserted into the file like title, comment, ...
+#define AV_OPT_FLAG_AUDIO_PARAM 8
+#define AV_OPT_FLAG_VIDEO_PARAM 16
+#define AV_OPT_FLAG_SUBTITLE_PARAM 32
+//FIXME think about enc-audio, ... style flags
+
+ /**
+ * The logical unit to which the option belongs. Non-constant
+ * options and corresponding named constants share the same
+ * unit. May be NULL.
+ */
+ const char *unit;
+} AVOption;
+
+#if FF_API_FIND_OPT
+/**
+ * Look for an option in obj. Look only for the options which
+ * have the flags set as specified in mask and flags (that is,
+ * for which it is the case that opt->flags & mask == flags).
+ *
+ * @param[in] obj a pointer to a struct whose first element is a
+ * pointer to an AVClass
+ * @param[in] name the name of the option to look for
+ * @param[in] unit the unit of the option to look for, or any if NULL
+ * @return a pointer to the option found, or NULL if no option
+ * has been found
+ *
+ * @deprecated use av_opt_find.
+ */
+attribute_deprecated
+const AVOption *av_find_opt(void *obj, const char *name, const char *unit, int mask, int flags);
+#endif
+
+#if FF_API_OLD_AVOPTIONS
+/**
+ * Set the field of obj with the given name to value.
+ *
+ * @param[in] obj A struct whose first element is a pointer to an
+ * AVClass.
+ * @param[in] name the name of the field to set
+ * @param[in] val The value to set. If the field is not of a string
+ * type, then the given string is parsed.
+ * SI postfixes and some named scalars are supported.
+ * If the field is of a numeric type, it has to be a numeric or named
+ * scalar. Behavior with more than one scalar and +- infix operators
+ * is undefined.
+ * If the field is of a flags type, it has to be a sequence of numeric
+ * scalars or named flags separated by '+' or '-'. Prefixing a flag
+ * with '+' causes it to be set without affecting the other flags;
+ * similarly, '-' unsets a flag.
+ * @param[out] o_out if non-NULL put here a pointer to the AVOption
+ * found
+ * @param alloc this parameter is currently ignored
+ * @return 0 if the value has been set, or an AVERROR code in case of
+ * error:
+ * AVERROR_OPTION_NOT_FOUND if no matching option exists
+ * AVERROR(ERANGE) if the value is out of range
+ * AVERROR(EINVAL) if the value is not valid
+ * @deprecated use av_opt_set()
+ */
+attribute_deprecated
+int av_set_string3(void *obj, const char *name, const char *val, int alloc, const AVOption **o_out);
+
+attribute_deprecated const AVOption *av_set_double(void *obj, const char *name, double n);
+attribute_deprecated const AVOption *av_set_q(void *obj, const char *name, AVRational n);
+attribute_deprecated const AVOption *av_set_int(void *obj, const char *name, int64_t n);
+
+attribute_deprecated double av_get_double(void *obj, const char *name, const AVOption **o_out);
+attribute_deprecated AVRational av_get_q(void *obj, const char *name, const AVOption **o_out);
+attribute_deprecated int64_t av_get_int(void *obj, const char *name, const AVOption **o_out);
+attribute_deprecated const char *av_get_string(void *obj, const char *name, const AVOption **o_out, char *buf, int buf_len);
+attribute_deprecated const AVOption *av_next_option(void *obj, const AVOption *last);
+#endif
+
+/**
+ * Show the obj options.
+ *
+ * @param req_flags requested flags for the options to show. Show only the
+ * options for which opt->flags & req_flags is non-zero.
+ * @param rej_flags rejected flags for the options to show. Show only the
+ * options for which !(opt->flags & rej_flags) holds.
+ * @param av_log_obj log context to use for showing the options
+ */
+int av_opt_show2(void *obj, void *av_log_obj, int req_flags, int rej_flags);
+
+/**
+ * Set the values of all AVOption fields to their default values.
+ *
+ * @param s an AVOption-enabled struct (its first member must be a pointer to AVClass)
+ */
+void av_opt_set_defaults(void *s);
+
+#if FF_API_OLD_AVOPTIONS
+attribute_deprecated
+void av_opt_set_defaults2(void *s, int mask, int flags);
+#endif
+
+/**
+ * Parse the key/value pairs list in opts. For each key/value pair
+ * found, stores the value in the field in ctx that is named like the
+ * key. ctx must be an AVClass context; storing is done using
+ * AVOptions.
+ *
+ * @param key_val_sep a 0-terminated list of characters used to
+ * separate key from value
+ * @param pairs_sep a 0-terminated list of characters used to separate
+ * two pairs from each other
+ * @return the number of successfully set key/value pairs, or a negative
+ * value corresponding to an AVERROR code in case of error:
+ * AVERROR(EINVAL) if opts cannot be parsed,
+ * the error code issued by av_set_string3() if a key/value pair
+ * cannot be set
+ */
+int av_set_options_string(void *ctx, const char *opts,
+ const char *key_val_sep, const char *pairs_sep);
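+
+/**
+ * A hedged example of the expected opts syntax, assuming a hypothetical
+ * AVClass-enabled context ctx that exposes numeric options "b" and "r":
+ * keys and values are separated by '=', pairs by ':'.
+ * @code
+ * int ret = av_set_options_string(ctx, "b=100k:r=25", "=", ":");
+ * if (ret < 0) {
+ *     // a key/value pair could not be parsed or set; ret is an error code
+ * }
+ * @endcode
+ */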
+
+/**
+ * Free all string and binary options in obj.
+ */
+void av_opt_free(void *obj);
+
+/**
+ * Check whether a particular flag is set in a flags field.
+ *
+ * @param field_name the name of the flag field option
+ * @param flag_name the name of the flag to check
+ * @return non-zero if the flag is set, zero if the flag isn't set,
+ * isn't of the right type, or the flags field doesn't exist.
+ */
+int av_opt_flag_is_set(void *obj, const char *field_name, const char *flag_name);
+
+/**
+ * Set all the options from a given dictionary on an object.
+ *
+ * @param obj a struct whose first element is a pointer to AVClass
+ * @param options options to process. This dictionary will be freed and replaced
+ * by a new one containing all options not found in obj.
+ * Of course this new dictionary needs to be freed by the caller
+ * with av_dict_free().
+ *
+ * @return 0 on success, a negative AVERROR if some option was found in obj,
+ * but could not be set.
+ *
+ * @see av_dict_copy()
+ */
+int av_opt_set_dict(void *obj, struct AVDictionary **options);
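+
+/**
+ * A small sketch of the dictionary-based path, assuming obj is a
+ * hypothetical AVClass-enabled struct with a "threads" option; entries
+ * that obj does not recognize are returned in opts for the caller to
+ * inspect and free.
+ * @code
+ * AVDictionary *opts = NULL;
+ * av_dict_set(&opts, "threads", "4", 0);
+ * if (av_opt_set_dict(obj, &opts) < 0) {
+ *     // an option was found in obj but could not be set
+ * }
+ * av_dict_free(&opts);   // frees whatever was not consumed
+ * @endcode
+ */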
+
+/**
+ * @defgroup opt_eval_funcs Evaluating option strings
+ * @{
+ * This group of functions can be used to evaluate option strings
+ * and get numbers out of them. They do the same thing as av_opt_set(),
+ * except the result is written into the caller-supplied pointer.
+ *
+ * @param obj a struct whose first element is a pointer to AVClass.
+ * @param o an option for which the string is to be evaluated.
+ * @param val string to be evaluated.
+ * @param *_out value of the string will be written here.
+ *
+ * @return 0 on success, a negative number on failure.
+ */
+int av_opt_eval_flags (void *obj, const AVOption *o, const char *val, int *flags_out);
+int av_opt_eval_int (void *obj, const AVOption *o, const char *val, int *int_out);
+int av_opt_eval_int64 (void *obj, const AVOption *o, const char *val, int64_t *int64_out);
+int av_opt_eval_float (void *obj, const AVOption *o, const char *val, float *float_out);
+int av_opt_eval_double(void *obj, const AVOption *o, const char *val, double *double_out);
+int av_opt_eval_q (void *obj, const AVOption *o, const char *val, AVRational *q_out);
+/**
+ * @}
+ */
+
+#define AV_OPT_SEARCH_CHILDREN 0x0001 /**< Search in possible children of the
+ given object first. */
+/**
+ * The obj passed to av_opt_find() is fake -- only a double pointer to AVClass
+ * instead of a required pointer to a struct containing AVClass. This is
+ * useful for searching for options without needing to allocate the corresponding
+ * object.
+ */
+#define AV_OPT_SEARCH_FAKE_OBJ 0x0002
+
+/**
+ * Look for an option in an object. Consider only options which
+ * have all the specified flags set.
+ *
+ * @param[in] obj A pointer to a struct whose first element is a
+ * pointer to an AVClass.
+ * Alternatively a double pointer to an AVClass, if
+ * AV_OPT_SEARCH_FAKE_OBJ search flag is set.
+ * @param[in] name The name of the option to look for.
+ * @param[in] unit When searching for named constants, name of the unit
+ * it belongs to.
+ * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG).
+ * @param search_flags A combination of AV_OPT_SEARCH_*.
+ *
+ * @return A pointer to the option found, or NULL if no option
+ * was found.
+ *
+ * @note Options found with AV_OPT_SEARCH_CHILDREN flag may not be settable
+ * directly with av_set_string3(). Use special calls which take an options
+ * AVDictionary (e.g. avformat_open_input()) to set options found with this
+ * flag.
+ */
+const AVOption *av_opt_find(void *obj, const char *name, const char *unit,
+ int opt_flags, int search_flags);
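+
+/**
+ * A brief sketch, assuming a hypothetical AVClass-enabled obj and a
+ * hypothetical option name "bitrate": look an option up by name, searching
+ * children as well, and inspect its help text.
+ * @code
+ * const AVOption *o = av_opt_find(obj, "bitrate", NULL, 0,
+ *                                 AV_OPT_SEARCH_CHILDREN);
+ * if (o)
+ *     printf("%s: %s\n", o->name, o->help);
+ * @endcode
+ */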
+
+/**
+ * Look for an option in an object. Consider only options which
+ * have all the specified flags set.
+ *
+ * @param[in] obj A pointer to a struct whose first element is a
+ * pointer to an AVClass.
+ * Alternatively a double pointer to an AVClass, if
+ * AV_OPT_SEARCH_FAKE_OBJ search flag is set.
+ * @param[in] name The name of the option to look for.
+ * @param[in] unit When searching for named constants, name of the unit
+ * it belongs to.
+ * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG).
+ * @param search_flags A combination of AV_OPT_SEARCH_*.
+ * @param[out] target_obj if non-NULL, an object to which the option belongs will be
+ * written here. It may be different from obj if AV_OPT_SEARCH_CHILDREN is present
+ * in search_flags. This parameter is ignored if search_flags contain
+ * AV_OPT_SEARCH_FAKE_OBJ.
+ *
+ * @return A pointer to the option found, or NULL if no option
+ * was found.
+ */
+const AVOption *av_opt_find2(void *obj, const char *name, const char *unit,
+ int opt_flags, int search_flags, void **target_obj);
+
+/**
+ * Iterate over all AVOptions belonging to obj.
+ *
+ * @param obj an AVOptions-enabled struct or a double pointer to an
+ * AVClass describing it.
+ * @param prev result of the previous call to av_opt_next() on this object
+ * or NULL
+ * @return next AVOption or NULL
+ */
+const AVOption *av_opt_next(void *obj, const AVOption *prev);
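+
+/**
+ * A typical enumeration loop (sketch only, obj being any AVClass-enabled
+ * struct): list every option declared directly on obj.
+ * @code
+ * const AVOption *opt = NULL;
+ * while ((opt = av_opt_next(obj, opt)))
+ *     printf("option: %s\n", opt->name);
+ * @endcode
+ */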
+
+/**
+ * Iterate over AVOptions-enabled children of obj.
+ *
+ * @param prev result of a previous call to this function or NULL
+ * @return next AVOptions-enabled child or NULL
+ */
+void *av_opt_child_next(void *obj, void *prev);
+
+/**
+ * Iterate over potential AVOptions-enabled children of parent.
+ *
+ * @param prev result of a previous call to this function or NULL
+ * @return AVClass corresponding to next potential child or NULL
+ */
+const AVClass *av_opt_child_class_next(const AVClass *parent, const AVClass *prev);
+
+/**
+ * @defgroup opt_set_funcs Option setting functions
+ * @{
+ * Those functions set the field of obj with the given name to value.
+ *
+ * @param[in] obj A struct whose first element is a pointer to an AVClass.
+ * @param[in] name the name of the field to set
+ * @param[in] val The value to set. In case of av_opt_set() if the field is not
+ * of a string type, then the given string is parsed.
+ * SI postfixes and some named scalars are supported.
+ * If the field is of a numeric type, it has to be a numeric or named
+ * scalar. Behavior with more than one scalar and +- infix operators
+ * is undefined.
+ * If the field is of a flags type, it has to be a sequence of numeric
+ * scalars or named flags separated by '+' or '-'. Prefixing a flag
+ * with '+' causes it to be set without affecting the other flags;
+ * similarly, '-' unsets a flag.
+ * @param search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN
+ * is passed here, then the option may be set on a child of obj.
+ *
+ * @return 0 if the value has been set, or an AVERROR code in case of
+ * error:
+ * AVERROR_OPTION_NOT_FOUND if no matching option exists
+ * AVERROR(ERANGE) if the value is out of range
+ * AVERROR(EINVAL) if the value is not valid
+ */
+int av_opt_set (void *obj, const char *name, const char *val, int search_flags);
+int av_opt_set_int (void *obj, const char *name, int64_t val, int search_flags);
+int av_opt_set_double(void *obj, const char *name, double val, int search_flags);
+int av_opt_set_q (void *obj, const char *name, AVRational val, int search_flags);
+/**
+ * @}
+ */
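+
+/**
+ * A hedged example of setting options, assuming a hypothetical
+ * AVClass-enabled obj that exposes a string option "preset" and a numeric
+ * option "threads" (possibly on a child, hence AV_OPT_SEARCH_CHILDREN).
+ * @code
+ * av_opt_set    (obj, "preset",  "fast", AV_OPT_SEARCH_CHILDREN);
+ * av_opt_set_int(obj, "threads", 4,      AV_OPT_SEARCH_CHILDREN);
+ * @endcode
+ */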
+
+/**
+ * @defgroup opt_get_funcs Option getting functions
+ * @{
+ * Those functions get a value of the option with the given name from an object.
+ *
+ * @param[in] obj a struct whose first element is a pointer to an AVClass.
+ * @param[in] name name of the option to get.
+ * @param[in] search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN
+ * is passed here, then the option may be found in a child of obj.
+ * @param[out] out_val value of the option will be written here
+ * @return 0 on success, a negative error code otherwise
+ */
+/**
+ * @note the returned string will be av_malloc()ed and must be av_free()ed by the caller
+ */
+int av_opt_get (void *obj, const char *name, int search_flags, uint8_t **out_val);
+int av_opt_get_int (void *obj, const char *name, int search_flags, int64_t *out_val);
+int av_opt_get_double(void *obj, const char *name, int search_flags, double *out_val);
+int av_opt_get_q (void *obj, const char *name, int search_flags, AVRational *out_val);
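+
+/**
+ * Reading an option back (sketch, with the same hypothetical "preset"
+ * option as above): av_opt_get() returns an av_malloc()ed string that the
+ * caller must release with av_free().
+ * @code
+ * uint8_t *val = NULL;
+ * if (av_opt_get(obj, "preset", AV_OPT_SEARCH_CHILDREN, &val) >= 0) {
+ *     printf("preset = %s\n", (char *)val);
+ *     av_free(val);
+ * }
+ * @endcode
+ */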
+/**
+ * @}
+ * @}
+ */
+
+#endif /* AVUTIL_OPT_H */
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/parseutils.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/parseutils.h
new file mode 100644
index 000000000..0844abb2f
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/parseutils.h
@@ -0,0 +1,124 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PARSEUTILS_H
+#define AVUTIL_PARSEUTILS_H
+
+#include <time.h>
+
+#include "rational.h"
+
+/**
+ * @file
+ * misc parsing utilities
+ */
+
+/**
+ * Parse str and put in width_ptr and height_ptr the detected values.
+ *
+ * @param[in,out] width_ptr pointer to the variable which will contain the detected
+ * width value
+ * @param[in,out] height_ptr pointer to the variable which will contain the detected
+ * height value
+ * @param[in] str the string to parse: it has to be a string in the format
+ * width x height or a valid video size abbreviation.
+ * @return >= 0 on success, a negative error code otherwise
+ */
+int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str);
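+
+/**
+ * Example input (sketch): an explicit "WxH" string is accepted, as is a
+ * recognized size abbreviation such as "vga".
+ * @code
+ * int w, h;
+ * if (av_parse_video_size(&w, &h, "1280x720") >= 0)
+ *     printf("%dx%d\n", w, h);   // prints 1280x720
+ * @endcode
+ */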
+
+/**
+ * Parse str and store the detected values in *rate.
+ *
+ * @param[in,out] rate pointer to the AVRational which will contain the detected
+ * frame rate
+ * @param[in] str the string to parse: it has to be a string in the format
+ * rate_num / rate_den, a float number or a valid video rate abbreviation
+ * @return >= 0 on success, a negative error code otherwise
+ */
+int av_parse_video_rate(AVRational *rate, const char *str);
+
+/**
+ * Put the RGBA values that correspond to color_string in rgba_color.
+ *
+ * @param color_string a string specifying a color. It can be the name of
+ * a color (case insensitive match) or a [0x|#]RRGGBB[AA] sequence,
+ * possibly followed by "@" and a string representing the alpha
+ * component.
+ * The alpha component may be a string composed of "0x" followed by a
+ * hexadecimal number or a decimal number between 0.0 and 1.0, which
+ * represents the opacity value (0x00/0.0 means completely transparent,
+ * 0xff/1.0 completely opaque).
+ * If the alpha component is not specified then 0xff is assumed.
+ * The string "random" will result in a random color.
+ * @param slen length of the initial part of color_string containing the
+ * color. It can be set to -1 if color_string is a null-terminated string
+ * containing nothing other than the color.
+ * @return >= 0 in case of success, a negative value in case of
+ * failure (for example if color_string cannot be parsed).
+ */
+int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen,
+ void *log_ctx);
+
+/**
+ * Parse timestr and return in *timeval a corresponding number of
+ * microseconds.
+ *
+ * @param timeval puts here the number of microseconds corresponding
+ * to the string in timestr. If the string represents a duration, it
+ * is the number of microseconds contained in the time interval. If
+ * the string is a date, it is the number of microseconds elapsed since
+ * January 1st, 1970 up to the time of the parsed date. If timestr cannot
+ * be successfully parsed, *timeval is set to INT64_MIN.
+ *
+ * @param timestr a string representing a date or a duration.
+ * - If a date the syntax is:
+ * @code
+ * [{YYYY-MM-DD|YYYYMMDD}[T|t| ]]{{HH[:MM[:SS[.m...]]]}|{HH[MM[SS[.m...]]]}}[Z]
+ * now
+ * @endcode
+ * If the value is "now" it takes the current time.
+ * Time is local time unless Z is appended, in which case it is
+ * interpreted as UTC.
+ * If the year-month-day part is not specified it takes the current
+ * year-month-day.
+ * - If a duration the syntax is:
+ * @code
+ * [-]HH[:MM[:SS[.m...]]]
+ * [-]S+[.m...]
+ * @endcode
+ * @param duration flag which tells how to interpret timestr, if not
+ * zero timestr is interpreted as a duration, otherwise as a date
+ * @return 0 in case of success, a negative value corresponding to an
+ * AVERROR code otherwise
+ */
+int av_parse_time(int64_t *timeval, const char *timestr, int duration);
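+
+/**
+ * Two illustrative calls (sketch): the first parses an absolute UTC date,
+ * the second a duration of ninety and a half seconds; on success t1 and t2
+ * hold microseconds.
+ * @code
+ * int64_t t1, t2;
+ * av_parse_time(&t1, "2012-03-04T12:30:00Z", 0);   // date
+ * av_parse_time(&t2, "00:01:30.5", 1);             // duration
+ * @endcode
+ */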
+
+/**
+ * Attempt to find a specific tag in a URL.
+ *
+ * syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done.
+ * Return 1 if found.
+ */
+int av_find_info_tag(char *arg, int arg_size, const char *tag1, const char *info);
+
+/**
+ * Convert the decomposed UTC time in tm to a time_t value.
+ */
+time_t av_timegm(struct tm *tm);
+
+#endif /* AVUTIL_PARSEUTILS_H */
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/pixdesc.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/pixdesc.h
new file mode 100644
index 000000000..b5972c78f
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/pixdesc.h
@@ -0,0 +1,177 @@
+/*
+ * pixel format descriptor
+ * Copyright (c) 2009 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PIXDESC_H
+#define AVUTIL_PIXDESC_H
+
+#include <inttypes.h>
+#include "pixfmt.h"
+
+typedef struct AVComponentDescriptor{
+ uint16_t plane :2; ///< which of the 4 planes contains the component
+
+ /**
+ * Number of elements between 2 horizontally consecutive pixels minus 1.
+ * Elements are bits for bitstream formats, bytes otherwise.
+ */
+ uint16_t step_minus1 :3;
+
+ /**
+ * Number of elements before the component of the first pixel plus 1.
+ * Elements are bits for bitstream formats, bytes otherwise.
+ */
+ uint16_t offset_plus1 :3;
+ uint16_t shift :3; ///< number of least significant bits that must be shifted away to get the value
+ uint16_t depth_minus1 :4; ///< number of bits in the component minus 1
+}AVComponentDescriptor;
+
+/**
+ * Descriptor that unambiguously describes how the bits of a pixel are
+ * stored in the up to 4 data planes of an image. It also stores the
+ * subsampling factors and number of components.
+ *
+ * @note This is separate from the colorspace (RGB, YCbCr, YPbPr, JPEG-style YUV
+ * and all the YUV variants); AVPixFmtDescriptor just stores how values
+ * are stored, not what these values represent.
+ */
+typedef struct AVPixFmtDescriptor{
+ const char *name;
+ uint8_t nb_components; ///< The number of components each pixel has (1-4)
+
+ /**
+ * Amount to shift the luma width right to find the chroma width.
+ * For YV12 this is 1 for example.
+ * chroma_width = -((-luma_width) >> log2_chroma_w)
+ * The note above is needed to ensure rounding up.
+ * This value only refers to the chroma components.
+ */
+ uint8_t log2_chroma_w; ///< chroma_width = -((-luma_width )>>log2_chroma_w)
+
+ /**
+ * Amount to shift the luma height right to find the chroma height.
+ * For YV12 this is 1 for example.
+ * chroma_height= -((-luma_height) >> log2_chroma_h)
+ * The note above is needed to ensure rounding up.
+ * This value only refers to the chroma components.
+ */
+ uint8_t log2_chroma_h;
+ uint8_t flags;
+
+ /**
+ * Parameters that describe how pixels are packed. If the format
+ * has chroma components, they must be stored in comp[1] and
+ * comp[2].
+ */
+ AVComponentDescriptor comp[4];
+}AVPixFmtDescriptor;
+
+#define PIX_FMT_BE 1 ///< Pixel format is big-endian.
+#define PIX_FMT_PAL 2 ///< Pixel format has a palette in data[1], values are indexes in this palette.
+#define PIX_FMT_BITSTREAM 4 ///< All values of a component are bit-wise packed end to end.
+#define PIX_FMT_HWACCEL 8 ///< Pixel format is an HW accelerated format.
+#define PIX_FMT_PLANAR 16 ///< At least one pixel component is not in the first data plane
+#define PIX_FMT_RGB 32 ///< The pixel format contains RGB-like data (as opposed to YUV/grayscale)
+
+/**
+ * The array of all the pixel format descriptors.
+ */
+extern const AVPixFmtDescriptor av_pix_fmt_descriptors[];
+
+/**
+ * Read a line from an image, and write the values of the
+ * pixel format component c to dst.
+ *
+ * @param data the array containing the pointers to the planes of the image
+ * @param linesize the array containing the linesizes of the image
+ * @param desc the pixel format descriptor for the image
+ * @param x the horizontal coordinate of the first pixel to read
+ * @param y the vertical coordinate of the first pixel to read
+ * @param w the width of the line to read, that is the number of
+ * values to write to dst
+ * @param read_pal_component if not zero and the format is a paletted
+ * format, write the values corresponding to the palette
+ * component c in data[1] to dst, rather than the palette indexes in
+ * data[0]. The behavior is undefined if the format is not paletted.
+ */
+void av_read_image_line(uint16_t *dst, const uint8_t *data[4], const int linesize[4],
+ const AVPixFmtDescriptor *desc, int x, int y, int c, int w, int read_pal_component);
+
+/**
+ * Write the values from src to the pixel format component c of an
+ * image line.
+ *
+ * @param src array containing the values to write
+ * @param data the array containing the pointers to the planes of the
+ * image to write into. It is supposed to be zeroed.
+ * @param linesize the array containing the linesizes of the image
+ * @param desc the pixel format descriptor for the image
+ * @param x the horizontal coordinate of the first pixel to write
+ * @param y the vertical coordinate of the first pixel to write
+ * @param w the width of the line to write, that is the number of
+ * values to write to the image line
+ */
+void av_write_image_line(const uint16_t *src, uint8_t *data[4], const int linesize[4],
+ const AVPixFmtDescriptor *desc, int x, int y, int c, int w);
+
+/**
+ * Return the pixel format corresponding to name.
+ *
+ * If there is no pixel format with the name name, then look for a
+ * pixel format with the name corresponding to the native-endian
+ * format of name.
+ * For example, on a little-endian system, first look for "gray16",
+ * then for "gray16le".
+ *
+ * Finally if no pixel format has been found, returns PIX_FMT_NONE.
+ */
+enum PixelFormat av_get_pix_fmt(const char *name);
+
+/**
+ * Return the short name for a pixel format, NULL in case pix_fmt is
+ * unknown.
+ *
+ * @see av_get_pix_fmt(), av_get_pix_fmt_string()
+ */
+const char *av_get_pix_fmt_name(enum PixelFormat pix_fmt);
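+
+/**
+ * A small round-trip sketch: map a name to a PixelFormat and back, then
+ * query its descriptor for the number of bits per pixel.
+ * @code
+ * enum PixelFormat fmt = av_get_pix_fmt("yuv420p");
+ * if (fmt != PIX_FMT_NONE) {
+ *     const AVPixFmtDescriptor *d = &av_pix_fmt_descriptors[fmt];
+ *     printf("%s: %d bits/pixel\n", av_get_pix_fmt_name(fmt),
+ *            av_get_bits_per_pixel(d));
+ * }
+ * @endcode
+ */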
+
+/**
+ * Print in buf the string corresponding to the pixel format with
+ * number pix_fmt, or a header if pix_fmt is negative.
+ *
+ * @param buf the buffer where to write the string
+ * @param buf_size the size of buf
+ * @param pix_fmt the number of the pixel format to print the
+ * corresponding info string, or a negative value to print the
+ * corresponding header.
+ */
+char *av_get_pix_fmt_string (char *buf, int buf_size, enum PixelFormat pix_fmt);
+
+/**
+ * Return the number of bits per pixel used by the pixel format
+ * described by pixdesc.
+ *
+ * The returned number of bits refers to the number of bits actually
+ * used for storing the pixel information, that is padding bits are
+ * not counted.
+ */
+int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc);
+
+#endif /* AVUTIL_PIXDESC_H */
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/pixfmt.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/pixfmt.h
new file mode 100644
index 000000000..bd898bdc8
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/pixfmt.h
@@ -0,0 +1,198 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PIXFMT_H
+#define AVUTIL_PIXFMT_H
+
+/**
+ * @file
+ * pixel format definitions
+ *
+ */
+
+#include "libavutil/avconfig.h"
+
+/**
+ * Pixel format.
+ *
+ * @note
+ * PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA
+ * color is put together as:
+ * (A << 24) | (R << 16) | (G << 8) | B
+ * This is stored as BGRA on little-endian CPU architectures and ARGB on
+ * big-endian CPUs.
+ *
+ * @par
+ * When the pixel format is palettized RGB (PIX_FMT_PAL8), the palettized
+ * image data is stored in AVFrame.data[0]. The palette is transported in
+ * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is
+ * formatted the same as in PIX_FMT_RGB32 described above (i.e., it is
+ * also endian-specific). Note also that the individual RGB palette
+ * components stored in AVFrame.data[1] should be in the range 0..255.
+ * This is important as many custom PAL8 video codecs that were designed
+ * to run on the IBM VGA graphics adapter use 6-bit palette components.
+ *
+ * @par
+ * For all the 8bit per pixel formats, an RGB32 palette is in data[1] like
+ * for pal8. This palette is filled in automatically by the function
+ * allocating the picture.
+ *
+ * @note
+ * make sure that all newly added big-endian formats have pix_fmt&1==1
+ * and that all newly added little-endian formats have pix_fmt&1==0;
+ * this allows simpler detection of big vs little endian.
+ */
+enum PixelFormat {
+ PIX_FMT_NONE= -1,
+ PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
+ PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
+ PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB...
+ PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR...
+ PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+ PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
+ PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
+ PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
+ PIX_FMT_GRAY8, ///< Y , 8bpp
+ PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
+ PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
+ PIX_FMT_PAL8, ///< 8 bit with PIX_FMT_RGB32 palette
+ PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range
+ PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range
+ PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range
+ PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing
+ PIX_FMT_XVMC_MPEG2_IDCT,
+ PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
+ PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
+ PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
+ PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
+ PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
+ PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
+ PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
+ PIX_FMT_NV21, ///< as above, but U and V bytes are swapped
+
+ PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
+ PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
+ PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
+ PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
+
+ PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian
+ PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian
+ PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
+ PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range
+ PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
+ PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
+ PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian
+
+ PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
+ PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
+ PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0
+ PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0
+
+ PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
+ PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
+ PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1
+ PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1
+
+ PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
+ PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
+ PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+
+ PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer
+
+ PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0
+ PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0
+ PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1
+ PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1
+ PIX_FMT_Y400A, ///< 8bit gray, 8bit alpha
+ PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
+ PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian
+ PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ PIX_FMT_VDA_VLD, ///< hardware decoding through VDA
+ PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp
+ PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big endian
+ PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little endian
+ PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big endian
+ PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little endian
+ PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big endian
+ PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little endian
+ PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
+};
+
+#if AV_HAVE_BIGENDIAN
+# define PIX_FMT_NE(be, le) PIX_FMT_##be
+#else
+# define PIX_FMT_NE(be, le) PIX_FMT_##le
+#endif
+
+#define PIX_FMT_RGB32 PIX_FMT_NE(ARGB, BGRA)
+#define PIX_FMT_RGB32_1 PIX_FMT_NE(RGBA, ABGR)
+#define PIX_FMT_BGR32 PIX_FMT_NE(ABGR, RGBA)
+#define PIX_FMT_BGR32_1 PIX_FMT_NE(BGRA, ARGB)
+
+#define PIX_FMT_GRAY16 PIX_FMT_NE(GRAY16BE, GRAY16LE)
+#define PIX_FMT_RGB48 PIX_FMT_NE(RGB48BE, RGB48LE)
+#define PIX_FMT_RGB565 PIX_FMT_NE(RGB565BE, RGB565LE)
+#define PIX_FMT_RGB555 PIX_FMT_NE(RGB555BE, RGB555LE)
+#define PIX_FMT_RGB444 PIX_FMT_NE(RGB444BE, RGB444LE)
+#define PIX_FMT_BGR48 PIX_FMT_NE(BGR48BE, BGR48LE)
+#define PIX_FMT_BGR565 PIX_FMT_NE(BGR565BE, BGR565LE)
+#define PIX_FMT_BGR555 PIX_FMT_NE(BGR555BE, BGR555LE)
+#define PIX_FMT_BGR444 PIX_FMT_NE(BGR444BE, BGR444LE)
+
+#define PIX_FMT_YUV420P9 PIX_FMT_NE(YUV420P9BE , YUV420P9LE)
+#define PIX_FMT_YUV422P9 PIX_FMT_NE(YUV422P9BE , YUV422P9LE)
+#define PIX_FMT_YUV444P9 PIX_FMT_NE(YUV444P9BE , YUV444P9LE)
+#define PIX_FMT_YUV420P10 PIX_FMT_NE(YUV420P10BE, YUV420P10LE)
+#define PIX_FMT_YUV422P10 PIX_FMT_NE(YUV422P10BE, YUV422P10LE)
+#define PIX_FMT_YUV444P10 PIX_FMT_NE(YUV444P10BE, YUV444P10LE)
+#define PIX_FMT_YUV420P16 PIX_FMT_NE(YUV420P16BE, YUV420P16LE)
+#define PIX_FMT_YUV422P16 PIX_FMT_NE(YUV422P16BE, YUV422P16LE)
+#define PIX_FMT_YUV444P16 PIX_FMT_NE(YUV444P16BE, YUV444P16LE)
+
+#define PIX_FMT_GBRP9 PIX_FMT_NE(GBRP9BE , GBRP9LE)
+#define PIX_FMT_GBRP10 PIX_FMT_NE(GBRP10BE, GBRP10LE)
+#define PIX_FMT_GBRP16 PIX_FMT_NE(GBRP16BE, GBRP16LE)
+
+#endif /* AVUTIL_PIXFMT_H */
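
The PIX_FMT_NE() selector above picks the big- or little-endian variant of a format at compile time, so aliases such as PIX_FMT_RGB32 always name whichever concrete format gives an 0xAARRGGBB word in native byte order. A minimal sketch (not part of the patch) showing what the alias resolves to; the file name, include path and build line are illustrative only.

/* demo_pixfmt.c - illustrative only, not part of the imported headers.
 * Build roughly as: cc -I dom/media/platforms/ffmpeg/libav53/include demo_pixfmt.c */
#include <stdio.h>
#include "libavutil/avutil.h"

int main(void)
{
    /* PIX_FMT_NE(ARGB, BGRA) expands to PIX_FMT_ARGB on big-endian hosts and
     * PIX_FMT_BGRA on little-endian hosts, so a pixel read as a native
     * uint32_t is 0xAARRGGBB either way. */
    if (PIX_FMT_RGB32 == PIX_FMT_ARGB)
        printf("big-endian host: PIX_FMT_RGB32 -> PIX_FMT_ARGB\n");
    else
        printf("little-endian host: PIX_FMT_RGB32 -> PIX_FMT_BGRA\n");
    return 0;
}
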
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/random_seed.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/random_seed.h
new file mode 100644
index 000000000..b1fad13d0
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/random_seed.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2009 Baptiste Coudurier <baptiste.coudurier@gmail.com>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_RANDOM_SEED_H
+#define AVUTIL_RANDOM_SEED_H
+
+#include <stdint.h>
+/**
+ * @addtogroup lavu_crypto
+ * @{
+ */
+
+/**
+ * Get random data.
+ *
+ * This function can be called repeatedly to generate more random bits
+ * as needed. It is generally quite slow, and usually used to seed a
+ * PRNG. As it uses /dev/urandom and /dev/random, the quality of the
+ * returned random data depends on the platform.
+ */
+uint32_t av_get_random_seed(void);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_RANDOM_SEED_H */
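
av_get_random_seed() above returns 32 bits of platform entropy and is typically used to seed a PRNG. A minimal sketch (not part of the patch); the file name is illustrative and the program has to be linked against libavutil.

/* demo_seed.c - illustrative only; link with -lavutil. */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include "libavutil/random_seed.h"

int main(void)
{
    /* Use the entropy source to seed the C library PRNG. */
    uint32_t seed = av_get_random_seed();
    srand(seed);
    printf("seed = 0x%08" PRIx32 ", first value = %d\n", seed, rand());
    return 0;
}
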
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/rational.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/rational.h
new file mode 100644
index 000000000..0ec18ec96
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/rational.h
@@ -0,0 +1,144 @@
+/*
+ * rational numbers
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * rational numbers
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVUTIL_RATIONAL_H
+#define AVUTIL_RATIONAL_H
+
+#include <stdint.h>
+#include <limits.h>
+#include "attributes.h"
+
+/**
+ * @addtogroup lavu_math
+ * @{
+ */
+
+/**
+ * rational number numerator/denominator
+ */
+typedef struct AVRational{
+ int num; ///< numerator
+ int den; ///< denominator
+} AVRational;
+
+/**
+ * Compare two rationals.
+ * @param a first rational
+ * @param b second rational
+ * @return 0 if a==b, 1 if a>b, -1 if a<b, and INT_MIN if one of the
+ * values is of the form 0/0
+ */
+static inline int av_cmp_q(AVRational a, AVRational b){
+ const int64_t tmp= a.num * (int64_t)b.den - b.num * (int64_t)a.den;
+
+ if(tmp) return ((tmp ^ a.den ^ b.den)>>63)|1;
+ else if(b.den && a.den) return 0;
+ else if(a.num && b.num) return (a.num>>31) - (b.num>>31);
+ else return INT_MIN;
+}
+
+/**
+ * Convert rational to double.
+ * @param a rational to convert
+ * @return (double) a
+ */
+static inline double av_q2d(AVRational a){
+ return a.num / (double) a.den;
+}
+
+/**
+ * Reduce a fraction.
+ * This is useful for framerate calculations.
+ * @param dst_num destination numerator
+ * @param dst_den destination denominator
+ * @param num source numerator
+ * @param den source denominator
+ * @param max the maximum allowed for dst_num & dst_den
+ * @return 1 if exact, 0 otherwise
+ */
+int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max);
+
+/**
+ * Multiply two rationals.
+ * @param b first rational
+ * @param c second rational
+ * @return b*c
+ */
+AVRational av_mul_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Divide one rational by another.
+ * @param b first rational
+ * @param c second rational
+ * @return b/c
+ */
+AVRational av_div_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Add two rationals.
+ * @param b first rational
+ * @param c second rational
+ * @return b+c
+ */
+AVRational av_add_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Subtract one rational from another.
+ * @param b first rational
+ * @param c second rational
+ * @return b-c
+ */
+AVRational av_sub_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Convert a double precision floating point number to a rational.
+ * inf is expressed as {1,0} or {-1,0} depending on the sign.
+ *
+ * @param d double to convert
+ * @param max the maximum allowed numerator and denominator
+ * @return (AVRational) d
+ */
+AVRational av_d2q(double d, int max) av_const;
+
+/**
+ * @return 1 if q1 is nearer to q than q2, -1 if q2 is nearer
+ * than q1, 0 if they have the same distance.
+ */
+int av_nearer_q(AVRational q, AVRational q1, AVRational q2);
+
+/**
+ * Find the nearest value in q_list to q.
+ * @param q_list an array of rationals terminated by {0, 0}
+ * @return the index of the nearest value found in the array
+ */
+int av_find_nearest_q_idx(AVRational q, const AVRational* q_list);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_RATIONAL_H */
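
av_cmp_q() and av_q2d() above are static inline, so they can be exercised with the bundled header alone; the remaining helpers need libavutil at link time. A minimal sketch (not part of the patch), using the exact NTSC frame rate as an arbitrary demo value.

/* demo_rational.c - illustrative only. */
#include <stdio.h>
#include "libavutil/rational.h"

int main(void)
{
    AVRational ntsc = { 30000, 1001 };   /* exact NTSC frame rate */
    AVRational r30  = { 30, 1 };

    /* av_q2d() converts to double; av_cmp_q() returns -1 because ntsc < 30/1. */
    printf("NTSC rate = %.6f fps\n", av_q2d(ntsc));
    printf("av_cmp_q(ntsc, 30/1) = %d\n", av_cmp_q(ntsc, r30));
    return 0;
}
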
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/samplefmt.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/samplefmt.h
new file mode 100644
index 000000000..b6715561d
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/samplefmt.h
@@ -0,0 +1,148 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_SAMPLEFMT_H
+#define AVUTIL_SAMPLEFMT_H
+
+#include "avutil.h"
+
+/**
+ * all in native-endian format
+ */
+enum AVSampleFormat {
+ AV_SAMPLE_FMT_NONE = -1,
+ AV_SAMPLE_FMT_U8, ///< unsigned 8 bits
+ AV_SAMPLE_FMT_S16, ///< signed 16 bits
+ AV_SAMPLE_FMT_S32, ///< signed 32 bits
+ AV_SAMPLE_FMT_FLT, ///< float
+ AV_SAMPLE_FMT_DBL, ///< double
+
+ AV_SAMPLE_FMT_U8P, ///< unsigned 8 bits, planar
+ AV_SAMPLE_FMT_S16P, ///< signed 16 bits, planar
+ AV_SAMPLE_FMT_S32P, ///< signed 32 bits, planar
+ AV_SAMPLE_FMT_FLTP, ///< float, planar
+ AV_SAMPLE_FMT_DBLP, ///< double, planar
+
+ AV_SAMPLE_FMT_NB ///< Number of sample formats. DO NOT USE if linking dynamically
+};
+
+/**
+ * Return the name of sample_fmt, or NULL if sample_fmt is not
+ * recognized.
+ */
+const char *av_get_sample_fmt_name(enum AVSampleFormat sample_fmt);
+
+/**
+ * Return a sample format corresponding to name, or AV_SAMPLE_FMT_NONE
+ * on error.
+ */
+enum AVSampleFormat av_get_sample_fmt(const char *name);
+
+/**
+ * Generate a string describing the sample format sample_fmt, or a header
+ * line if sample_fmt is negative.
+ *
+ * @param buf the buffer to write the string to
+ * @param buf_size the size of buf
+ * @param sample_fmt the sample format to describe, or a negative value to
+ * print the corresponding header
+ * @return the pointer to the filled buffer or NULL if sample_fmt is
+ * unknown or in case of other errors
+ */
+char *av_get_sample_fmt_string(char *buf, int buf_size, enum AVSampleFormat sample_fmt);
+
+#if FF_API_GET_BITS_PER_SAMPLE_FMT
+/**
+ * @deprecated Use av_get_bytes_per_sample() instead.
+ */
+attribute_deprecated
+int av_get_bits_per_sample_fmt(enum AVSampleFormat sample_fmt);
+#endif
+
+/**
+ * Return number of bytes per sample.
+ *
+ * @param sample_fmt the sample format
+ * @return number of bytes per sample or zero if unknown for the given
+ * sample format
+ */
+int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt);
+
+/**
+ * Check if the sample format is planar.
+ *
+ * @param sample_fmt the sample format to inspect
+ * @return 1 if the sample format is planar, 0 if it is interleaved
+ */
+int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt);
+
+/**
+ * Get the required buffer size for the given audio parameters.
+ *
+ * @param[out] linesize calculated linesize, may be NULL
+ * @param nb_channels the number of channels
+ * @param nb_samples the number of samples in a single channel
+ * @param sample_fmt the sample format
+ * @param align buffer size alignment (1 = no alignment required)
+ * @return required buffer size, or negative error code on failure
+ */
+int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,
+ enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * Fill channel data pointers and linesize for samples with sample
+ * format sample_fmt.
+ *
+ * The pointers array is filled with the pointers to the samples data:
+ * for planar, set the start point of each channel's data within the buffer,
+ * for packed, set the start point of the entire buffer only.
+ *
+ * The linesize array is filled with the aligned size of each channel's data
+ * buffer for planar layout, or the aligned size of the buffer for all channels
+ * for packed layout.
+ *
+ * @param[out] audio_data array to be filled with the pointer for each channel
+ * @param[out] linesize calculated linesize
+ * @param buf the pointer to a buffer containing the samples
+ * @param nb_channels the number of channels
+ * @param nb_samples the number of samples in a single channel
+ * @param sample_fmt the sample format
+ * @param align buffer size alignment (1 = no alignment required)
+ * @return 0 on success or a negative error code on failure
+ */
+int av_samples_fill_arrays(uint8_t **audio_data, int *linesize, uint8_t *buf,
+ int nb_channels, int nb_samples,
+ enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * Allocate a samples buffer for nb_samples samples, and fill data pointers and
+ * linesize accordingly.
+ * The allocated samples buffer can be freed by using av_freep(&audio_data[0]).
+ *
+ * @param[out] audio_data array to be filled with the pointer for each channel
+ * @param[out] linesize aligned size for audio buffer(s)
+ * @param nb_channels number of audio channels
+ * @param nb_samples number of samples per channel
+ * @param sample_fmt the sample format
+ * @param align buffer size alignment (1 = no alignment required)
+ * @return 0 on success or a negative error code on failure
+ * @see av_samples_fill_arrays()
+ */
+int av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels,
+ int nb_samples, enum AVSampleFormat sample_fmt, int align);
+
+#endif /* AVUTIL_SAMPLEFMT_H */
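
The buffer-size helpers above take the channel count, sample count, sample format and alignment, and report both the total size and the per-channel linesize. A minimal sketch (not part of the patch); the stereo/1024-sample/planar-float parameters are arbitrary demo values, and the program is linked against libavutil.

/* demo_samplefmt.c - illustrative only; link with -lavutil. */
#include <stdio.h>
#include "libavutil/samplefmt.h"

int main(void)
{
    int linesize = 0;
    /* 2 channels, 1024 samples per channel, planar float, default alignment. */
    int size = av_samples_get_buffer_size(&linesize, 2, 1024,
                                          AV_SAMPLE_FMT_FLTP, 0);

    printf("bytes per sample = %d, planar = %d\n",
           av_get_bytes_per_sample(AV_SAMPLE_FMT_FLTP),
           av_sample_fmt_is_planar(AV_SAMPLE_FMT_FLTP));
    printf("buffer = %d bytes, per-channel linesize = %d bytes\n",
           size, linesize);
    return 0;
}
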
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/sha.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/sha.h
new file mode 100644
index 000000000..8350954c4
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/sha.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_SHA_H
+#define AVUTIL_SHA_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_sha SHA
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+extern const int av_sha_size;
+
+struct AVSHA;
+
+/**
+ * Initialize SHA-1 or SHA-2 hashing.
+ *
+ * @param context pointer to the function context (of size av_sha_size)
+ * @param bits number of bits in the digest (160 for SHA-1, 224 or 256 for SHA-2)
+ * @return zero if initialization succeeded, -1 otherwise
+ */
+int av_sha_init(struct AVSHA* context, int bits);
+
+/**
+ * Update hash value.
+ *
+ * @param context hash function context
+ * @param data input data to update hash with
+ * @param len input data length
+ */
+void av_sha_update(struct AVSHA* context, const uint8_t* data, unsigned int len);
+
+/**
+ * Finish hashing and output digest value.
+ *
+ * @param context hash function context
+ * @param digest buffer where output digest value is stored
+ */
+void av_sha_final(struct AVSHA* context, uint8_t *digest);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_SHA_H */
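
AVSHA above is an opaque context whose size is exported through av_sha_size, so callers allocate it themselves before the init/update/final sequence. A minimal SHA-1 sketch (not part of the patch); plain malloc() is used to keep the snippet self-contained, and the program is linked against libavutil.

/* demo_sha.c - illustrative only; link with -lavutil. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "libavutil/sha.h"

int main(void)
{
    struct AVSHA *ctx = malloc(av_sha_size);  /* opaque context */
    uint8_t digest[20];                       /* SHA-1: 160 bits = 20 bytes */
    const char *msg = "hello";
    int i;

    if (!ctx || av_sha_init(ctx, 160) < 0)
        return 1;
    av_sha_update(ctx, (const uint8_t *)msg, strlen(msg));
    av_sha_final(ctx, digest);
    free(ctx);

    for (i = 0; i < 20; i++)
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}
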
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/time.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/time.h
new file mode 100644
index 000000000..90eb43694
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/time.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2000-2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_TIME_H
+#define AVUTIL_TIME_H
+
+#include <stdint.h>
+
+/**
+ * Get the current time in microseconds.
+ */
+int64_t av_gettime(void);
+
+/**
+ * Sleep for a period of time. Although the duration is expressed in
+ * microseconds, the actual delay may be rounded to the precision of the
+ * system timer.
+ *
+ * @param usec Number of microseconds to sleep.
+ * @return zero on success or (negative) error code.
+ */
+int av_usleep(unsigned usec);
+
+#endif /* AVUTIL_TIME_H */
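
av_gettime() and av_usleep() above are simple microsecond-resolution time primitives. A minimal sketch (not part of the patch) that measures how close a 50 ms sleep request actually gets; link against libavutil.

/* demo_time.c - illustrative only; link with -lavutil. */
#include <stdio.h>
#include "libavutil/time.h"

int main(void)
{
    int64_t start = av_gettime();
    av_usleep(50000);                       /* ask for 50 ms */
    int64_t elapsed = av_gettime() - start; /* actual delay, in microseconds */

    printf("slept for %lld us\n", (long long)elapsed);
    return 0;
}
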
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/timecode.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/timecode.h
new file mode 100644
index 000000000..56e3975fd
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/timecode.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2006 Smartjog S.A.S, Baptiste Coudurier <baptiste.coudurier@gmail.com>
+ * Copyright (c) 2011-2012 Smartjog S.A.S, Clément Bœsch <clement.boesch@smartjog.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Timecode helpers header
+ */
+
+#ifndef AVUTIL_TIMECODE_H
+#define AVUTIL_TIMECODE_H
+
+#include <stdint.h>
+#include "rational.h"
+
+#define AV_TIMECODE_STR_SIZE 16
+
+enum AVTimecodeFlag {
+ AV_TIMECODE_FLAG_DROPFRAME = 1<<0, ///< timecode is drop frame
+ AV_TIMECODE_FLAG_24HOURSMAX = 1<<1, ///< timecode wraps after 24 hours
+ AV_TIMECODE_FLAG_ALLOWNEGATIVE = 1<<2, ///< negative time values are allowed
+};
+
+typedef struct {
+ int start; ///< timecode frame start (first base frame number)
+ uint32_t flags; ///< flags such as drop frame, +24 hours support, ...
+ AVRational rate; ///< frame rate in rational form
+ unsigned fps; ///< frame per second; must be consistent with the rate field
+} AVTimecode;
+
+/**
+ * Adjust frame number for NTSC drop frame time code.
+ *
+ * @param framenum frame number to adjust
+ * @param fps frames per second, 30 or 60
+ * @return adjusted frame number
+ * @warning adjustment is only valid for NTSC 29.97 and 59.94 frame rates
+ */
+int av_timecode_adjust_ntsc_framenum2(int framenum, int fps);
+
+/**
+ * Convert frame number to SMPTE 12M binary representation.
+ *
+ * @param tc timecode data correctly initialized
+ * @param framenum frame number
+ * @return the SMPTE binary representation
+ *
+ * @note Frame number adjustment is automatically done in case of drop timecode,
+ * you do NOT have to call av_timecode_adjust_ntsc_framenum2().
+ * @note The frame number is relative to tc->start.
+ * @note Color frame (CF), binary group flags (BGF) and biphase mark polarity
+ * correction (PC) bits are set to zero.
+ */
+uint32_t av_timecode_get_smpte_from_framenum(const AVTimecode *tc, int framenum);
+
+/**
+ * Load timecode string in buf.
+ *
+ * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long
+ * @param tc timecode data correctly initialized
+ * @param framenum frame number
+ * @return the buf parameter
+ *
+ * @note The timecode representation can be negative or exceed 24 hours, but
+ * this is only honored if the corresponding flags are set.
+ * @note The frame number is relative to tc->start.
+ */
+char *av_timecode_make_string(const AVTimecode *tc, char *buf, int framenum);
+
+/**
+ * Get the timecode string from the SMPTE timecode format.
+ *
+ * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long
+ * @param tcsmpte the 32-bit SMPTE timecode
+ * @param prevent_df prevent the use of a drop flag when it is known the DF bit
+ * is arbitrary
+ * @return the buf parameter
+ */
+char *av_timecode_make_smpte_tc_string(char *buf, uint32_t tcsmpte, int prevent_df);
+
+/**
+ * Get the timecode string from the 25-bit timecode format (MPEG GOP format).
+ *
+ * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long
+ * @param tc25bit the 25-bit timecode
+ * @return the buf parameter
+ */
+char *av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit);
+
+/**
+ * Init a timecode struct with the passed parameters.
+ *
+ * @param log_ctx a pointer to an arbitrary struct of which the first field
+ * is a pointer to an AVClass struct (used for av_log)
+ * @param tc pointer to an allocated AVTimecode
+ * @param rate frame rate in rational form
+ * @param flags miscellaneous flags such as drop frame, +24 hours, ...
+ * (see AVTimecodeFlag)
+ * @param frame_start the first frame number
+ * @return 0 on success, AVERROR otherwise
+ */
+int av_timecode_init(AVTimecode *tc, AVRational rate, int flags, int frame_start, void *log_ctx);
+
+/**
+ * Parse timecode representation (hh:mm:ss[:;.]ff).
+ *
+ * @param log_ctx a pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct (used for av_log).
+ * @param tc pointer to an allocated AVTimecode
+ * @param rate frame rate in rational form
+ * @param str timecode string which will determine the frame start
+ * @return 0 on success, AVERROR otherwise
+ */
+int av_timecode_init_from_string(AVTimecode *tc, AVRational rate, const char *str, void *log_ctx);
+
+/**
+ * Check if the timecode feature is available for the given frame rate
+ *
+ * @return 0 if supported, <0 otherwise
+ */
+int av_timecode_check_frame_rate(AVRational rate);
+
+#endif /* AVUTIL_TIMECODE_H */
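
av_timecode_init() takes the frame rate as an AVRational plus flags and a starting frame, after which av_timecode_make_string() formats any frame number as hh:mm:ss:ff. A minimal sketch (not part of the patch); the 25 fps rate and frame 90000 are arbitrary demo values, and the program is linked against libavutil.

/* demo_timecode.c - illustrative only; link with -lavutil. */
#include <stdio.h>
#include "libavutil/timecode.h"

int main(void)
{
    AVTimecode tc;
    char buf[AV_TIMECODE_STR_SIZE];
    AVRational rate = { 25, 1 };

    /* No flags, timecode starts at frame 0, no logging context. */
    if (av_timecode_init(&tc, rate, 0, 0, NULL) < 0)
        return 1;

    /* 90000 frames at 25 fps is exactly one hour: prints 01:00:00:00. */
    printf("frame 90000 -> %s\n", av_timecode_make_string(&tc, buf, 90000));
    return 0;
}
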
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/timestamp.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/timestamp.h
new file mode 100644
index 000000000..c7348d8f1
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/timestamp.h
@@ -0,0 +1,74 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * timestamp utils, mostly useful for debugging/logging purposes
+ */
+
+#ifndef AVUTIL_TIMESTAMP_H
+#define AVUTIL_TIMESTAMP_H
+
+#include "common.h"
+
+#define AV_TS_MAX_STRING_SIZE 32
+
+/**
+ * Fill the provided buffer with a string containing a timestamp
+ * representation.
+ *
+ * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE
+ * @param ts the timestamp to represent
+ * @return the buffer in input
+ */
+static inline char *av_ts_make_string(char *buf, int64_t ts)
+{
+ if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS");
+ else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%"PRId64"", ts);
+ return buf;
+}
+
+/**
+ * Convenience macro; the return value should be used only directly as a
+ * function argument, never stand-alone.
+ */
+#define av_ts2str(ts) av_ts_make_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts)
+
+/**
+ * Fill the provided buffer with a string containing a timestamp time
+ * representation.
+ *
+ * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE
+ * @param ts the timestamp to represent
+ * @param tb the timebase of the timestamp
+ * @return the buffer in input
+ */
+static inline char *av_ts_make_time_string(char *buf, int64_t ts, AVRational *tb)
+{
+ if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS");
+ else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%.6g", av_q2d(*tb) * ts);
+ return buf;
+}
+
+/**
+ * Convenience macro; the return value should be used only directly as a
+ * function argument, never stand-alone.
+ */
+#define av_ts2timestr(ts, tb) av_ts_make_time_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts, tb)
+
+#endif /* AVUTIL_TIMESTAMP_H */
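
av_ts2str() and av_ts2timestr() above expand to C99 compound literals, which is why the header warns that their results are only valid directly inside a function argument. A minimal sketch (not part of the patch); avutil.h is included as well so AV_NOPTS_VALUE is in scope, and the 1/90000 time base is just a familiar MPEG-TS example.

/* demo_timestamp.c - illustrative only; build as C99. */
#include <stdio.h>
#include "libavutil/avutil.h"
#include "libavutil/timestamp.h"

int main(void)
{
    AVRational tb = { 1, 90000 };   /* a common MPEG-TS time base */
    int64_t pts = 180000;           /* 2 seconds in that time base */

    printf("pts = %s (%s s)\n", av_ts2str(pts), av_ts2timestr(pts, &tb));
    printf("missing pts = %s\n", av_ts2str(AV_NOPTS_VALUE));
    return 0;
}
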
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/version.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/version.h
new file mode 100644
index 000000000..4ad244f40
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/version.h
@@ -0,0 +1,132 @@
+/*
+ * copyright (c) 2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_VERSION_H
+#define AVUTIL_VERSION_H
+
+/**
+ * @defgroup preproc_misc Preprocessor String Macros
+ *
+ * String manipulation macros
+ *
+ * @{
+ */
+
+#define AV_STRINGIFY(s) AV_TOSTRING(s)
+#define AV_TOSTRING(s) #s
+
+#define AV_GLUE(a, b) a ## b
+#define AV_JOIN(a, b) AV_GLUE(a, b)
+
+#define AV_PRAGMA(s) _Pragma(#s)
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup version_utils Library Version Macros
+ *
+ * Useful for checking and matching library versions in order to maintain
+ * backward compatibility.
+ *
+ * @{
+ */
+
+#define AV_VERSION_INT(a, b, c) (a<<16 | b<<8 | c)
+#define AV_VERSION_DOT(a, b, c) a ##.## b ##.## c
+#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c)
+
+/**
+ * @}
+ */
+
+
+/**
+ * @file
+ * @ingroup lavu
+ * Libavutil version macros
+ */
+
+/**
+ * @defgroup lavu_ver Version and Build diagnostics
+ *
+ * Macros and functions useful for checking at compile time and at run time
+ * which version of libavutil is in use.
+ *
+ * @{
+ */
+
+#define LIBAVUTIL_VERSION_MAJOR 52
+#define LIBAVUTIL_VERSION_MINOR 5
+#define LIBAVUTIL_VERSION_MICRO 100
+
+#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
+ LIBAVUTIL_VERSION_MINOR, \
+ LIBAVUTIL_VERSION_MICRO)
+#define LIBAVUTIL_VERSION AV_VERSION(LIBAVUTIL_VERSION_MAJOR, \
+ LIBAVUTIL_VERSION_MINOR, \
+ LIBAVUTIL_VERSION_MICRO)
+#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT
+
+#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION)
+
+/**
+ * @}
+ *
+ * @defgroup depr_guards Deprecation guards
+ * FF_API_* defines may be placed below to indicate public API that will be
+ * dropped at a future version bump. The defines themselves are not part of
+ * the public API and may change, break or disappear at any time.
+ *
+ * @{
+ */
+
+#ifndef FF_API_OLD_EVAL_NAMES
+#define FF_API_OLD_EVAL_NAMES (LIBAVUTIL_VERSION_MAJOR < 52)
+#endif
+#ifndef FF_API_GET_BITS_PER_SAMPLE_FMT
+#define FF_API_GET_BITS_PER_SAMPLE_FMT (LIBAVUTIL_VERSION_MAJOR < 53)
+#endif
+#ifndef FF_API_FIND_OPT
+#define FF_API_FIND_OPT (LIBAVUTIL_VERSION_MAJOR < 53)
+#endif
+#ifndef FF_API_OLD_AVOPTIONS
+#define FF_API_OLD_AVOPTIONS (LIBAVUTIL_VERSION_MAJOR < 53)
+#endif
+#ifndef FF_API_PIX_FMT
+#define FF_API_PIX_FMT (LIBAVUTIL_VERSION_MAJOR < 53)
+#endif
+#ifndef FF_API_CONTEXT_SIZE
+#define FF_API_CONTEXT_SIZE (LIBAVUTIL_VERSION_MAJOR < 53)
+#endif
+#ifndef FF_API_PIX_FMT_DESC
+#define FF_API_PIX_FMT_DESC (LIBAVUTIL_VERSION_MAJOR < 53)
+#endif
+#ifndef FF_API_AV_REVERSE
+#define FF_API_AV_REVERSE (LIBAVUTIL_VERSION_MAJOR < 53)
+#endif
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_VERSION_H */
+
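
The version macros above identify these bundled headers as libavutil 52.5.100, and the FF_API_* guards gate deprecated API on the major version. A minimal compile-time check (not part of the patch); the file name is illustrative.

/* demo_version.c - illustrative only. */
#include <stdio.h>
#include "libavutil/version.h"

int main(void)
{
    printf("headers describe libavutil %d.%d.%d (0x%06x)\n",
           LIBAVUTIL_VERSION_MAJOR, LIBAVUTIL_VERSION_MINOR,
           LIBAVUTIL_VERSION_MICRO, (unsigned)LIBAVUTIL_VERSION_INT);

#if FF_API_GET_BITS_PER_SAMPLE_FMT
    /* True while LIBAVUTIL_VERSION_MAJOR < 53: the deprecated
     * av_get_bits_per_sample_fmt() is still part of the public API. */
    printf("deprecated av_get_bits_per_sample_fmt() is still exposed\n");
#endif
    return 0;
}
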
diff --git a/dom/media/platforms/ffmpeg/libav53/include/libavutil/xtea.h b/dom/media/platforms/ffmpeg/libav53/include/libavutil/xtea.h
new file mode 100644
index 000000000..0899c92bc
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/include/libavutil/xtea.h
@@ -0,0 +1,62 @@
+/*
+ * A 32-bit implementation of the XTEA algorithm
+ * Copyright (c) 2012 Samuel Pitoiset
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_XTEA_H
+#define AVUTIL_XTEA_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_xtea XTEA
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+typedef struct AVXTEA {
+ uint32_t key[16];
+} AVXTEA;
+
+/**
+ * Initialize an AVXTEA context.
+ *
+ * @param ctx an AVXTEA context
+ * @param key a key of 16 bytes used for encryption/decryption
+ */
+void av_xtea_init(struct AVXTEA *ctx, const uint8_t key[16]);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context.
+ *
+ * @param ctx an AVXTEA context
+ * @param dst destination array, can be equal to src
+ * @param src source array, can be equal to dst
+ * @param count number of 8 byte blocks
+ * @param iv initialization vector for CBC mode, if NULL then ECB will be used
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_xtea_crypt(struct AVXTEA *ctx, uint8_t *dst, const uint8_t *src,
+ int count, uint8_t *iv, int decrypt);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_XTEA_H */
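
av_xtea_crypt() above processes 8-byte blocks and selects ECB or CBC depending on whether an IV is passed. A minimal ECB round-trip sketch (not part of the patch); the all-zero key and sample block are demo values, and the program is linked against libavutil.

/* demo_xtea.c - illustrative only; link with -lavutil. */
#include <stdio.h>
#include <string.h>
#include "libavutil/xtea.h"

int main(void)
{
    AVXTEA ctx;
    const uint8_t key[16] = { 0 };   /* all-zero demo key */
    uint8_t plain[8] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' };
    uint8_t enc[8], dec[8];

    av_xtea_init(&ctx, key);
    av_xtea_crypt(&ctx, enc, plain, 1, NULL, 0); /* 1 block, encrypt (ECB) */
    av_xtea_crypt(&ctx, dec, enc,   1, NULL, 1); /* 1 block, decrypt (ECB) */

    printf("round trip %s\n", memcmp(plain, dec, sizeof(plain)) ? "failed" : "ok");
    return 0;
}
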
diff --git a/dom/media/platforms/ffmpeg/libav53/moz.build b/dom/media/platforms/ffmpeg/libav53/moz.build
new file mode 100644
index 000000000..2bdc1dc34
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav53/moz.build
@@ -0,0 +1,23 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+UNIFIED_SOURCES += [
+ '../FFmpegAudioDecoder.cpp',
+ '../FFmpegDataDecoder.cpp',
+ '../FFmpegDecoderModule.cpp',
+ '../FFmpegVideoDecoder.cpp',
+]
+LOCAL_INCLUDES += [
+ '..',
+ 'include',
+]
+
+FINAL_LIBRARY = 'xul'
+
+if CONFIG['CLANG_CXX']:
+ CXXFLAGS += [
+ '-Wno-unknown-attributes',
+ ]
diff --git a/dom/media/platforms/ffmpeg/libav54/include/COPYING.LGPLv2.1 b/dom/media/platforms/ffmpeg/libav54/include/COPYING.LGPLv2.1
new file mode 100644
index 000000000..00b4fedfe
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/COPYING.LGPLv2.1
@@ -0,0 +1,504 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavcodec/avcodec.h b/dom/media/platforms/ffmpeg/libav54/include/libavcodec/avcodec.h
new file mode 100644
index 000000000..e6b8ec626
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavcodec/avcodec.h
@@ -0,0 +1,4658 @@
+/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AVCODEC_H
+#define AVCODEC_AVCODEC_H
+
+/**
+ * @file
+ * external API header
+ */
+
+#include <errno.h>
+#include "libavutil/samplefmt.h"
+#include "libavutil/avutil.h"
+#include "libavutil/cpu.h"
+#include "libavutil/dict.h"
+#include "libavutil/log.h"
+#include "libavutil/pixfmt.h"
+#include "libavutil/rational.h"
+
+#include "libavcodec/version.h"
+/**
+ * @defgroup libavc Encoding/Decoding Library
+ * @{
+ *
+ * @defgroup lavc_decoding Decoding
+ * @{
+ * @}
+ *
+ * @defgroup lavc_encoding Encoding
+ * @{
+ * @}
+ *
+ * @defgroup lavc_codec Codecs
+ * @{
+ * @defgroup lavc_codec_native Native Codecs
+ * @{
+ * @}
+ * @defgroup lavc_codec_wrappers External library wrappers
+ * @{
+ * @}
+ * @defgroup lavc_codec_hwaccel Hardware Accelerators bridge
+ * @{
+ * @}
+ * @}
+ * @defgroup lavc_internal Internal
+ * @{
+ * @}
+ * @}
+ *
+ */
+
+/**
+ * @defgroup lavc_core Core functions/structures.
+ * @ingroup libavc
+ *
+ * Basic definitions, functions for querying libavcodec capabilities,
+ * allocating core structures, etc.
+ * @{
+ */
+
+
+/**
+ * Identify the syntax and semantics of the bitstream.
+ * The principle is roughly:
+ * Two decoders with the same ID can decode the same streams.
+ * Two encoders with the same ID can encode compatible streams.
+ * There may be slight deviations from the principle due to implementation
+ * details.
+ *
+ * If you add a codec ID to this list, add it so that
+ * 1. no value of an existing codec ID changes (that would break ABI),
+ * 2. it is as close as possible to similar codecs.
+ *
+ * After adding new codec IDs, do not forget to add an entry to the codec
+ * descriptor list and bump libavcodec minor version.
+ */
+enum AVCodecID {
+ AV_CODEC_ID_NONE,
+
+ /* video codecs */
+ AV_CODEC_ID_MPEG1VIDEO,
+ AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding
+ AV_CODEC_ID_MPEG2VIDEO_XVMC,
+ AV_CODEC_ID_H261,
+ AV_CODEC_ID_H263,
+ AV_CODEC_ID_RV10,
+ AV_CODEC_ID_RV20,
+ AV_CODEC_ID_MJPEG,
+ AV_CODEC_ID_MJPEGB,
+ AV_CODEC_ID_LJPEG,
+ AV_CODEC_ID_SP5X,
+ AV_CODEC_ID_JPEGLS,
+ AV_CODEC_ID_MPEG4,
+ AV_CODEC_ID_RAWVIDEO,
+ AV_CODEC_ID_MSMPEG4V1,
+ AV_CODEC_ID_MSMPEG4V2,
+ AV_CODEC_ID_MSMPEG4V3,
+ AV_CODEC_ID_WMV1,
+ AV_CODEC_ID_WMV2,
+ AV_CODEC_ID_H263P,
+ AV_CODEC_ID_H263I,
+ AV_CODEC_ID_FLV1,
+ AV_CODEC_ID_SVQ1,
+ AV_CODEC_ID_SVQ3,
+ AV_CODEC_ID_DVVIDEO,
+ AV_CODEC_ID_HUFFYUV,
+ AV_CODEC_ID_CYUV,
+ AV_CODEC_ID_H264,
+ AV_CODEC_ID_INDEO3,
+ AV_CODEC_ID_VP3,
+ AV_CODEC_ID_THEORA,
+ AV_CODEC_ID_ASV1,
+ AV_CODEC_ID_ASV2,
+ AV_CODEC_ID_FFV1,
+ AV_CODEC_ID_4XM,
+ AV_CODEC_ID_VCR1,
+ AV_CODEC_ID_CLJR,
+ AV_CODEC_ID_MDEC,
+ AV_CODEC_ID_ROQ,
+ AV_CODEC_ID_INTERPLAY_VIDEO,
+ AV_CODEC_ID_XAN_WC3,
+ AV_CODEC_ID_XAN_WC4,
+ AV_CODEC_ID_RPZA,
+ AV_CODEC_ID_CINEPAK,
+ AV_CODEC_ID_WS_VQA,
+ AV_CODEC_ID_MSRLE,
+ AV_CODEC_ID_MSVIDEO1,
+ AV_CODEC_ID_IDCIN,
+ AV_CODEC_ID_8BPS,
+ AV_CODEC_ID_SMC,
+ AV_CODEC_ID_FLIC,
+ AV_CODEC_ID_TRUEMOTION1,
+ AV_CODEC_ID_VMDVIDEO,
+ AV_CODEC_ID_MSZH,
+ AV_CODEC_ID_ZLIB,
+ AV_CODEC_ID_QTRLE,
+ AV_CODEC_ID_SNOW,
+ AV_CODEC_ID_TSCC,
+ AV_CODEC_ID_ULTI,
+ AV_CODEC_ID_QDRAW,
+ AV_CODEC_ID_VIXL,
+ AV_CODEC_ID_QPEG,
+ AV_CODEC_ID_PNG,
+ AV_CODEC_ID_PPM,
+ AV_CODEC_ID_PBM,
+ AV_CODEC_ID_PGM,
+ AV_CODEC_ID_PGMYUV,
+ AV_CODEC_ID_PAM,
+ AV_CODEC_ID_FFVHUFF,
+ AV_CODEC_ID_RV30,
+ AV_CODEC_ID_RV40,
+ AV_CODEC_ID_VC1,
+ AV_CODEC_ID_WMV3,
+ AV_CODEC_ID_LOCO,
+ AV_CODEC_ID_WNV1,
+ AV_CODEC_ID_AASC,
+ AV_CODEC_ID_INDEO2,
+ AV_CODEC_ID_FRAPS,
+ AV_CODEC_ID_TRUEMOTION2,
+ AV_CODEC_ID_BMP,
+ AV_CODEC_ID_CSCD,
+ AV_CODEC_ID_MMVIDEO,
+ AV_CODEC_ID_ZMBV,
+ AV_CODEC_ID_AVS,
+ AV_CODEC_ID_SMACKVIDEO,
+ AV_CODEC_ID_NUV,
+ AV_CODEC_ID_KMVC,
+ AV_CODEC_ID_FLASHSV,
+ AV_CODEC_ID_CAVS,
+ AV_CODEC_ID_JPEG2000,
+ AV_CODEC_ID_VMNC,
+ AV_CODEC_ID_VP5,
+ AV_CODEC_ID_VP6,
+ AV_CODEC_ID_VP6F,
+ AV_CODEC_ID_TARGA,
+ AV_CODEC_ID_DSICINVIDEO,
+ AV_CODEC_ID_TIERTEXSEQVIDEO,
+ AV_CODEC_ID_TIFF,
+ AV_CODEC_ID_GIF,
+ AV_CODEC_ID_DXA,
+ AV_CODEC_ID_DNXHD,
+ AV_CODEC_ID_THP,
+ AV_CODEC_ID_SGI,
+ AV_CODEC_ID_C93,
+ AV_CODEC_ID_BETHSOFTVID,
+ AV_CODEC_ID_PTX,
+ AV_CODEC_ID_TXD,
+ AV_CODEC_ID_VP6A,
+ AV_CODEC_ID_AMV,
+ AV_CODEC_ID_VB,
+ AV_CODEC_ID_PCX,
+ AV_CODEC_ID_SUNRAST,
+ AV_CODEC_ID_INDEO4,
+ AV_CODEC_ID_INDEO5,
+ AV_CODEC_ID_MIMIC,
+ AV_CODEC_ID_RL2,
+ AV_CODEC_ID_ESCAPE124,
+ AV_CODEC_ID_DIRAC,
+ AV_CODEC_ID_BFI,
+ AV_CODEC_ID_CMV,
+ AV_CODEC_ID_MOTIONPIXELS,
+ AV_CODEC_ID_TGV,
+ AV_CODEC_ID_TGQ,
+ AV_CODEC_ID_TQI,
+ AV_CODEC_ID_AURA,
+ AV_CODEC_ID_AURA2,
+ AV_CODEC_ID_V210X,
+ AV_CODEC_ID_TMV,
+ AV_CODEC_ID_V210,
+ AV_CODEC_ID_DPX,
+ AV_CODEC_ID_MAD,
+ AV_CODEC_ID_FRWU,
+ AV_CODEC_ID_FLASHSV2,
+ AV_CODEC_ID_CDGRAPHICS,
+ AV_CODEC_ID_R210,
+ AV_CODEC_ID_ANM,
+ AV_CODEC_ID_BINKVIDEO,
+ AV_CODEC_ID_IFF_ILBM,
+ AV_CODEC_ID_IFF_BYTERUN1,
+ AV_CODEC_ID_KGV1,
+ AV_CODEC_ID_YOP,
+ AV_CODEC_ID_VP8,
+ AV_CODEC_ID_PICTOR,
+ AV_CODEC_ID_ANSI,
+ AV_CODEC_ID_A64_MULTI,
+ AV_CODEC_ID_A64_MULTI5,
+ AV_CODEC_ID_R10K,
+ AV_CODEC_ID_MXPEG,
+ AV_CODEC_ID_LAGARITH,
+ AV_CODEC_ID_PRORES,
+ AV_CODEC_ID_JV,
+ AV_CODEC_ID_DFA,
+ AV_CODEC_ID_WMV3IMAGE,
+ AV_CODEC_ID_VC1IMAGE,
+ AV_CODEC_ID_UTVIDEO,
+ AV_CODEC_ID_BMV_VIDEO,
+ AV_CODEC_ID_VBLE,
+ AV_CODEC_ID_DXTORY,
+ AV_CODEC_ID_V410,
+ AV_CODEC_ID_XWD,
+ AV_CODEC_ID_CDXL,
+ AV_CODEC_ID_XBM,
+ AV_CODEC_ID_ZEROCODEC,
+ AV_CODEC_ID_MSS1,
+ AV_CODEC_ID_MSA1,
+ AV_CODEC_ID_TSCC2,
+ AV_CODEC_ID_MTS2,
+ AV_CODEC_ID_CLLC,
+ AV_CODEC_ID_MSS2,
+
+ /* various PCM "codecs" */
+ AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
+ AV_CODEC_ID_PCM_S16LE = 0x10000,
+ AV_CODEC_ID_PCM_S16BE,
+ AV_CODEC_ID_PCM_U16LE,
+ AV_CODEC_ID_PCM_U16BE,
+ AV_CODEC_ID_PCM_S8,
+ AV_CODEC_ID_PCM_U8,
+ AV_CODEC_ID_PCM_MULAW,
+ AV_CODEC_ID_PCM_ALAW,
+ AV_CODEC_ID_PCM_S32LE,
+ AV_CODEC_ID_PCM_S32BE,
+ AV_CODEC_ID_PCM_U32LE,
+ AV_CODEC_ID_PCM_U32BE,
+ AV_CODEC_ID_PCM_S24LE,
+ AV_CODEC_ID_PCM_S24BE,
+ AV_CODEC_ID_PCM_U24LE,
+ AV_CODEC_ID_PCM_U24BE,
+ AV_CODEC_ID_PCM_S24DAUD,
+ AV_CODEC_ID_PCM_ZORK,
+ AV_CODEC_ID_PCM_S16LE_PLANAR,
+ AV_CODEC_ID_PCM_DVD,
+ AV_CODEC_ID_PCM_F32BE,
+ AV_CODEC_ID_PCM_F32LE,
+ AV_CODEC_ID_PCM_F64BE,
+ AV_CODEC_ID_PCM_F64LE,
+ AV_CODEC_ID_PCM_BLURAY,
+ AV_CODEC_ID_PCM_LXF,
+ AV_CODEC_ID_S302M,
+ AV_CODEC_ID_PCM_S8_PLANAR,
+
+ /* various ADPCM codecs */
+ AV_CODEC_ID_ADPCM_IMA_QT = 0x11000,
+ AV_CODEC_ID_ADPCM_IMA_WAV,
+ AV_CODEC_ID_ADPCM_IMA_DK3,
+ AV_CODEC_ID_ADPCM_IMA_DK4,
+ AV_CODEC_ID_ADPCM_IMA_WS,
+ AV_CODEC_ID_ADPCM_IMA_SMJPEG,
+ AV_CODEC_ID_ADPCM_MS,
+ AV_CODEC_ID_ADPCM_4XM,
+ AV_CODEC_ID_ADPCM_XA,
+ AV_CODEC_ID_ADPCM_ADX,
+ AV_CODEC_ID_ADPCM_EA,
+ AV_CODEC_ID_ADPCM_G726,
+ AV_CODEC_ID_ADPCM_CT,
+ AV_CODEC_ID_ADPCM_SWF,
+ AV_CODEC_ID_ADPCM_YAMAHA,
+ AV_CODEC_ID_ADPCM_SBPRO_4,
+ AV_CODEC_ID_ADPCM_SBPRO_3,
+ AV_CODEC_ID_ADPCM_SBPRO_2,
+ AV_CODEC_ID_ADPCM_THP,
+ AV_CODEC_ID_ADPCM_IMA_AMV,
+ AV_CODEC_ID_ADPCM_EA_R1,
+ AV_CODEC_ID_ADPCM_EA_R3,
+ AV_CODEC_ID_ADPCM_EA_R2,
+ AV_CODEC_ID_ADPCM_IMA_EA_SEAD,
+ AV_CODEC_ID_ADPCM_IMA_EA_EACS,
+ AV_CODEC_ID_ADPCM_EA_XAS,
+ AV_CODEC_ID_ADPCM_EA_MAXIS_XA,
+ AV_CODEC_ID_ADPCM_IMA_ISS,
+ AV_CODEC_ID_ADPCM_G722,
+ AV_CODEC_ID_ADPCM_IMA_APC,
+
+ /* AMR */
+ AV_CODEC_ID_AMR_NB = 0x12000,
+ AV_CODEC_ID_AMR_WB,
+
+ /* RealAudio codecs*/
+ AV_CODEC_ID_RA_144 = 0x13000,
+ AV_CODEC_ID_RA_288,
+
+ /* various DPCM codecs */
+ AV_CODEC_ID_ROQ_DPCM = 0x14000,
+ AV_CODEC_ID_INTERPLAY_DPCM,
+ AV_CODEC_ID_XAN_DPCM,
+ AV_CODEC_ID_SOL_DPCM,
+
+ /* audio codecs */
+ AV_CODEC_ID_MP2 = 0x15000,
+ AV_CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3
+ AV_CODEC_ID_AAC,
+ AV_CODEC_ID_AC3,
+ AV_CODEC_ID_DTS,
+ AV_CODEC_ID_VORBIS,
+ AV_CODEC_ID_DVAUDIO,
+ AV_CODEC_ID_WMAV1,
+ AV_CODEC_ID_WMAV2,
+ AV_CODEC_ID_MACE3,
+ AV_CODEC_ID_MACE6,
+ AV_CODEC_ID_VMDAUDIO,
+ AV_CODEC_ID_FLAC,
+ AV_CODEC_ID_MP3ADU,
+ AV_CODEC_ID_MP3ON4,
+ AV_CODEC_ID_SHORTEN,
+ AV_CODEC_ID_ALAC,
+ AV_CODEC_ID_WESTWOOD_SND1,
+ AV_CODEC_ID_GSM, ///< as in Berlin toast format
+ AV_CODEC_ID_QDM2,
+ AV_CODEC_ID_COOK,
+ AV_CODEC_ID_TRUESPEECH,
+ AV_CODEC_ID_TTA,
+ AV_CODEC_ID_SMACKAUDIO,
+ AV_CODEC_ID_QCELP,
+ AV_CODEC_ID_WAVPACK,
+ AV_CODEC_ID_DSICINAUDIO,
+ AV_CODEC_ID_IMC,
+ AV_CODEC_ID_MUSEPACK7,
+ AV_CODEC_ID_MLP,
+ AV_CODEC_ID_GSM_MS, /* as found in WAV */
+ AV_CODEC_ID_ATRAC3,
+ AV_CODEC_ID_VOXWARE,
+ AV_CODEC_ID_APE,
+ AV_CODEC_ID_NELLYMOSER,
+ AV_CODEC_ID_MUSEPACK8,
+ AV_CODEC_ID_SPEEX,
+ AV_CODEC_ID_WMAVOICE,
+ AV_CODEC_ID_WMAPRO,
+ AV_CODEC_ID_WMALOSSLESS,
+ AV_CODEC_ID_ATRAC3P,
+ AV_CODEC_ID_EAC3,
+ AV_CODEC_ID_SIPR,
+ AV_CODEC_ID_MP1,
+ AV_CODEC_ID_TWINVQ,
+ AV_CODEC_ID_TRUEHD,
+ AV_CODEC_ID_MP4ALS,
+ AV_CODEC_ID_ATRAC1,
+ AV_CODEC_ID_BINKAUDIO_RDFT,
+ AV_CODEC_ID_BINKAUDIO_DCT,
+ AV_CODEC_ID_AAC_LATM,
+ AV_CODEC_ID_QDMC,
+ AV_CODEC_ID_CELT,
+ AV_CODEC_ID_G723_1,
+ AV_CODEC_ID_G729,
+ AV_CODEC_ID_8SVX_EXP,
+ AV_CODEC_ID_8SVX_FIB,
+ AV_CODEC_ID_BMV_AUDIO,
+ AV_CODEC_ID_RALF,
+ AV_CODEC_ID_IAC,
+ AV_CODEC_ID_ILBC,
+ AV_CODEC_ID_OPUS,
+ AV_CODEC_ID_COMFORT_NOISE,
+ AV_CODEC_ID_TAK,
+
+ /* subtitle codecs */
+ AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
+ AV_CODEC_ID_DVD_SUBTITLE = 0x17000,
+ AV_CODEC_ID_DVB_SUBTITLE,
+ AV_CODEC_ID_TEXT, ///< raw UTF-8 text
+ AV_CODEC_ID_XSUB,
+ AV_CODEC_ID_SSA,
+ AV_CODEC_ID_MOV_TEXT,
+ AV_CODEC_ID_HDMV_PGS_SUBTITLE,
+ AV_CODEC_ID_DVB_TELETEXT,
+ AV_CODEC_ID_SRT,
+
+ /* other specific kind of codecs (generally used for attachments) */
+ AV_CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs.
+ AV_CODEC_ID_TTF = 0x18000,
+
+ AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it
+
+ AV_CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS
+ * stream (only used by libavformat) */
+ AV_CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems
+ * stream (only used by libavformat) */
+ AV_CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information.
+
+#if FF_API_CODEC_ID
+#include "old_codec_ids.h"
+#endif
+};
+
+#if FF_API_CODEC_ID
+#define CodecID AVCodecID
+#endif
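+
+/*
+ * A minimal usage sketch: a codec ID is normally turned into a concrete
+ * decoder with avcodec_find_decoder() (declared later in this header),
+ * assuming the corresponding decoder was compiled into the library.
+ *
+ *     avcodec_register_all();
+ *     AVCodec *dec = avcodec_find_decoder(AV_CODEC_ID_H264);
+ *     if (!dec)
+ *         return;   // H.264 decoder not available in this build
+ */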
+
+/**
+ * This struct describes the properties of a single codec described by an
+ * AVCodecID.
+ * @see avcodec_descriptor_get()
+ */
+typedef struct AVCodecDescriptor {
+ enum AVCodecID id;
+ enum AVMediaType type;
+ /**
+ * Name of the codec described by this descriptor. It is non-empty and
+ * unique for each codec descriptor. It should contain alphanumeric
+ * characters and '_' only.
+ */
+ const char *name;
+ /**
+ * A more descriptive name for this codec. May be NULL.
+ */
+ const char *long_name;
+ /**
+ * Codec properties, a combination of AV_CODEC_PROP_* flags.
+ */
+ int props;
+} AVCodecDescriptor;
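+
+/*
+ * A short sketch of looking up a descriptor, assuming the
+ * avcodec_descriptor_get() accessor that accompanies this struct:
+ *
+ *     const AVCodecDescriptor *desc = avcodec_descriptor_get(AV_CODEC_ID_VORBIS);
+ *     if (desc) {
+ *         // desc->name is a short, unique name such as "vorbis";
+ *         // desc->long_name may be NULL
+ *     }
+ */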
+
+/**
+ * Codec uses only intra compression.
+ * Video codecs only.
+ */
+#define AV_CODEC_PROP_INTRA_ONLY (1 << 0)
+/**
+ * Codec supports lossy compression. Audio and video codecs only.
+ * @note a codec may support both lossy and lossless
+ * compression modes
+ */
+#define AV_CODEC_PROP_LOSSY (1 << 1)
+/**
+ * Codec supports lossless compression. Audio and video codecs only.
+ */
+#define AV_CODEC_PROP_LOSSLESS (1 << 2)
+
+#if FF_API_OLD_DECODE_AUDIO
+/* in bytes */
+#define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio
+#endif
+
+/**
+ * @ingroup lavc_decoding
+ * Required number of additionally allocated bytes at the end of the input bitstream for decoding.
+ * This is mainly needed because some optimized bitstream readers read
+ * 32 or 64 bit at once and could read over the end.<br>
+ * Note: If the first 23 bits of the additional bytes are not 0, then damaged
+ * MPEG bitstreams could cause overread and segfault.
+ */
+#define FF_INPUT_BUFFER_PADDING_SIZE 8
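+
+/*
+ * An illustrative sketch of honouring the padding requirement when feeding
+ * the decoder a buffer allocated by the caller (buf and buf_size are
+ * hypothetical):
+ *
+ *     uint8_t *padded = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ *     if (!padded)
+ *         return AVERROR(ENOMEM);
+ *     memcpy(padded, buf, buf_size);   // the trailing padding bytes stay zeroed
+ */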
+
+/**
+ * @ingroup lavc_encoding
+ * minimum encoding buffer size
+ * Used to avoid some checks during header writing.
+ */
+#define FF_MIN_BUFFER_SIZE 16384
+
+
+/**
+ * @ingroup lavc_encoding
+ * motion estimation type.
+ */
+enum Motion_Est_ID {
+ ME_ZERO = 1, ///< no search, that is use 0,0 vector whenever one is needed
+ ME_FULL,
+ ME_LOG,
+ ME_PHODS,
+ ME_EPZS, ///< enhanced predictive zonal search
+ ME_X1, ///< reserved for experiments
+ ME_HEX, ///< hexagon based search
+ ME_UMH, ///< uneven multi-hexagon search
+ ME_ITER, ///< iterative search
+ ME_TESA, ///< transformed exhaustive search algorithm
+};
+
+/**
+ * @ingroup lavc_decoding
+ */
+enum AVDiscard{
+ /* We leave some space between them for extensions (drop some
+ * keyframes for intra-only or drop just some bidir frames). */
+ AVDISCARD_NONE =-16, ///< discard nothing
+ AVDISCARD_DEFAULT = 0, ///< discard useless packets like 0 size packets in avi
+ AVDISCARD_NONREF = 8, ///< discard all non reference
+ AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames
+ AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes
+ AVDISCARD_ALL = 48, ///< discard all
+};
+
+enum AVColorPrimaries{
+ AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B
+ AVCOL_PRI_UNSPECIFIED = 2,
+ AVCOL_PRI_BT470M = 4,
+ AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
+ AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
+ AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above
+ AVCOL_PRI_FILM = 8,
+ AVCOL_PRI_NB , ///< Not part of ABI
+};
+
+enum AVColorTransferCharacteristic{
+ AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361
+ AVCOL_TRC_UNSPECIFIED = 2,
+ AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
+ AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG
+ AVCOL_TRC_SMPTE240M = 7,
+ AVCOL_TRC_NB , ///< Not part of ABI
+};
+
+enum AVColorSpace{
+ AVCOL_SPC_RGB = 0,
+ AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
+ AVCOL_SPC_UNSPECIFIED = 2,
+ AVCOL_SPC_FCC = 4,
+ AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
+ AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
+ AVCOL_SPC_SMPTE240M = 7,
+ AVCOL_SPC_YCOCG = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16
+ AVCOL_SPC_NB , ///< Not part of ABI
+};
+
+enum AVColorRange{
+ AVCOL_RANGE_UNSPECIFIED = 0,
+ AVCOL_RANGE_MPEG = 1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges
+ AVCOL_RANGE_JPEG = 2, ///< the normal 2^n-1 "JPEG" YUV ranges
+ AVCOL_RANGE_NB , ///< Not part of ABI
+};
+
+/**
+ * X X 3 4 X X are luma samples,
+ * 1 2 1-6 are possible chroma positions
+ * X X 5 6 X 0 is undefined/unknown position
+ */
+enum AVChromaLocation{
+ AVCHROMA_LOC_UNSPECIFIED = 0,
+ AVCHROMA_LOC_LEFT = 1, ///< mpeg2/4, h264 default
+ AVCHROMA_LOC_CENTER = 2, ///< mpeg1, jpeg, h263
+ AVCHROMA_LOC_TOPLEFT = 3, ///< DV
+ AVCHROMA_LOC_TOP = 4,
+ AVCHROMA_LOC_BOTTOMLEFT = 5,
+ AVCHROMA_LOC_BOTTOM = 6,
+ AVCHROMA_LOC_NB , ///< Not part of ABI
+};
+
+enum AVAudioServiceType {
+ AV_AUDIO_SERVICE_TYPE_MAIN = 0,
+ AV_AUDIO_SERVICE_TYPE_EFFECTS = 1,
+ AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2,
+ AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED = 3,
+ AV_AUDIO_SERVICE_TYPE_DIALOGUE = 4,
+ AV_AUDIO_SERVICE_TYPE_COMMENTARY = 5,
+ AV_AUDIO_SERVICE_TYPE_EMERGENCY = 6,
+ AV_AUDIO_SERVICE_TYPE_VOICE_OVER = 7,
+ AV_AUDIO_SERVICE_TYPE_KARAOKE = 8,
+ AV_AUDIO_SERVICE_TYPE_NB , ///< Not part of ABI
+};
+
+/**
+ * @ingroup lavc_encoding
+ */
+typedef struct RcOverride{
+ int start_frame;
+ int end_frame;
+ int qscale; // If this is 0 then quality_factor will be used instead.
+ float quality_factor;
+} RcOverride;
+
+#define FF_MAX_B_FRAMES 16
+
+/* encoding support
+ These flags can be passed in AVCodecContext.flags before initialization.
+ Note: Not everything is supported yet.
+*/
+
+#define CODEC_FLAG_QSCALE 0x0002 ///< Use fixed qscale.
+#define CODEC_FLAG_4MV 0x0004 ///< 4 MV per MB allowed / advanced prediction for H.263.
+#define CODEC_FLAG_QPEL 0x0010 ///< Use qpel MC.
+#define CODEC_FLAG_GMC 0x0020 ///< Use GMC.
+#define CODEC_FLAG_MV0 0x0040 ///< Always try a MB with MV=<0,0>.
+/**
+ * The parent program guarantees that the input for streams containing
+ * B-frames is not written to for at least s->max_b_frames+1 frames; if
+ * this is not set, the input will be copied.
+ */
+#define CODEC_FLAG_INPUT_PRESERVED 0x0100
+#define CODEC_FLAG_PASS1 0x0200 ///< Use internal 2pass ratecontrol in first pass mode.
+#define CODEC_FLAG_PASS2 0x0400 ///< Use internal 2pass ratecontrol in second pass mode.
+#define CODEC_FLAG_GRAY 0x2000 ///< Only decode/encode grayscale.
+#define CODEC_FLAG_EMU_EDGE 0x4000 ///< Don't draw edges.
+#define CODEC_FLAG_PSNR 0x8000 ///< error[?] variables will be set during encoding.
+#define CODEC_FLAG_TRUNCATED 0x00010000 /** Input bitstream might be truncated at a random
+ location instead of only at frame boundaries. */
+#define CODEC_FLAG_NORMALIZE_AQP 0x00020000 ///< Normalize adaptive quantization.
+#define CODEC_FLAG_INTERLACED_DCT 0x00040000 ///< Use interlaced DCT.
+#define CODEC_FLAG_LOW_DELAY 0x00080000 ///< Force low delay.
+#define CODEC_FLAG_GLOBAL_HEADER 0x00400000 ///< Place global headers in extradata instead of every keyframe.
+#define CODEC_FLAG_BITEXACT 0x00800000 ///< Use only bitexact stuff (except (I)DCT).
+/* Fx : Flag for h263+ extra options */
+#define CODEC_FLAG_AC_PRED 0x01000000 ///< H.263 advanced intra coding / MPEG-4 AC prediction
+#define CODEC_FLAG_LOOP_FILTER 0x00000800 ///< loop filter
+#define CODEC_FLAG_INTERLACED_ME 0x20000000 ///< interlaced motion estimation
+#define CODEC_FLAG_CLOSED_GOP 0x80000000
+#define CODEC_FLAG2_FAST 0x00000001 ///< Allow non spec compliant speedup tricks.
+#define CODEC_FLAG2_NO_OUTPUT 0x00000004 ///< Skip bitstream encoding.
+#define CODEC_FLAG2_LOCAL_HEADER 0x00000008 ///< Place global headers at every keyframe instead of in extradata.
+#if FF_API_MPV_GLOBAL_OPTS
+#define CODEC_FLAG_CBP_RD 0x04000000 ///< Use rate distortion optimization for cbp.
+#define CODEC_FLAG_QP_RD 0x08000000 ///< Use rate distortion optimization for qp selection.
+#define CODEC_FLAG2_STRICT_GOP 0x00000002 ///< Strictly enforce GOP size.
+#define CODEC_FLAG2_SKIP_RD 0x00004000 ///< RD optimal MB level residual skipping
+#endif
+#define CODEC_FLAG2_CHUNKS 0x00008000 ///< Input bitstream might be truncated at packet boundaries instead of only at frame boundaries.
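+
+/*
+ * These flags are plain bitmasks on AVCodecContext.flags / flags2; a typical
+ * encoder setup (avctx being a hypothetical AVCodecContext*) might do:
+ *
+ *     avctx->flags  |= CODEC_FLAG_GLOBAL_HEADER;  // headers in extradata, not in-band
+ *     avctx->flags2 |= CODEC_FLAG2_FAST;          // allow non-spec-compliant speedups
+ */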
+
+/* Unsupported options :
+ * Syntax Arithmetic coding (SAC)
+ * Reference Picture Selection
+ * Independent Segment Decoding */
+/* /Fx */
+/* codec capabilities */
+
+#define CODEC_CAP_DRAW_HORIZ_BAND 0x0001 ///< Decoder can use draw_horiz_band callback.
+/**
+ * Codec uses get_buffer() for allocating buffers and supports custom allocators.
+ * If not set, it might not use get_buffer() at all or use operations that
+ * assume the buffer was allocated by avcodec_default_get_buffer.
+ */
+#define CODEC_CAP_DR1 0x0002
+#define CODEC_CAP_TRUNCATED 0x0008
+/* Codec can export data for HW decoding (XvMC). */
+#define CODEC_CAP_HWACCEL 0x0010
+/**
+ * Encoder or decoder requires flushing with NULL input at the end in order to
+ * give the complete and correct output.
+ *
+ * NOTE: If this flag is not set, the codec is guaranteed to never be fed
+ * with NULL data. The user can still send NULL data to the public encode
+ * or decode function, but libavcodec will not pass it along to the codec
+ * unless this flag is set.
+ *
+ * Decoders:
+ * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to get the delayed data until the decoder no longer
+ * returns frames.
+ *
+ * Encoders:
+ * The encoder needs to be fed with NULL data at the end of encoding until the
+ * encoder no longer returns data.
+ *
+ * NOTE: For encoders implementing the AVCodec.encode2() function, setting this
+ * flag also means that the encoder must set the pts and duration for
+ * each output packet. If this flag is not set, the pts and duration will
+ * be determined by libavcodec from the input frame.
+ */
+#define CODEC_CAP_DELAY 0x0020
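+/*
+ * A sketch of draining such a delayed decoder at end of stream, as described
+ * above (avctx and frame are hypothetical, already-opened/allocated objects):
+ *
+ *     AVPacket flush;
+ *     av_init_packet(&flush);
+ *     flush.data = NULL;
+ *     flush.size = 0;
+ *     int got_frame = 1;
+ *     while (got_frame &&
+ *            avcodec_decode_video2(avctx, frame, &got_frame, &flush) >= 0) {
+ *         if (got_frame) {
+ *             // consume the delayed frame
+ *         }
+ *     }
+ */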
+/**
+ * Codec can be fed a final frame with a smaller size.
+ * This can be used to prevent truncation of the last audio samples.
+ */
+#define CODEC_CAP_SMALL_LAST_FRAME 0x0040
+/**
+ * Codec can export data for HW decoding (VDPAU).
+ */
+#define CODEC_CAP_HWACCEL_VDPAU 0x0080
+/**
+ * Codec can output multiple frames per AVPacket
+ * Normally demuxers return one frame at a time; demuxers which do not do so
+ * are connected to a parser to split what they return into proper frames.
+ * This flag is reserved for the very rare category of codecs which have a
+ * bitstream that cannot be split into frames without time-consuming
+ * operations like full decoding. Demuxers carrying such bitstreams thus
+ * may return multiple frames in a packet. This has many disadvantages, like
+ * prohibiting stream copy in many cases, so it should only be considered
+ * as a last resort.
+ */
+#define CODEC_CAP_SUBFRAMES 0x0100
+/**
+ * Codec is experimental and is thus avoided in favor of non experimental
+ * encoders
+ */
+#define CODEC_CAP_EXPERIMENTAL 0x0200
+/**
+ * Codec should fill in channel configuration and samplerate instead of container
+ */
+#define CODEC_CAP_CHANNEL_CONF 0x0400
+/**
+ * Codec is able to deal with negative linesizes
+ */
+#define CODEC_CAP_NEG_LINESIZES 0x0800
+/**
+ * Codec supports frame-level multithreading.
+ */
+#define CODEC_CAP_FRAME_THREADS 0x1000
+/**
+ * Codec supports slice-based (or partition-based) multithreading.
+ */
+#define CODEC_CAP_SLICE_THREADS 0x2000
+/**
+ * Codec supports changed parameters at any point.
+ */
+#define CODEC_CAP_PARAM_CHANGE 0x4000
+/**
+ * Codec supports avctx->thread_count == 0 (auto).
+ */
+#define CODEC_CAP_AUTO_THREADS 0x8000
+/**
+ * Audio encoder supports receiving a different number of samples in each call.
+ */
+#define CODEC_CAP_VARIABLE_FRAME_SIZE 0x10000
+
+//The following defines may change, don't expect compatibility if you use them.
+#define MB_TYPE_INTRA4x4 0x0001
+#define MB_TYPE_INTRA16x16 0x0002 //FIXME H.264-specific
+#define MB_TYPE_INTRA_PCM 0x0004 //FIXME H.264-specific
+#define MB_TYPE_16x16 0x0008
+#define MB_TYPE_16x8 0x0010
+#define MB_TYPE_8x16 0x0020
+#define MB_TYPE_8x8 0x0040
+#define MB_TYPE_INTERLACED 0x0080
+#define MB_TYPE_DIRECT2 0x0100 //FIXME
+#define MB_TYPE_ACPRED 0x0200
+#define MB_TYPE_GMC 0x0400
+#define MB_TYPE_SKIP 0x0800
+#define MB_TYPE_P0L0 0x1000
+#define MB_TYPE_P1L0 0x2000
+#define MB_TYPE_P0L1 0x4000
+#define MB_TYPE_P1L1 0x8000
+#define MB_TYPE_L0 (MB_TYPE_P0L0 | MB_TYPE_P1L0)
+#define MB_TYPE_L1 (MB_TYPE_P0L1 | MB_TYPE_P1L1)
+#define MB_TYPE_L0L1 (MB_TYPE_L0 | MB_TYPE_L1)
+#define MB_TYPE_QUANT 0x00010000
+#define MB_TYPE_CBP 0x00020000
+//Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 0mv, ...)
+
+/**
+ * Pan Scan area.
+ * This specifies the area which should be displayed.
+ * Note there may be multiple such areas for one frame.
+ */
+typedef struct AVPanScan{
+ /**
+ * id
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int id;
+
+ /**
+ * width and height in 1/16 pel
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int width;
+ int height;
+
+ /**
+ * position of the top left corner in 1/16 pel for up to 3 fields/frames
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int16_t position[3][2];
+}AVPanScan;
+
+#define FF_QSCALE_TYPE_MPEG1 0
+#define FF_QSCALE_TYPE_MPEG2 1
+#define FF_QSCALE_TYPE_H264 2
+#define FF_QSCALE_TYPE_VP56 3
+
+#define FF_BUFFER_TYPE_INTERNAL 1
+#define FF_BUFFER_TYPE_USER 2 ///< direct rendering buffers (image is (de)allocated by user)
+#define FF_BUFFER_TYPE_SHARED 4 ///< Buffer from somewhere else; don't deallocate image (data/base), all other tables are not shared.
+#define FF_BUFFER_TYPE_COPY 8 ///< Just a (modified) copy of some other buffer, don't deallocate anything.
+
+#define FF_BUFFER_HINTS_VALID 0x01 // Buffer hints value is meaningful (if 0 ignore).
+#define FF_BUFFER_HINTS_READABLE 0x02 // Codec will read from buffer.
+#define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content.
+#define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update).
+
+/**
+ * @defgroup lavc_packet AVPacket
+ *
+ * Types and functions for working with AVPacket.
+ * @{
+ */
+enum AVPacketSideDataType {
+ AV_PKT_DATA_PALETTE,
+ AV_PKT_DATA_NEW_EXTRADATA,
+
+ /**
+ * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
+ * @code
+ * u32le param_flags
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)
+ * s32le channel_count
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
+ * u64le channel_layout
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)
+ * s32le sample_rate
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)
+ * s32le width
+ * s32le height
+ * @endcode
+ */
+ AV_PKT_DATA_PARAM_CHANGE,
+
+ /**
+ * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of
+ * structures with info about macroblocks relevant to splitting the
+ * packet into smaller packets on macroblock edges (e.g. as for RFC 2190).
+ * That is, it does not necessarily contain info about all macroblocks,
+ * as long as the distance between macroblocks in the info is smaller
+ * than the target payload size.
+ * Each MB info structure is 12 bytes, and is laid out as follows:
+ * @code
+ * u32le bit offset from the start of the packet
+ * u8 current quantizer at the start of the macroblock
+ * u8 GOB number
+ * u16le macroblock address within the GOB
+ * u8 horizontal MV predictor
+ * u8 vertical MV predictor
+ * u8 horizontal MV predictor for block number 3
+ * u8 vertical MV predictor for block number 3
+ * @endcode
+ */
+ AV_PKT_DATA_H263_MB_INFO,
+};
+
+/**
+ * This structure stores compressed data. It is typically exported by demuxers
+ * and then passed as input to decoders, or received as output from encoders and
+ * then passed to muxers.
+ *
+ * For video, it should typically contain one compressed frame. For audio it may
+ * contain several compressed frames.
+ *
+ * AVPacket is one of the few structs in Libav, whose size is a part of public
+ * ABI. Thus it may be allocated on stack and no new fields can be added to it
+ * without libavcodec and libavformat major bump.
+ *
+ * The semantics of data ownership depends on the destruct field.
+ * If it is set, the packet data is dynamically allocated and is valid
+ * indefinitely until av_free_packet() is called (which in turn calls the
+ * destruct callback to free the data). If destruct is not set, the packet data
+ * is typically backed by some static buffer somewhere and is only valid for a
+ * limited time (e.g. until the next read call when demuxing).
+ *
+ * The side data is always allocated with av_malloc() and is freed in
+ * av_free_packet().
+ */
+typedef struct AVPacket {
+ /**
+ * Presentation timestamp in AVStream->time_base units; the time at which
+ * the decompressed packet will be presented to the user.
+ * Can be AV_NOPTS_VALUE if it is not stored in the file.
+ * pts MUST be larger or equal to dts as presentation cannot happen before
+ * decompression, unless one wants to view hex dumps. Some formats misuse
+ * the terms dts and pts/cts to mean something different. Such timestamps
+ * must be converted to true pts/dts before they are stored in AVPacket.
+ */
+ int64_t pts;
+ /**
+ * Decompression timestamp in AVStream->time_base units; the time at which
+ * the packet is decompressed.
+ * Can be AV_NOPTS_VALUE if it is not stored in the file.
+ */
+ int64_t dts;
+ uint8_t *data;
+ int size;
+ int stream_index;
+ /**
+ * A combination of AV_PKT_FLAG values
+ */
+ int flags;
+ /**
+ * Additional packet data that can be provided by the container.
+ * Packet can contain several types of side information.
+ */
+ struct {
+ uint8_t *data;
+ int size;
+ enum AVPacketSideDataType type;
+ } *side_data;
+ int side_data_elems;
+
+ /**
+ * Duration of this packet in AVStream->time_base units, 0 if unknown.
+ * Equals next_pts - this_pts in presentation order.
+ */
+ int duration;
+ void (*destruct)(struct AVPacket *);
+ void *priv;
+ int64_t pos; ///< byte position in stream, -1 if unknown
+
+ /**
+ * Time difference in AVStream->time_base units from the pts of this
+ * packet to the point at which the output from the decoder has converged
+ * independent from the availability of previous frames. That is, the
+ * frames are virtually identical no matter if decoding started from
+ * the very first frame or from this keyframe.
+ * Is AV_NOPTS_VALUE if unknown.
+ * This field is not the display duration of the current packet.
+ * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY
+ * set.
+ *
+ * The purpose of this field is to allow seeking in streams that have no
+ * keyframes in the conventional sense. It corresponds to the
+ * recovery point SEI in H.264 and match_time_delta in NUT. It is also
+ * essential for some types of subtitle streams to ensure that all
+ * subtitles are correctly displayed after seeking.
+ */
+ int64_t convergence_duration;
+} AVPacket;
+#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe
+#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted
+
+enum AVSideDataParamChangeFlags {
+ AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001,
+ AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002,
+ AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004,
+ AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008,
+};
+/**
+ * @}
+ */
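+
+/*
+ * A sketch of reading AV_PKT_DATA_PARAM_CHANGE side data from a demuxed
+ * packet, assuming av_packet_get_side_data() (declared later in this header)
+ * and the AV_RL32() helper from libavutil/intreadwrite.h:
+ *
+ *     int sd_size = 0;
+ *     uint8_t *sd = av_packet_get_side_data(&pkt, AV_PKT_DATA_PARAM_CHANGE,
+ *                                           &sd_size);
+ *     if (sd && sd_size >= 4) {
+ *         uint32_t change_flags = AV_RL32(sd);
+ *         if (change_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
+ *             // a new s32le sample_rate follows, per the layout documented above
+ *         }
+ *     }
+ */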
+
+/**
+ * This structure describes decoded (raw) audio or video data.
+ *
+ * AVFrame must be allocated using avcodec_alloc_frame() and freed with
+ * avcodec_free_frame(). Note that this allocates only the AVFrame itself. The
+ * buffers for the data must be managed through other means.
+ *
+ * AVFrame is typically allocated once and then reused multiple times to hold
+ * different data (e.g. a single AVFrame to hold frames received from a
+ * decoder). In such a case, avcodec_get_frame_defaults() should be used to
+ * reset the frame to its original clean state before it is reused again.
+ *
+ * sizeof(AVFrame) is not a part of the public ABI, so new fields may be added
+ * to the end with a minor bump.
+ */
+typedef struct AVFrame {
+#define AV_NUM_DATA_POINTERS 8
+ /**
+ * pointer to the picture/channel planes.
+ * This might be different from the first allocated byte
+ * - encoding: Set by user
+ * - decoding: set by AVCodecContext.get_buffer()
+ */
+ uint8_t *data[AV_NUM_DATA_POINTERS];
+
+ /**
+ * Size, in bytes, of the data for each picture/channel plane.
+ *
+ * For audio, only linesize[0] may be set. For planar audio, each channel
+ * plane must be the same size.
+ *
+ * - encoding: Set by user
+ * - decoding: set by AVCodecContext.get_buffer()
+ */
+ int linesize[AV_NUM_DATA_POINTERS];
+
+ /**
+ * pointers to the data planes/channels.
+ *
+ * For video, this should simply point to data[].
+ *
+ * For planar audio, each channel has a separate data pointer, and
+ * linesize[0] contains the size of each channel buffer.
+ * For packed audio, there is just one data pointer, and linesize[0]
+ * contains the total size of the buffer for all channels.
+ *
+ * Note: Both data and extended_data will always be set by get_buffer(),
+ * but for planar audio with more channels than can fit in data,
+ * extended_data must be used by the decoder in order to access all
+ * channels.
+ *
+ * encoding: set by user
+ * decoding: set by AVCodecContext.get_buffer()
+ */
+ uint8_t **extended_data;
+
+ /**
+ * width and height of the video frame
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ int width, height;
+
+ /**
+ * number of audio samples (per channel) described by this frame
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ int nb_samples;
+
+ /**
+ * format of the frame, -1 if unknown or unset
+ * Values correspond to enum AVPixelFormat for video frames,
+ * enum AVSampleFormat for audio)
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ int format;
+
+ /**
+ * 1 -> keyframe, 0-> not
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int key_frame;
+
+ /**
+ * Picture type of the frame, see ?_TYPE below.
+ * - encoding: Set by libavcodec. for coded_picture (and set by user for input).
+ * - decoding: Set by libavcodec.
+ */
+ enum AVPictureType pict_type;
+
+ /**
+ * pointer to the first allocated byte of the picture. Can be used in get_buffer/release_buffer.
+ * This isn't used by libavcodec unless the default get/release_buffer() is used.
+ * - encoding:
+ * - decoding:
+ */
+ uint8_t *base[AV_NUM_DATA_POINTERS];
+
+ /**
+ * sample aspect ratio for the video frame, 0/1 if unknown/unspecified
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ AVRational sample_aspect_ratio;
+
+ /**
+ * presentation timestamp in time_base units (time when frame should be shown to user)
+ * If AV_NOPTS_VALUE then frame_rate = 1/time_base will be assumed.
+ * - encoding: MUST be set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int64_t pts;
+
+ /**
+ * pts copied from the AVPacket that was decoded to produce this frame
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ int64_t pkt_pts;
+
+ /**
+ * dts copied from the AVPacket that triggered returning this frame
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ int64_t pkt_dts;
+
+ /**
+ * picture number in bitstream order
+ * - encoding: set by
+ * - decoding: Set by libavcodec.
+ */
+ int coded_picture_number;
+ /**
+ * picture number in display order
+ * - encoding: set by
+ * - decoding: Set by libavcodec.
+ */
+ int display_picture_number;
+
+ /**
+ * quality (between 1 (good) and FF_LAMBDA_MAX (bad))
+ * - encoding: Set by libavcodec. for coded_picture (and set by user for input).
+ * - decoding: Set by libavcodec.
+ */
+ int quality;
+
+ /**
+ * is this picture used as reference
+ * The values for this are the same as the MpegEncContext.picture_structure
+ * variable, that is 1->top field, 2->bottom field, 3->frame/both fields.
+ * Set to 4 for delayed, non-reference frames.
+ * - encoding: unused
+ * - decoding: Set by libavcodec. (before get_buffer() call)).
+ */
+ int reference;
+
+ /**
+ * QP table
+ * - encoding: unused
+ * - decoding: Set by libavcodec.
+ */
+ int8_t *qscale_table;
+ /**
+ * QP store stride
+ * - encoding: unused
+ * - decoding: Set by libavcodec.
+ */
+ int qstride;
+
+ /**
+ *
+ */
+ int qscale_type;
+
+ /**
+ * mbskip_table[mb]>=1 if MB didn't change
+ * stride= mb_width = (width+15)>>4
+ * - encoding: unused
+ * - decoding: Set by libavcodec.
+ */
+ uint8_t *mbskip_table;
+
+ /**
+ * motion vector table
+ * @code
+ * example:
+ * int mv_sample_log2= 4 - motion_subsample_log2;
+ * int mb_width= (width+15)>>4;
+ * int mv_stride= (mb_width << mv_sample_log2) + 1;
+ * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y];
+ * @endcode
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int16_t (*motion_val[2])[2];
+
+ /**
+ * macroblock type table
+ * mb_type_base + mb_width + 2
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ uint32_t *mb_type;
+
+ /**
+ * DCT coefficients
+ * - encoding: unused
+ * - decoding: Set by libavcodec.
+ */
+ short *dct_coeff;
+
+ /**
+ * motion reference frame index
+ * the order in which these are stored can depend on the codec.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int8_t *ref_index[2];
+
+ /**
+ * for some private data of the user
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ void *opaque;
+
+ /**
+ * error
+ * - encoding: Set by libavcodec. if flags&CODEC_FLAG_PSNR.
+ * - decoding: unused
+ */
+ uint64_t error[AV_NUM_DATA_POINTERS];
+
+ /**
+ * type of the buffer (to keep track of who has to deallocate data[*])
+ * - encoding: Set by the one who allocates it.
+ * - decoding: Set by the one who allocates it.
+ * Note: User allocated (direct rendering) & internal buffers cannot coexist currently.
+ */
+ int type;
+
+ /**
+ * When decoding, this signals how much the picture must be delayed.
+ * extra_delay = repeat_pict / (2*fps)
+ * - encoding: unused
+ * - decoding: Set by libavcodec.
+ */
+ int repeat_pict;
+
+ /**
+ * The content of the picture is interlaced.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec. (default 0)
+ */
+ int interlaced_frame;
+
+ /**
+ * If the content is interlaced, is top field displayed first.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int top_field_first;
+
+ /**
+ * Tell user application that palette has changed from previous frame.
+ * - encoding: ??? (no palette-enabled encoder yet)
+ * - decoding: Set by libavcodec. (default 0).
+ */
+ int palette_has_changed;
+
+ /**
+ * codec suggestion on buffer type if != 0
+ * - encoding: unused
+ * - decoding: Set by libavcodec. (before get_buffer() call)).
+ */
+ int buffer_hints;
+
+ /**
+ * Pan scan.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ AVPanScan *pan_scan;
+
+ /**
+ * reordered opaque 64bit (generally an integer or a double precision float
+ * PTS but can be anything).
+ * The user sets AVCodecContext.reordered_opaque to represent the input at
+ * that time,
+ * the decoder reorders values as needed and sets AVFrame.reordered_opaque
+ * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque
+ * @deprecated in favor of pkt_pts
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ int64_t reordered_opaque;
+
+ /**
+ * hardware accelerator private data (Libav-allocated)
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ void *hwaccel_picture_private;
+
+ /**
+ * the AVCodecContext which ff_thread_get_buffer() was last called on
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ struct AVCodecContext *owner;
+
+ /**
+ * used by multithreading to store frame-specific info
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ void *thread_opaque;
+
+ /**
+ * log2 of the size of the block which a single vector in motion_val represents:
+ * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)
+ * - encoding: unused
+ * - decoding: Set by libavcodec.
+ */
+ uint8_t motion_subsample_log2;
+
+ /**
+ * Sample rate of the audio data.
+ *
+ * - encoding: unused
+ * - decoding: set by get_buffer()
+ */
+ int sample_rate;
+
+ /**
+ * Channel layout of the audio data.
+ *
+ * - encoding: unused
+ * - decoding: set by get_buffer()
+ */
+ uint64_t channel_layout;
+} AVFrame;
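+
+/*
+ * The allocation/reuse pattern described above, as a short sketch
+ * (decode_one() stands in for whatever fills the frame):
+ *
+ *     AVFrame *frame = avcodec_alloc_frame();
+ *     if (!frame)
+ *         return AVERROR(ENOMEM);
+ *     while (decode_one(avctx, frame)) {
+ *         // ... use frame->data / frame->linesize ...
+ *         avcodec_get_frame_defaults(frame);  // reset before the frame is reused
+ *     }
+ *     avcodec_free_frame(&frame);
+ */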
+
+struct AVCodecInternal;
+
+enum AVFieldOrder {
+ AV_FIELD_UNKNOWN,
+ AV_FIELD_PROGRESSIVE,
+ AV_FIELD_TT, //< Top coded first, top displayed first
+ AV_FIELD_BB, //< Bottom coded first, bottom displayed first
+ AV_FIELD_TB, //< Top coded first, bottom displayed first
+ AV_FIELD_BT, //< Bottom coded first, top displayed first
+};
+
+/**
+ * main external API structure.
+ * New fields can be added to the end with minor version bumps.
+ * Removal, reordering and changes to existing fields require a major
+ * version bump.
+ * sizeof(AVCodecContext) must not be used outside libav*.
+ */
+typedef struct AVCodecContext {
+ /**
+ * information on struct for av_log
+ * - set by avcodec_alloc_context3
+ */
+ const AVClass *av_class;
+ int log_level_offset;
+
+ enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */
+ const struct AVCodec *codec;
+ char codec_name[32];
+ enum AVCodecID codec_id; /* see AV_CODEC_ID_xxx */
+
+ /**
+ * fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
+ * This is used to work around some encoder bugs.
+ * A demuxer should set this to what is stored in the field used to identify the codec.
+ * If there are multiple such fields in a container then the demuxer should choose the one
+ * which maximizes the information about the used codec.
+ * If the codec tag field in a container is larger than 32 bits then the demuxer should
+ * remap the longer ID to 32 bits with a table or other structure. Alternatively a new
+ * extra_codec_tag + size could be added but for this a clear advantage must be demonstrated
+ * first.
+ * - encoding: Set by user, if not then the default based on codec_id will be used.
+ * - decoding: Set by user, will be converted to uppercase by libavcodec during init.
+ */
+ unsigned int codec_tag;
+
+ /**
+ * fourcc from the AVI stream header (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
+ * This is used to work around some encoder bugs.
+ * - encoding: unused
+ * - decoding: Set by user, will be converted to uppercase by libavcodec during init.
+ */
+ unsigned int stream_codec_tag;
+
+#if FF_API_SUB_ID
+ /**
+ * @deprecated this field is unused
+ */
+ attribute_deprecated int sub_id;
+#endif
+
+ void *priv_data;
+
+ /**
+ * Private context used for internal data.
+ *
+ * Unlike priv_data, this is not codec-specific. It is used in general
+ * libavcodec functions.
+ */
+ struct AVCodecInternal *internal;
+
+ /**
+ * Private data of the user, can be used to carry app specific stuff.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ void *opaque;
+
+ /**
+ * the average bitrate
+ * - encoding: Set by user; unused for constant quantizer encoding.
+ * - decoding: Set by libavcodec. 0 or some bitrate if this info is available in the stream.
+ */
+ int bit_rate;
+
+ /**
+ * number of bits the bitstream is allowed to diverge from the reference.
+ * the reference can be CBR (for CBR pass1) or VBR (for pass2)
+ * - encoding: Set by user; unused for constant quantizer encoding.
+ * - decoding: unused
+ */
+ int bit_rate_tolerance;
+
+ /**
+ * Global quality for codecs which cannot change it per frame.
+ * This should be proportional to MPEG-1/2/4 qscale.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int global_quality;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int compression_level;
+#define FF_COMPRESSION_DEFAULT -1
+
+ /**
+ * CODEC_FLAG_*.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int flags;
+
+ /**
+ * CODEC_FLAG2_*
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int flags2;
+
+ /**
+ * some codecs need / can use extradata like Huffman tables.
+ * mjpeg: Huffman tables
+ * rv10: additional flags
+ * mpeg4: global headers (they can be in the bitstream or here)
+ * The allocated memory should be FF_INPUT_BUFFER_PADDING_SIZE bytes larger
+ * than extradata_size to avoid problems if it is read with the bitstream reader.
+ * The bytewise contents of extradata must not depend on the architecture or CPU endianness.
+ * - encoding: Set/allocated/freed by libavcodec.
+ * - decoding: Set/allocated/freed by user.
+ */
+ uint8_t *extradata;
+ int extradata_size;
+
+ /**
+ * This is the fundamental unit of time (in seconds) in terms
+ * of which frame timestamps are represented. For fixed-fps content,
+ * timebase should be 1/framerate and timestamp increments should be
+ * identically 1.
+ * - encoding: MUST be set by user.
+ * - decoding: Set by libavcodec.
+ */
+ AVRational time_base;
+
+ /**
+ * For some codecs, the time base is closer to the field rate than the frame rate.
+ * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration
+ * if no telecine is used ...
+ *
+ * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2.
+ */
+ int ticks_per_frame;
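+
+    /*
+     * Illustrative arithmetic for the relationship described above: with
+     * time_base = 1/50 and ticks_per_frame = 2 (typical for 25 fps H.264 or
+     * MPEG-2 content), the duration of one frame is
+     *
+     *     (double)avctx->time_base.num * avctx->ticks_per_frame /
+     *         avctx->time_base.den
+     *
+     * which evaluates to 2/50 = 0.04 seconds.
+     */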
+
+ /**
+ * Codec delay.
+ *
+ * Video:
+ * Number of frames the decoded output will be delayed relative to the
+ * encoded input.
+ *
+ * Audio:
+ * For encoding, this is the number of "priming" samples added to the
+ * beginning of the stream. The decoded output will be delayed by this
+ * many samples relative to the input to the encoder. Note that this
+ * field is purely informational and does not directly affect the pts
+ * output by the encoder, which should always be based on the actual
+ * presentation time, including any delay.
+ * For decoding, this is the number of samples the decoder needs to
+ * output before the decoder's output is valid. When seeking, you should
+ * start decoding this many samples prior to your desired seek point.
+ *
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int delay;
+
+
+ /* video only */
+ /**
+ * picture width / height.
+ * - encoding: MUST be set by user.
+ * - decoding: Set by libavcodec.
+ * Note: For compatibility it is possible to set this instead of
+ * coded_width/height before decoding.
+ */
+ int width, height;
+
+ /**
+ * Bitstream width / height, may be different from width/height.
+ * - encoding: unused
+ * - decoding: Set by user before init if known. Codec should override / dynamically change if needed.
+ */
+ int coded_width, coded_height;
+
+#define FF_ASPECT_EXTENDED 15
+
+ /**
+ * the number of pictures in a group of pictures, or 0 for intra_only
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int gop_size;
+
+ /**
+ * Pixel format, see AV_PIX_FMT_xxx.
+ * May be set by the demuxer if known from headers.
+ * May be overridden by the decoder if it knows better.
+ * - encoding: Set by user.
+ * - decoding: Set by user if known, overridden by libavcodec if known
+ */
+ enum AVPixelFormat pix_fmt;
+
+ /**
+ * Motion estimation algorithm used for video coding.
+ * 1 (zero), 2 (full), 3 (log), 4 (phods), 5 (epzs), 6 (x1), 7 (hex),
+ * 8 (umh), 9 (iter), 10 (tesa) [7, 8, 10 are x264 specific, 9 is snow specific]
+ * - encoding: MUST be set by user.
+ * - decoding: unused
+ */
+ int me_method;
+
+ /**
+ * If non NULL, 'draw_horiz_band' is called by the libavcodec
+ * decoder to draw a horizontal band. It improves cache usage. Not
+ * all codecs can do that. You must check the codec capabilities
+ * beforehand.
+ * When multithreading is used, it may be called from multiple threads
+ * at the same time; threads might draw different parts of the same AVFrame,
+ * or multiple AVFrames, and there is no guarantee that slices will be drawn
+ * in order.
+ * The function is also used by hardware acceleration APIs.
+ * It is called at least once during frame decoding to pass
+ * the data needed for hardware render.
+ * In that mode instead of pixel data, AVFrame points to
+ * a structure specific to the acceleration API. The application
+ * reads the structure and can change some fields to indicate progress
+ * or mark state.
+ * - encoding: unused
+ * - decoding: Set by user.
+ * @param height the height of the slice
+ * @param y the y position of the slice
+ * @param type 1->top field, 2->bottom field, 3->frame
+ * @param offset offset into the AVFrame.data from which the slice should be read
+ */
+ void (*draw_horiz_band)(struct AVCodecContext *s,
+ const AVFrame *src, int offset[AV_NUM_DATA_POINTERS],
+ int y, int type, int height);
+
+ /**
+ * callback to negotiate the pixelFormat
+ * @param fmt is the list of formats which are supported by the codec,
+ * it is terminated by -1 as 0 is a valid format; the formats are ordered by quality.
+ * The first is always the native one.
+ * @return the chosen format
+ * - encoding: unused
+ * - decoding: Set by user, if not set the native format will be chosen.
+ */
+ enum AVPixelFormat (*get_format)(struct AVCodecContext *s, const enum AVPixelFormat * fmt);
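+
+    /*
+     * A minimal callback sketch that simply accepts the native format (the
+     * first entry, as documented above); pick_native_fmt is a hypothetical
+     * name:
+     *
+     *     static enum AVPixelFormat pick_native_fmt(struct AVCodecContext *s,
+     *                                               const enum AVPixelFormat *fmt)
+     *     {
+     *         return fmt[0];   // the list is ordered by quality, native format first
+     *     }
+     *     ...
+     *     avctx->get_format = pick_native_fmt;
+     */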
+
+ /**
+ * maximum number of B-frames between non-B-frames
+ * Note: The output will be delayed by max_b_frames+1 relative to the input.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_b_frames;
+
+ /**
+ * qscale factor between IP and B-frames
+ * If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset).
+ * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float b_quant_factor;
+
+ /** obsolete FIXME remove */
+ int rc_strategy;
+#define FF_RC_STRATEGY_XVID 1
+
+ int b_frame_strategy;
+
+#if FF_API_MPV_GLOBAL_OPTS
+ /**
+ * luma single coefficient elimination threshold
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ attribute_deprecated int luma_elim_threshold;
+
+ /**
+ * chroma single coeff elimination threshold
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ attribute_deprecated int chroma_elim_threshold;
+#endif
+
+ /**
+ * qscale offset between IP and B-frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float b_quant_offset;
+
+ /**
+ * Size of the frame reordering buffer in the decoder.
+ * For MPEG-2 it is 1 IPB or 0 low delay IP.
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int has_b_frames;
+
+ /**
+ * 0-> h263 quant 1-> mpeg quant
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mpeg_quant;
+
+ /**
+ * qscale factor between P and I-frames
+ * If > 0 then the last p frame quantizer will be used (q= lastp_q*factor+offset).
+ * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float i_quant_factor;
+
+ /**
+ * qscale offset between P and I-frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float i_quant_offset;
+
+ /**
+ * luminance masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float lumi_masking;
+
+ /**
+ * temporal complexity masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float temporal_cplx_masking;
+
+ /**
+ * spatial complexity masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float spatial_cplx_masking;
+
+ /**
+ * p block masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float p_masking;
+
+ /**
+ * darkness masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float dark_masking;
+
+ /**
+ * slice count
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by user (or 0).
+ */
+ int slice_count;
+ /**
+ * prediction method (needed for huffyuv)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int prediction_method;
+#define FF_PRED_LEFT 0
+#define FF_PRED_PLANE 1
+#define FF_PRED_MEDIAN 2
+
+ /**
+ * slice offsets in the frame in bytes
+ * - encoding: Set/allocated by libavcodec.
+ * - decoding: Set/allocated by user (or NULL).
+ */
+ int *slice_offset;
+
+ /**
+ * sample aspect ratio (0 if unknown)
+ * That is the width of a pixel divided by the height of the pixel.
+ * Numerator and denominator must be relatively prime and smaller than 256 for some video standards.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ AVRational sample_aspect_ratio;
+
+ /**
+ * motion estimation comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_cmp;
+ /**
+ * subpixel motion estimation comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_sub_cmp;
+ /**
+ * macroblock comparison function (not supported yet)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_cmp;
+ /**
+ * interlaced DCT comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int ildct_cmp;
+#define FF_CMP_SAD 0
+#define FF_CMP_SSE 1
+#define FF_CMP_SATD 2
+#define FF_CMP_DCT 3
+#define FF_CMP_PSNR 4
+#define FF_CMP_BIT 5
+#define FF_CMP_RD 6
+#define FF_CMP_ZERO 7
+#define FF_CMP_VSAD 8
+#define FF_CMP_VSSE 9
+#define FF_CMP_NSSE 10
+#define FF_CMP_W53 11
+#define FF_CMP_W97 12
+#define FF_CMP_DCTMAX 13
+#define FF_CMP_DCT264 14
+#define FF_CMP_CHROMA 256
+
+ /**
+ * ME diamond size & shape
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int dia_size;
+
+ /**
+ * amount of previous MV predictors (2a+1 x 2a+1 square)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int last_predictor_count;
+
+ /**
+ * prepass for motion estimation
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int pre_me;
+
+ /**
+ * motion estimation prepass comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_pre_cmp;
+
+ /**
+ * ME prepass diamond size & shape
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int pre_dia_size;
+
+ /**
+ * subpel ME quality
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_subpel_quality;
+
+ /**
+ * DTG active format information (additional aspect ratio
+ * information only used in DVB MPEG-2 transport streams)
+ * 0 if not set.
+ *
+ * - encoding: unused
+ * - decoding: Set by decoder.
+ */
+ int dtg_active_format;
+#define FF_DTG_AFD_SAME 8
+#define FF_DTG_AFD_4_3 9
+#define FF_DTG_AFD_16_9 10
+#define FF_DTG_AFD_14_9 11
+#define FF_DTG_AFD_4_3_SP_14_9 13
+#define FF_DTG_AFD_16_9_SP_14_9 14
+#define FF_DTG_AFD_SP_4_3 15
+
+ /**
+ * maximum motion estimation search range in subpel units
+ * If 0 then no limit.
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_range;
+
+ /**
+ * intra quantizer bias
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int intra_quant_bias;
+#define FF_DEFAULT_QUANT_BIAS 999999
+
+ /**
+ * inter quantizer bias
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int inter_quant_bias;
+
+#if FF_API_COLOR_TABLE_ID
+ /**
+ * color table ID
+ * - encoding: unused
+ * - decoding: Which color table should be used for 8-bit RGB images.
+ * Tables have to be stored somewhere. FIXME
+ */
+ attribute_deprecated int color_table_id;
+#endif
+
+ /**
+ * slice flags
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int slice_flags;
+#define SLICE_FLAG_CODED_ORDER 0x0001 ///< draw_horiz_band() is called in coded order instead of display
+#define SLICE_FLAG_ALLOW_FIELD 0x0002 ///< allow draw_horiz_band() with field slices (MPEG2 field pics)
+#define SLICE_FLAG_ALLOW_PLANE 0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1)
+
+ /**
+ * XVideo Motion Acceleration
+ * - encoding: forbidden
+ * - decoding: set by decoder
+ */
+ int xvmc_acceleration;
+
+ /**
+ * macroblock decision mode
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_decision;
+#define FF_MB_DECISION_SIMPLE 0 ///< uses mb_cmp
+#define FF_MB_DECISION_BITS 1 ///< chooses the one which needs the fewest bits
+#define FF_MB_DECISION_RD 2 ///< rate distortion
+
+ /**
+ * custom intra quantization matrix
+ * - encoding: Set by user, can be NULL.
+ * - decoding: Set by libavcodec.
+ */
+ uint16_t *intra_matrix;
+
+ /**
+ * custom inter quantization matrix
+ * - encoding: Set by user, can be NULL.
+ * - decoding: Set by libavcodec.
+ */
+ uint16_t *inter_matrix;
+
+ /**
+ * scene change detection threshold
+ * 0 is default, larger means fewer detected scene changes.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int scenechange_threshold;
+
+ /**
+ * noise reduction strength
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int noise_reduction;
+
+#if FF_API_INTER_THRESHOLD
+ /**
+ * @deprecated this field is unused
+ */
+ attribute_deprecated int inter_threshold;
+#endif
+
+#if FF_API_MPV_GLOBAL_OPTS
+ /**
+ * @deprecated use mpegvideo private options instead
+ */
+ attribute_deprecated int quantizer_noise_shaping;
+#endif
+
+ /**
+ * Motion estimation threshold below which no motion estimation is
+ * performed, but instead the user-specified motion vectors are used.
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_threshold;
+
+ /**
+ * Macroblock threshold below which the user-specified macroblock types will be used.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_threshold;
+
+ /**
+ * precision of the intra DC coefficient - 8
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int intra_dc_precision;
+
+ /**
+ * Number of macroblock rows at the top which are skipped.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int skip_top;
+
+ /**
+ * Number of macroblock rows at the bottom which are skipped.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int skip_bottom;
+
+ /**
+ * Border processing masking, raises the quantizer for mbs on the borders
+ * of the picture.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float border_masking;
+
+ /**
+ * minimum MB Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_lmin;
+
+ /**
+ * maximum MB Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_lmax;
+
+ /**
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_penalty_compensation;
+
+ /**
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int bidir_refine;
+
+ /**
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int brd_scale;
+
+ /**
+ * minimum GOP size
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int keyint_min;
+
+ /**
+ * number of reference frames
+ * - encoding: Set by user.
+ * - decoding: Set by lavc.
+ */
+ int refs;
+
+ /**
+ * chroma qp offset from luma
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int chromaoffset;
+
+ /**
+ * Multiplied by qscale for each frame and added to scene_change_score.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int scenechange_factor;
+
+ /**
+ *
+ * Note: Value depends upon the compare function used for fullpel ME.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mv0_threshold;
+
+ /**
+ * Adjust sensitivity of b_frame_strategy 1.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int b_sensitivity;
+
+ /**
+ * Chromaticity coordinates of the source primaries.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorPrimaries color_primaries;
+
+ /**
+ * Color Transfer Characteristic.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorTransferCharacteristic color_trc;
+
+ /**
+ * YUV colorspace type.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorSpace colorspace;
+
+ /**
+ * MPEG vs JPEG YUV range.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorRange color_range;
+
+ /**
+ * This defines the location of chroma samples.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVChromaLocation chroma_sample_location;
+
+ /**
+ * Number of slices.
+ * Indicates the number of picture subdivisions. Used for parallelized
+ * decoding.
+ * - encoding: Set by user
+ * - decoding: unused
+ */
+ int slices;
+
+ /** Field order
+ * - encoding: set by libavcodec
+ * - decoding: Set by libavcodec
+ */
+ enum AVFieldOrder field_order;
+
+ /* audio only */
+ int sample_rate; ///< samples per second
+ int channels; ///< number of audio channels
+
+ /**
+ * audio sample format
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ enum AVSampleFormat sample_fmt; ///< sample format
+
+ /* The following data should not be initialized. */
+ /**
+ * Number of samples per channel in an audio frame.
+ *
+ * - encoding: set by libavcodec in avcodec_open2(). Each submitted frame
+ * except the last must contain exactly frame_size samples per channel.
+ * May be 0 when the codec has CODEC_CAP_VARIABLE_FRAME_SIZE set, then the
+ * frame size is not restricted.
+ * - decoding: may be set by some decoders to indicate constant frame size
+ */
+ int frame_size;
+
+ /**
+ * Frame counter, set by libavcodec.
+ *
+ * - decoding: total number of frames returned from the decoder so far.
+ * - encoding: total number of frames passed to the encoder so far.
+ *
+ * @note the counter is not incremented if encoding/decoding resulted in
+ * an error.
+ */
+ int frame_number;
+
+ /**
+ * number of bytes per packet if constant and known or 0
+ * Used by some WAV based audio codecs.
+ */
+ int block_align;
+
+ /**
+ * Audio cutoff bandwidth (0 means "automatic")
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int cutoff;
+
+#if FF_API_REQUEST_CHANNELS
+ /**
+ * Decoder should decode to this many channels if it can (0 for default)
+ * - encoding: unused
+ * - decoding: Set by user.
+ * @deprecated Deprecated in favor of request_channel_layout.
+ */
+ int request_channels;
+#endif
+
+ /**
+ * Audio channel layout.
+ * - encoding: set by user.
+ * - decoding: set by libavcodec.
+ */
+ uint64_t channel_layout;
+
+ /**
+ * Request decoder to use this channel layout if it can (0 for default)
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ uint64_t request_channel_layout;
+
+ /**
+ * Type of service that the audio stream conveys.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ enum AVAudioServiceType audio_service_type;
+
+ /**
+ * Used to request a sample format from the decoder.
+ * - encoding: unused.
+ * - decoding: Set by user.
+ */
+ enum AVSampleFormat request_sample_fmt;
+
+ /**
+ * Called at the beginning of each frame to get a buffer for it.
+ *
+ * The function will set AVFrame.data[], AVFrame.linesize[].
+ * AVFrame.extended_data[] must also be set, but it should be the same as
+ * AVFrame.data[] except for planar audio with more channels than can fit
+ * in AVFrame.data[]. In that case, AVFrame.data[] shall still contain as
+ * many data pointers as it can hold.
+ *
+ * if CODEC_CAP_DR1 is not set then get_buffer() must call
+ * avcodec_default_get_buffer() instead of providing buffers allocated by
+ * some other means.
+ *
+ * AVFrame.data[] should be 32- or 16-byte-aligned unless the CPU doesn't
+ * need it. avcodec_default_get_buffer() aligns the output buffer properly,
+ * but if get_buffer() is overridden then alignment considerations should
+ * be taken into account.
+ *
+ * @see avcodec_default_get_buffer()
+ *
+ * Video:
+ *
+ * If pic.reference is set then the frame will be read later by libavcodec.
+ * avcodec_align_dimensions2() should be used to find the required width and
+ * height, as they normally need to be rounded up to the next multiple of 16.
+ *
+ * If frame multithreading is used and thread_safe_callbacks is set,
+ * it may be called from a different thread, but not from more than one at
+ * once. Does not need to be reentrant.
+ *
+ * @see release_buffer(), reget_buffer()
+ * @see avcodec_align_dimensions2()
+ *
+ * Audio:
+ *
+ * Decoders request a buffer of a particular size by setting
+ * AVFrame.nb_samples prior to calling get_buffer(). The decoder may,
+ * however, utilize only part of the buffer by setting AVFrame.nb_samples
+ * to a smaller value in the output frame.
+ *
+ * Decoders cannot use the buffer after returning from
+ * avcodec_decode_audio4(), so they will not call release_buffer(), as it
+ * is assumed to be released immediately upon return. In some rare cases,
+ * a decoder may need to call get_buffer() more than once in a single
+ * call to avcodec_decode_audio4(). In that case, when get_buffer() is
+ * called again after it has already been called once, the previously
+ * acquired buffer is assumed to be released at that time and may not be
+ * reused by the decoder.
+ *
+ * As a convenience, av_samples_get_buffer_size() and
+ * av_samples_fill_arrays() in libavutil may be used by custom get_buffer()
+ * functions to find the required data size and to fill data pointers and
+ * linesize. In AVFrame.linesize, only linesize[0] may be set for audio
+ * since all planes must be the same size.
+ *
+ * @see av_samples_get_buffer_size(), av_samples_fill_arrays()
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic);
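+
+ /*
+ * Audio-only sketch of a custom get_buffer() along the lines suggested above
+ * (editorial illustration, not a definitive implementation): it relies on
+ * av_samples_get_buffer_size() and av_samples_fill_arrays() from libavutil and
+ * ignores the video-specific requirements (avcodec_align_dimensions2(), edge
+ * padding, pic.reference) described in this comment. Per the audio notes, the
+ * application frees frame->data[0] once it is done with the decoded frame.
+ *
+ *     static int my_audio_get_buffer(struct AVCodecContext *c, AVFrame *frame)
+ *     {
+ *         int size = av_samples_get_buffer_size(NULL, c->channels, frame->nb_samples,
+ *                                               c->sample_fmt, 0);
+ *         uint8_t *buf;
+ *         if (size < 0)
+ *             return size;
+ *         buf = av_malloc(size);
+ *         if (!buf)
+ *             return AVERROR(ENOMEM);
+ *         // fill frame->data[] and frame->linesize[0] from the flat buffer
+ *         av_samples_fill_arrays(frame->data, frame->linesize, buf,
+ *                                c->channels, frame->nb_samples, c->sample_fmt, 0);
+ *         frame->extended_data = frame->data;  // valid while channels fit in data[]
+ *         return 0;
+ *     }
+ */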
+
+ /**
+ * Called to release buffers which were allocated with get_buffer.
+ * A released buffer can be reused in get_buffer().
+ * pic.data[*] must be set to NULL.
+ * May be called from a different thread if frame multithreading is used,
+ * but not by more than one thread at once, so does not need to be reentrant.
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ */
+ void (*release_buffer)(struct AVCodecContext *c, AVFrame *pic);
+
+ /**
+ * Called at the beginning of a frame to get cr buffer for it.
+ * Buffer type (size, hints) must be the same. libavcodec won't check it.
+ * libavcodec will pass the previous buffer in pic; the function should return
+ * the same buffer or a new buffer with the old frame "painted" into it.
+ * If pic.data[0] == NULL it must behave like get_buffer().
+ * if CODEC_CAP_DR1 is not set then reget_buffer() must call
+ * avcodec_default_reget_buffer() instead of providing buffers allocated by
+ * some other means.
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*reget_buffer)(struct AVCodecContext *c, AVFrame *pic);
+
+
+ /* - encoding parameters */
+ float qcompress; ///< amount of qscale change between easy & hard scenes (0.0-1.0)
+ float qblur; ///< amount of qscale smoothing over time (0.0-1.0)
+
+ /**
+ * minimum quantizer
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int qmin;
+
+ /**
+ * maximum quantizer
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int qmax;
+
+ /**
+ * maximum quantizer difference between frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_qdiff;
+
+ /**
+ * ratecontrol qmin qmax limiting method
+ * 0-> clipping, 1-> use a nice continuous function to limit qscale within qmin/qmax.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float rc_qsquish;
+
+ float rc_qmod_amp;
+ int rc_qmod_freq;
+
+ /**
+ * decoder bitstream buffer size
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_buffer_size;
+
+ /**
+ * ratecontrol override, see RcOverride
+ * - encoding: Allocated/set/freed by user.
+ * - decoding: unused
+ */
+ int rc_override_count;
+ RcOverride *rc_override;
+
+ /**
+ * rate control equation
+ * - encoding: Set by user
+ * - decoding: unused
+ */
+ const char *rc_eq;
+
+ /**
+ * maximum bitrate
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_max_rate;
+
+ /**
+ * minimum bitrate
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_min_rate;
+
+ float rc_buffer_aggressivity;
+
+ /**
+ * initial complexity for pass1 ratecontrol
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float rc_initial_cplx;
+
+ /**
+ * Ratecontrol attempt to use, at maximum, <value> of what can be used without an underflow.
+ * - encoding: Set by user.
+ * - decoding: unused.
+ */
+ float rc_max_available_vbv_use;
+
+ /**
+ * Ratecontrol attempt to use, at least, <value> times the amount needed to prevent a vbv overflow.
+ * - encoding: Set by user.
+ * - decoding: unused.
+ */
+ float rc_min_vbv_overflow_use;
+
+ /**
+ * Number of bits which should be loaded into the rc buffer before decoding starts.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_initial_buffer_occupancy;
+
+#define FF_CODER_TYPE_VLC 0
+#define FF_CODER_TYPE_AC 1
+#define FF_CODER_TYPE_RAW 2
+#define FF_CODER_TYPE_RLE 3
+#define FF_CODER_TYPE_DEFLATE 4
+ /**
+ * coder type
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int coder_type;
+
+ /**
+ * context model
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int context_model;
+
+ /**
+ * minimum Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int lmin;
+
+ /**
+ * maximum Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int lmax;
+
+ /**
+ * frame skip threshold
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_threshold;
+
+ /**
+ * frame skip factor
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_factor;
+
+ /**
+ * frame skip exponent
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_exp;
+
+ /**
+ * frame skip comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_cmp;
+
+ /**
+ * trellis RD quantization
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int trellis;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int min_prediction_order;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_prediction_order;
+
+ /**
+ * GOP timecode frame start number, in non-drop-frame format
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int64_t timecode_frame_start;
+
+ /* The RTP callback: This function is called */
+ /* every time the encoder has a packet to send. */
+ /* It depends on the encoder if the data starts */
+ /* with a Start Code (it should). H.263 does. */
+ /* mb_nb contains the number of macroblocks */
+ /* encoded in the RTP payload. */
+ void (*rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb);
+
+ int rtp_payload_size; /* The size of the RTP payload: the coder will */
+ /* do its best to deliver a chunk with size */
+ /* below rtp_payload_size, the chunk will start */
+ /* with a start code on some codecs like H.263. */
+ /* This doesn't take account of any particular */
+ /* headers inside the transmitted RTP payload. */
+
+ /* statistics, used for 2-pass encoding */
+ int mv_bits;
+ int header_bits;
+ int i_tex_bits;
+ int p_tex_bits;
+ int i_count;
+ int p_count;
+ int skip_count;
+ int misc_bits;
+
+ /**
+ * number of bits used for the previously encoded frame
+ * - encoding: Set by libavcodec.
+ * - decoding: unused
+ */
+ int frame_bits;
+
+ /**
+ * pass1 encoding statistics output buffer
+ * - encoding: Set by libavcodec.
+ * - decoding: unused
+ */
+ char *stats_out;
+
+ /**
+ * pass2 encoding statistics input buffer
+ * Concatenated stuff from stats_out of pass1 should be placed here.
+ * - encoding: Allocated/set/freed by user.
+ * - decoding: unused
+ */
+ char *stats_in;
+
+ /**
+ * Work around bugs in encoders which sometimes cannot be detected automatically.
+ * - encoding: Set by user
+ * - decoding: Set by user
+ */
+ int workaround_bugs;
+#define FF_BUG_AUTODETECT 1 ///< autodetection
+#define FF_BUG_OLD_MSMPEG4 2
+#define FF_BUG_XVID_ILACE 4
+#define FF_BUG_UMP4 8
+#define FF_BUG_NO_PADDING 16
+#define FF_BUG_AMV 32
+#define FF_BUG_AC_VLC 0 ///< Will be removed, libavcodec can now handle these non-compliant files by default.
+#define FF_BUG_QPEL_CHROMA 64
+#define FF_BUG_STD_QPEL 128
+#define FF_BUG_QPEL_CHROMA2 256
+#define FF_BUG_DIRECT_BLOCKSIZE 512
+#define FF_BUG_EDGE 1024
+#define FF_BUG_HPEL_CHROMA 2048
+#define FF_BUG_DC_CLIP 4096
+#define FF_BUG_MS 8192 ///< Work around various bugs in Microsoft's broken decoders.
+#define FF_BUG_TRUNCATED 16384
+
+ /**
+ * strictly follow the standard (MPEG4, ...).
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ * Setting this to STRICT or higher means the encoder and decoder will
+ * generally do stupid things, whereas setting it to unofficial or lower
+ * will mean the encoder might produce output that is not supported by all
+ * spec-compliant decoders. Decoders don't differentiate between normal,
+ * unofficial and experimental (that is, they always try to decode things
+ * when they can) unless they are explicitly asked to behave stupidly
+ * (=strictly conform to the specs)
+ */
+ int strict_std_compliance;
+#define FF_COMPLIANCE_VERY_STRICT 2 ///< Strictly conform to an older more strict version of the spec or reference software.
+#define FF_COMPLIANCE_STRICT 1 ///< Strictly conform to all the things in the spec no matter what consequences.
+#define FF_COMPLIANCE_NORMAL 0
+#define FF_COMPLIANCE_UNOFFICIAL -1 ///< Allow unofficial extensions
+#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things.
+
+ /**
+ * error concealment flags
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int error_concealment;
+#define FF_EC_GUESS_MVS 1
+#define FF_EC_DEBLOCK 2
+
+ /**
+ * debug
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int debug;
+#define FF_DEBUG_PICT_INFO 1
+#define FF_DEBUG_RC 2
+#define FF_DEBUG_BITSTREAM 4
+#define FF_DEBUG_MB_TYPE 8
+#define FF_DEBUG_QP 16
+#define FF_DEBUG_MV 32
+#define FF_DEBUG_DCT_COEFF 0x00000040
+#define FF_DEBUG_SKIP 0x00000080
+#define FF_DEBUG_STARTCODE 0x00000100
+#define FF_DEBUG_PTS 0x00000200
+#define FF_DEBUG_ER 0x00000400
+#define FF_DEBUG_MMCO 0x00000800
+#define FF_DEBUG_BUGS 0x00001000
+#define FF_DEBUG_VIS_QP 0x00002000
+#define FF_DEBUG_VIS_MB_TYPE 0x00004000
+#define FF_DEBUG_BUFFERS 0x00008000
+#define FF_DEBUG_THREADS 0x00010000
+
+ /**
+ * debug
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int debug_mv;
+#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames
+#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames
+#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames
+
+ /**
+ * Error recognition; may misdetect some more or less valid parts as errors.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int err_recognition;
+#define AV_EF_CRCCHECK (1<<0)
+#define AV_EF_BITSTREAM (1<<1)
+#define AV_EF_BUFFER (1<<2)
+#define AV_EF_EXPLODE (1<<3)
+
+ /**
+ * opaque 64bit number (generally a PTS) that will be reordered and
+ * output in AVFrame.reordered_opaque
+ * @deprecated in favor of pkt_pts
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int64_t reordered_opaque;
+
+ /**
+ * Hardware accelerator in use
+ * - encoding: unused.
+ * - decoding: Set by libavcodec
+ */
+ struct AVHWAccel *hwaccel;
+
+ /**
+ * Hardware accelerator context.
+ * For some hardware accelerators, a global context needs to be
+ * provided by the user. In that case, this holds display-dependent
+ * data Libav cannot instantiate itself. Please refer to the
+ * Libav HW accelerator documentation to know how to fill this
+ * field, e.g. for VA API, this is a struct vaapi_context.
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ void *hwaccel_context;
+
+ /**
+ * error
+ * - encoding: Set by libavcodec if flags&CODEC_FLAG_PSNR.
+ * - decoding: unused
+ */
+ uint64_t error[AV_NUM_DATA_POINTERS];
+
+ /**
+ * DCT algorithm, see FF_DCT_* below
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int dct_algo;
+#define FF_DCT_AUTO 0
+#define FF_DCT_FASTINT 1
+#define FF_DCT_INT 2
+#define FF_DCT_MMX 3
+#define FF_DCT_ALTIVEC 5
+#define FF_DCT_FAAN 6
+
+ /**
+ * IDCT algorithm, see FF_IDCT_* below.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int idct_algo;
+#define FF_IDCT_AUTO 0
+#define FF_IDCT_INT 1
+#define FF_IDCT_SIMPLE 2
+#define FF_IDCT_SIMPLEMMX 3
+#if FF_API_LIBMPEG2
+#define FF_IDCT_LIBMPEG2MMX 4
+#endif
+#if FF_API_MMI
+#define FF_IDCT_MMI 5
+#endif
+#define FF_IDCT_ARM 7
+#define FF_IDCT_ALTIVEC 8
+#define FF_IDCT_SH4 9
+#define FF_IDCT_SIMPLEARM 10
+#define FF_IDCT_H264 11
+#define FF_IDCT_VP3 12
+#define FF_IDCT_IPP 13
+#define FF_IDCT_XVIDMMX 14
+#define FF_IDCT_CAVS 15
+#define FF_IDCT_SIMPLEARMV5TE 16
+#define FF_IDCT_SIMPLEARMV6 17
+#define FF_IDCT_SIMPLEVIS 18
+#define FF_IDCT_WMV2 19
+#define FF_IDCT_FAAN 20
+#define FF_IDCT_EA 21
+#define FF_IDCT_SIMPLENEON 22
+#define FF_IDCT_SIMPLEALPHA 23
+#define FF_IDCT_BINK 24
+
+#if FF_API_DSP_MASK
+ /**
+ * Unused.
+ * @deprecated use av_set_cpu_flags_mask() instead.
+ */
+ attribute_deprecated unsigned dsp_mask;
+#endif
+
+ /**
+ * bits per sample/pixel from the demuxer (needed for huffyuv).
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by user.
+ */
+ int bits_per_coded_sample;
+
+ /**
+ * Bits per sample/pixel of internal libavcodec pixel/sample format.
+ * - encoding: set by user.
+ * - decoding: set by libavcodec.
+ */
+ int bits_per_raw_sample;
+
+ /**
+ * low resolution decoding, 1-> 1/2 size, 2->1/4 size
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ attribute_deprecated int lowres;
+
+ /**
+ * the picture in the bitstream
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ AVFrame *coded_frame;
+
+ /**
+ * thread count
+ * is used to decide how many independent tasks should be passed to execute()
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int thread_count;
+
+ /**
+ * Which multithreading methods to use.
+ * Use of FF_THREAD_FRAME will increase decoding delay by one frame per thread,
+ * so clients which cannot provide future frames should not use it.
+ *
+ * - encoding: Set by user, otherwise the default is used.
+ * - decoding: Set by user, otherwise the default is used.
+ */
+ int thread_type;
+#define FF_THREAD_FRAME 1 ///< Decode more than one frame at once
+#define FF_THREAD_SLICE 2 ///< Decode more than one part of a single frame at once
+
+ /**
+ * Which multithreading methods are in use by the codec.
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int active_thread_type;
+
+ /**
+ * Set by the client if its custom get_buffer() callback can be called
+ * synchronously from another thread, which allows faster multithreaded decoding.
+ * draw_horiz_band() will be called from other threads regardless of this setting.
+ * Ignored if the default get_buffer() is used.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int thread_safe_callbacks;
+
+ /**
+ * The codec may call this to execute several independent things.
+ * It will return only after finishing all tasks.
+ * The user may replace this with some multithreaded implementation,
+ * the default implementation will execute the parts serially.
+ * @param count the number of things to execute
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*execute)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size);
+
+ /**
+ * The codec may call this to execute several independent things.
+ * It will return only after finishing all tasks.
+ * The user may replace this with some multithreaded implementation,
+ * the default implementation will execute the parts serially.
+ * Also see avcodec_thread_init and e.g. the --enable-pthread configure option.
+ * @param c context passed also to func
+ * @param count the number of things to execute
+ * @param arg2 argument passed unchanged to func
+ * @param ret return values of executed functions, must have space for "count" values. May be NULL.
+ * @param func function that will be called count times, with jobnr from 0 to count-1.
+ * threadnr will be in the range 0 to c->thread_count-1 (which is < MAX_THREADS), such that no
+ * two instances of func executing at the same time will have the same threadnr.
+ * @return always 0 currently, but code should handle a future improvement where, if any call to func
+ * returns < 0, no further calls to func may be made and < 0 is returned.
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*execute2)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count);
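+
+ /*
+ * Illustrative worker for execute2() (editorial sketch): the job function
+ * matches the func pointer signature above; rows and process_row() are
+ * hypothetical application data and helpers.
+ *
+ *     static int filter_row_job(struct AVCodecContext *c, void *arg,
+ *                               int jobnr, int threadnr)
+ *     {
+ *         uint8_t **rows = arg;
+ *         process_row(rows[jobnr], threadnr);  // hypothetical per-row work
+ *         return 0;
+ *     }
+ *
+ * Typical call site: avctx->execute2(avctx, filter_row_job, rows, NULL, nb_rows);
+ */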
+
+ /**
+ * thread opaque
+ * Can be used by execute() to store some per AVCodecContext stuff.
+ * - encoding: set by execute()
+ * - decoding: set by execute()
+ */
+ void *thread_opaque;
+
+ /**
+ * noise vs. sse weight for the nsse comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int nsse_weight;
+
+ /**
+ * profile
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int profile;
+#define FF_PROFILE_UNKNOWN -99
+#define FF_PROFILE_RESERVED -100
+
+#define FF_PROFILE_AAC_MAIN 0
+#define FF_PROFILE_AAC_LOW 1
+#define FF_PROFILE_AAC_SSR 2
+#define FF_PROFILE_AAC_LTP 3
+#define FF_PROFILE_AAC_HE 4
+#define FF_PROFILE_AAC_HE_V2 28
+#define FF_PROFILE_AAC_LD 22
+#define FF_PROFILE_AAC_ELD 38
+
+#define FF_PROFILE_DTS 20
+#define FF_PROFILE_DTS_ES 30
+#define FF_PROFILE_DTS_96_24 40
+#define FF_PROFILE_DTS_HD_HRA 50
+#define FF_PROFILE_DTS_HD_MA 60
+
+#define FF_PROFILE_MPEG2_422 0
+#define FF_PROFILE_MPEG2_HIGH 1
+#define FF_PROFILE_MPEG2_SS 2
+#define FF_PROFILE_MPEG2_SNR_SCALABLE 3
+#define FF_PROFILE_MPEG2_MAIN 4
+#define FF_PROFILE_MPEG2_SIMPLE 5
+
+#define FF_PROFILE_H264_CONSTRAINED (1<<9) // 8+1; constraint_set1_flag
+#define FF_PROFILE_H264_INTRA (1<<11) // 8+3; constraint_set3_flag
+
+#define FF_PROFILE_H264_BASELINE 66
+#define FF_PROFILE_H264_CONSTRAINED_BASELINE (66|FF_PROFILE_H264_CONSTRAINED)
+#define FF_PROFILE_H264_MAIN 77
+#define FF_PROFILE_H264_EXTENDED 88
+#define FF_PROFILE_H264_HIGH 100
+#define FF_PROFILE_H264_HIGH_10 110
+#define FF_PROFILE_H264_HIGH_10_INTRA (110|FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_HIGH_422 122
+#define FF_PROFILE_H264_HIGH_422_INTRA (122|FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_HIGH_444 144
+#define FF_PROFILE_H264_HIGH_444_PREDICTIVE 244
+#define FF_PROFILE_H264_HIGH_444_INTRA (244|FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_CAVLC_444 44
+
+#define FF_PROFILE_VC1_SIMPLE 0
+#define FF_PROFILE_VC1_MAIN 1
+#define FF_PROFILE_VC1_COMPLEX 2
+#define FF_PROFILE_VC1_ADVANCED 3
+
+#define FF_PROFILE_MPEG4_SIMPLE 0
+#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE 1
+#define FF_PROFILE_MPEG4_CORE 2
+#define FF_PROFILE_MPEG4_MAIN 3
+#define FF_PROFILE_MPEG4_N_BIT 4
+#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE 5
+#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6
+#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7
+#define FF_PROFILE_MPEG4_HYBRID 8
+#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME 9
+#define FF_PROFILE_MPEG4_CORE_SCALABLE 10
+#define FF_PROFILE_MPEG4_ADVANCED_CODING 11
+#define FF_PROFILE_MPEG4_ADVANCED_CORE 12
+#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13
+#define FF_PROFILE_MPEG4_SIMPLE_STUDIO 14
+#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE 15
+
+ /**
+ * level
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int level;
+#define FF_LEVEL_UNKNOWN -99
+
+ /**
+ *
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_loop_filter;
+
+ /**
+ *
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_idct;
+
+ /**
+ *
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_frame;
+
+ /**
+ * Header containing style information for text subtitles.
+ * For SUBTITLE_ASS subtitle type, it should contain the whole ASS
+ * [Script Info] and [V4+ Styles] section, plus the [Events] line and
+ * the Format line following. It shouldn't include any Dialogue line.
+ * - encoding: Set/allocated/freed by user (before avcodec_open2())
+ * - decoding: Set/allocated/freed by libavcodec (by avcodec_open2())
+ */
+ uint8_t *subtitle_header;
+ int subtitle_header_size;
+
+ /**
+ * Simulates errors in the bitstream to test error concealment.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int error_rate;
+
+ /**
+ * Current packet as passed into the decoder, to avoid having
+ * to pass the packet into every function. Currently only valid
+ * inside lavc and get/release_buffer callbacks.
+ * - decoding: set by avcodec_decode_*, read by get_buffer() for setting pkt_pts
+ * - encoding: unused
+ */
+ AVPacket *pkt;
+
+ /**
+ * VBV delay coded in the last frame (in periods of a 27 MHz clock).
+ * Used for compliant TS muxing.
+ * - encoding: Set by libavcodec.
+ * - decoding: unused.
+ */
+ uint64_t vbv_delay;
+} AVCodecContext;
+
+/**
+ * AVProfile.
+ */
+typedef struct AVProfile {
+ int profile;
+ const char *name; ///< short name for the profile
+} AVProfile;
+
+typedef struct AVCodecDefault AVCodecDefault;
+
+struct AVSubtitle;
+
+/**
+ * AVCodec.
+ */
+typedef struct AVCodec {
+ /**
+ * Name of the codec implementation.
+ * The name is globally unique among encoders and among decoders (but an
+ * encoder and a decoder can share the same name).
+ * This is the primary way to find a codec from the user perspective.
+ */
+ const char *name;
+ /**
+ * Descriptive name for the codec, meant to be more human readable than name.
+ * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
+ */
+ const char *long_name;
+ enum AVMediaType type;
+ enum AVCodecID id;
+ /**
+ * Codec capabilities.
+ * see CODEC_CAP_*
+ */
+ int capabilities;
+ const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0}
+ const enum AVPixelFormat *pix_fmts; ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1
+ const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0
+ const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1
+ const uint64_t *channel_layouts; ///< array of supported channel layouts, or NULL if unknown, array is terminated by 0
+ attribute_deprecated uint8_t max_lowres; ///< maximum value for lowres supported by the decoder
+ const AVClass *priv_class; ///< AVClass for the private context
+ const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}
+
+ /*****************************************************************
+ * No fields below this line are part of the public API. They
+ * may not be used outside of libavcodec and can be changed and
+ * removed at will.
+ * New public fields should be added right above.
+ *****************************************************************
+ */
+ int priv_data_size;
+ struct AVCodec *next;
+ /**
+ * @name Frame-level threading support functions
+ * @{
+ */
+ /**
+ * If defined, called on thread contexts when they are created.
+ * If the codec allocates writable tables in init(), re-allocate them here.
+ * priv_data will be set to a copy of the original.
+ */
+ int (*init_thread_copy)(AVCodecContext *);
+ /**
+ * Copy necessary context variables from a previous thread context to the current one.
+ * If not defined, the next thread will start automatically; otherwise, the codec
+ * must call ff_thread_finish_setup().
+ *
+ * dst and src will (rarely) point to the same context, in which case memcpy should be skipped.
+ */
+ int (*update_thread_context)(AVCodecContext *dst, const AVCodecContext *src);
+ /** @} */
+
+ /**
+ * Private codec-specific defaults.
+ */
+ const AVCodecDefault *defaults;
+
+ /**
+ * Initialize codec static data, called from avcodec_register().
+ */
+ void (*init_static_data)(struct AVCodec *codec);
+
+ int (*init)(AVCodecContext *);
+ int (*encode_sub)(AVCodecContext *, uint8_t *buf, int buf_size,
+ const struct AVSubtitle *sub);
+ /**
+ * Encode data to an AVPacket.
+ *
+ * @param avctx codec context
+ * @param avpkt output AVPacket (may contain a user-provided buffer)
+ * @param[in] frame AVFrame containing the raw data to be encoded
+ * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a
+ * non-empty packet was returned in avpkt.
+ * @return 0 on success, negative error code on failure
+ */
+ int (*encode2)(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame,
+ int *got_packet_ptr);
+ int (*decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt);
+ int (*close)(AVCodecContext *);
+ /**
+ * Flush buffers.
+ * Will be called when seeking
+ */
+ void (*flush)(AVCodecContext *);
+} AVCodec;
+
+/**
+ * AVHWAccel.
+ */
+typedef struct AVHWAccel {
+ /**
+ * Name of the hardware accelerated codec.
+ * The name is globally unique among encoders and among decoders (but an
+ * encoder and a decoder can share the same name).
+ */
+ const char *name;
+
+ /**
+ * Type of codec implemented by the hardware accelerator.
+ *
+ * See AVMEDIA_TYPE_xxx
+ */
+ enum AVMediaType type;
+
+ /**
+ * Codec implemented by the hardware accelerator.
+ *
+ * See AV_CODEC_ID_xxx
+ */
+ enum AVCodecID id;
+
+ /**
+ * Supported pixel format.
+ *
+ * Only hardware accelerated formats are supported here.
+ */
+ enum AVPixelFormat pix_fmt;
+
+ /**
+ * Hardware accelerated codec capabilities.
+ * see FF_HWACCEL_CODEC_CAP_*
+ */
+ int capabilities;
+
+ struct AVHWAccel *next;
+
+ /**
+ * Called at the beginning of each frame or field picture.
+ *
+ * Meaningful frame information (codec specific) is guaranteed to
+ * be parsed at this point. This function is mandatory.
+ *
+ * Note that buf can be NULL along with buf_size set to 0.
+ * Otherwise, this means the whole frame is available at this point.
+ *
+ * @param avctx the codec context
+ * @param buf the frame data buffer base
+ * @param buf_size the size of the frame in bytes
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);
+
+ /**
+ * Callback for each slice.
+ *
+ * Meaningful slice information (codec specific) is guaranteed to
+ * be parsed at this point. This function is mandatory.
+ *
+ * @param avctx the codec context
+ * @param buf the slice data buffer base
+ * @param buf_size the size of the slice in bytes
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);
+
+ /**
+ * Called at the end of each frame or field picture.
+ *
+ * The whole picture is parsed at this point and can now be sent
+ * to the hardware accelerator. This function is mandatory.
+ *
+ * @param avctx the codec context
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*end_frame)(AVCodecContext *avctx);
+
+ /**
+ * Size of HW accelerator private data.
+ *
+ * Private data is allocated with av_mallocz() before
+ * AVCodecContext.get_buffer() and deallocated after
+ * AVCodecContext.release_buffer().
+ */
+ int priv_data_size;
+} AVHWAccel;
+
+/**
+ * @defgroup lavc_picture AVPicture
+ *
+ * Functions for working with AVPicture
+ * @{
+ */
+
+/**
+ * Four components are given, that's all.
+ * The last component is alpha.
+ */
+typedef struct AVPicture {
+ uint8_t *data[AV_NUM_DATA_POINTERS];
+ int linesize[AV_NUM_DATA_POINTERS]; ///< number of bytes per line
+} AVPicture;
+
+/**
+ * @}
+ */
+
+#define AVPALETTE_SIZE 1024
+#define AVPALETTE_COUNT 256
+
+enum AVSubtitleType {
+ SUBTITLE_NONE,
+
+ SUBTITLE_BITMAP, ///< A bitmap, pict will be set
+
+ /**
+ * Plain text, the text field must be set by the decoder and is
+ * authoritative. ass and pict fields may contain approximations.
+ */
+ SUBTITLE_TEXT,
+
+ /**
+ * Formatted text, the ass field must be set by the decoder and is
+ * authoritative. pict and text fields may contain approximations.
+ */
+ SUBTITLE_ASS,
+};
+
+#define AV_SUBTITLE_FLAG_FORCED 0x00000001
+
+typedef struct AVSubtitleRect {
+ int x; ///< top left corner of pict, undefined when pict is not set
+ int y; ///< top left corner of pict, undefined when pict is not set
+ int w; ///< width of pict, undefined when pict is not set
+ int h; ///< height of pict, undefined when pict is not set
+ int nb_colors; ///< number of colors in pict, undefined when pict is not set
+
+ /**
+ * data+linesize for the bitmap of this subtitle.
+ * can be set for text/ass as well once they were rendered
+ */
+ AVPicture pict;
+ enum AVSubtitleType type;
+
+ char *text; ///< 0 terminated plain UTF-8 text
+
+ /**
+ * 0 terminated ASS/SSA compatible event line.
+ * The presentation of this is unaffected by the other values in this
+ * struct.
+ */
+ char *ass;
+ int flags;
+} AVSubtitleRect;
+
+typedef struct AVSubtitle {
+ uint16_t format; /* 0 = graphics */
+ uint32_t start_display_time; /* relative to packet pts, in ms */
+ uint32_t end_display_time; /* relative to packet pts, in ms */
+ unsigned num_rects;
+ AVSubtitleRect **rects;
+ int64_t pts; ///< Same as packet pts, in AV_TIME_BASE
+} AVSubtitle;
+
+/**
+ * If c is NULL, returns the first registered codec,
+ * if c is non-NULL, returns the next registered codec after c,
+ * or NULL if c is the last one.
+ */
+AVCodec *av_codec_next(const AVCodec *c);
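+
+/*
+ * Minimal sketch (editorial) of enumerating the registered codecs with
+ * av_codec_next(); avcodec_register_all() must have been called first and
+ * printing via <stdio.h> is only for illustration.
+ *
+ *     AVCodec *c = NULL;
+ *     avcodec_register_all();
+ *     while ((c = av_codec_next(c)))
+ *         printf("%s (%s)\n", c->name, c->long_name ? c->long_name : "");
+ */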
+
+/**
+ * Return the LIBAVCODEC_VERSION_INT constant.
+ */
+unsigned avcodec_version(void);
+
+/**
+ * Return the libavcodec build-time configuration.
+ */
+const char *avcodec_configuration(void);
+
+/**
+ * Return the libavcodec license.
+ */
+const char *avcodec_license(void);
+
+/**
+ * Register the codec codec and initialize libavcodec.
+ *
+ * @warning either this function or avcodec_register_all() must be called
+ * before any other libavcodec functions.
+ *
+ * @see avcodec_register_all()
+ */
+void avcodec_register(AVCodec *codec);
+
+/**
+ * Register all the codecs, parsers and bitstream filters which were enabled at
+ * configuration time. If you do not call this function you can select exactly
+ * which formats you want to support, by using the individual registration
+ * functions.
+ *
+ * @see avcodec_register
+ * @see av_register_codec_parser
+ * @see av_register_bitstream_filter
+ */
+void avcodec_register_all(void);
+
+/**
+ * Allocate an AVCodecContext and set its fields to default values. The
+ * resulting struct can be deallocated by calling avcodec_close() on it followed
+ * by av_free().
+ *
+ * @param codec if non-NULL, allocate private data and initialize defaults
+ * for the given codec. It is illegal to then call avcodec_open2()
+ * with a different codec.
+ * If NULL, then the codec-specific defaults won't be initialized,
+ * which may result in suboptimal default settings (this is
+ * important mainly for encoders, e.g. libx264).
+ *
+ * @return An AVCodecContext filled with default values or NULL on failure.
+ * @see avcodec_get_context_defaults
+ */
+AVCodecContext *avcodec_alloc_context3(const AVCodec *codec);
+
+/**
+ * Set the fields of the given AVCodecContext to default values corresponding
+ * to the given codec (defaults may be codec-dependent).
+ *
+ * Do not call this function if a non-NULL codec has been passed
+ * to avcodec_alloc_context3() that allocated this AVCodecContext.
+ * If codec is non-NULL, it is illegal to call avcodec_open2() with a
+ * different codec on this AVCodecContext.
+ */
+int avcodec_get_context_defaults3(AVCodecContext *s, const AVCodec *codec);
+
+/**
+ * Get the AVClass for AVCodecContext. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass *avcodec_get_class(void);
+
+/**
+ * Copy the settings of the source AVCodecContext into the destination
+ * AVCodecContext. The resulting destination codec context will be
+ * unopened, i.e. you are required to call avcodec_open2() before you
+ * can use this AVCodecContext to decode/encode video/audio data.
+ *
+ * @param dest target codec context, should be initialized with
+ * avcodec_alloc_context3(), but otherwise uninitialized
+ * @param src source codec context
+ * @return AVERROR() on error (e.g. memory allocation error), 0 on success
+ */
+int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src);
+
+/**
+ * Allocate an AVFrame and set its fields to default values. The resulting
+ * struct must be freed using avcodec_free_frame().
+ *
+ * @return An AVFrame filled with default values or NULL on failure.
+ * @see avcodec_get_frame_defaults
+ */
+AVFrame *avcodec_alloc_frame(void);
+
+/**
+ * Set the fields of the given AVFrame to default values.
+ *
+ * @param frame The AVFrame of which the fields should be set to default values.
+ */
+void avcodec_get_frame_defaults(AVFrame *frame);
+
+/**
+ * Free the frame and any dynamically allocated objects in it,
+ * e.g. extended_data.
+ *
+ * @param frame frame to be freed. The pointer will be set to NULL.
+ *
+ * @warning this function does NOT free the data buffers themselves
+ * (it does not know how, since they might have been allocated with
+ * a custom get_buffer()).
+ */
+void avcodec_free_frame(AVFrame **frame);
+
+/**
+ * Initialize the AVCodecContext to use the given AVCodec. Prior to using this
+ * function the context has to be allocated with avcodec_alloc_context3().
+ *
+ * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
+ * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
+ * retrieving a codec.
+ *
+ * @warning This function is not thread safe!
+ *
+ * @code
+ * AVCodec *codec;
+ * AVCodecContext *context;
+ * AVDictionary *opts = NULL;
+ *
+ * avcodec_register_all();
+ * av_dict_set(&opts, "b", "2.5M", 0);
+ * codec = avcodec_find_decoder(AV_CODEC_ID_H264);
+ * if (!codec)
+ *     exit(1);
+ *
+ * context = avcodec_alloc_context3(codec);
+ *
+ * if (avcodec_open2(context, codec, &opts) < 0)
+ *     exit(1);
+ * @endcode
+ *
+ * @param avctx The context to initialize.
+ * @param codec The codec to open this context for. If a non-NULL codec has been
+ * previously passed to avcodec_alloc_context3() or
+ * avcodec_get_context_defaults3() for this context, then this
+ * parameter MUST be either NULL or equal to the previously passed
+ * codec.
+ * @param options A dictionary filled with AVCodecContext and codec-private options.
+ * On return this object will be filled with options that were not found.
+ *
+ * @return zero on success, a negative value on error
+ * @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(),
+ * av_dict_set(), av_opt_find().
+ */
+int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options);
+
+/**
+ * Close a given AVCodecContext and free all the data associated with it
+ * (but not the AVCodecContext itself).
+ *
+ * Calling this function on an AVCodecContext that hasn't been opened will free
+ * the codec-specific data allocated in avcodec_alloc_context3() /
+ * avcodec_get_context_defaults3() with a non-NULL codec. Subsequent calls will
+ * do nothing.
+ */
+int avcodec_close(AVCodecContext *avctx);
+
+/**
+ * Free all allocated data in the given subtitle struct.
+ *
+ * @param sub AVSubtitle to free.
+ */
+void avsubtitle_free(AVSubtitle *sub);
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_packet
+ * @{
+ */
+
+/**
+ * Default packet destructor.
+ */
+void av_destruct_packet(AVPacket *pkt);
+
+/**
+ * Initialize optional fields of a packet with default values.
+ *
+ * Note, this does not touch the data and size members, which have to be
+ * initialized separately.
+ *
+ * @param pkt packet
+ */
+void av_init_packet(AVPacket *pkt);
+
+/**
+ * Allocate the payload of a packet and initialize its fields with
+ * default values.
+ *
+ * @param pkt packet
+ * @param size wanted payload size
+ * @return 0 if OK, AVERROR_xxx otherwise
+ */
+int av_new_packet(AVPacket *pkt, int size);
+
+/**
+ * Reduce packet size, correctly zeroing padding
+ *
+ * @param pkt packet
+ * @param size new size
+ */
+void av_shrink_packet(AVPacket *pkt, int size);
+
+/**
+ * Increase packet size, correctly zeroing padding
+ *
+ * @param pkt packet
+ * @param grow_by number of bytes by which to increase the size of the packet
+ */
+int av_grow_packet(AVPacket *pkt, int grow_by);
+
+/**
+ * @warning This is a hack - the packet memory allocation stuff is broken. The
+ * packet is allocated if it was not really allocated.
+ */
+int av_dup_packet(AVPacket *pkt);
+
+/**
+ * Free a packet.
+ *
+ * @param pkt packet to free
+ */
+void av_free_packet(AVPacket *pkt);
+
+/**
+ * Allocate new side information for a packet.
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param size side information size
+ * @return pointer to freshly allocated data or NULL otherwise
+ */
+uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ int size);
+
+/**
+ * Shrink the already allocated side data buffer
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param size new side information size
+ * @return 0 on success, < 0 on failure
+ */
+int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ int size);
+
+/**
+ * Get side information from packet.
+ *
+ * @param pkt packet
+ * @param type desired side information type
+ * @param size pointer for side information size to store (optional)
+ * @return pointer to data if present or NULL otherwise
+ */
+uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ int *size);
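+
+/*
+ * Small usage sketch (editorial): fetching a palette attached as packet side
+ * data. AV_PKT_DATA_PALETTE is assumed to be the relevant side data type and
+ * my_palette a hypothetical uint8_t[AVPALETTE_SIZE] destination; <string.h>
+ * is needed for memcpy().
+ *
+ *     int side_size = 0;
+ *     uint8_t *pal = av_packet_get_side_data(pkt, AV_PKT_DATA_PALETTE, &side_size);
+ *     if (pal && side_size == AVPALETTE_SIZE)
+ *         memcpy(my_palette, pal, AVPALETTE_SIZE);
+ */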
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_decoding
+ * @{
+ */
+
+/**
+ * Find a registered decoder with a matching codec ID.
+ *
+ * @param id AVCodecID of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_decoder(enum AVCodecID id);
+
+/**
+ * Find a registered decoder with the specified name.
+ *
+ * @param name name of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_decoder_by_name(const char *name);
+
+int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);
+void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);
+int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic);
+
+/**
+ * Return the amount of padding in pixels which the get_buffer callback must
+ * provide around the edge of the image for codecs which do not have the
+ * CODEC_FLAG_EMU_EDGE flag.
+ *
+ * @return Required padding in pixels.
+ */
+unsigned avcodec_get_edge_width(void);
+
+/**
+ * Modify width and height values so that they will result in a memory
+ * buffer that is acceptable for the codec if you do not use any horizontal
+ * padding.
+ *
+ * May only be used if a codec with CODEC_CAP_DR1 has been opened.
+ * If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased
+ * according to avcodec_get_edge_width() before.
+ */
+void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);
+
+/**
+ * Modify width and height values so that they will result in a memory
+ * buffer that is acceptable for the codec if you also ensure that all
+ * line sizes are a multiple of the respective linesize_align[i].
+ *
+ * May only be used if a codec with CODEC_CAP_DR1 has been opened.
+ * If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased
+ * according to avcodec_get_edge_width() before.
+ */
+void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
+ int linesize_align[AV_NUM_DATA_POINTERS]);
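+
+/*
+ * Short sketch (editorial) of sizing buffers for a custom get_buffer() with
+ * avcodec_align_dimensions2(); avctx is the codec context being used.
+ *
+ *     int w = avctx->width, h = avctx->height;
+ *     int linesize_align[AV_NUM_DATA_POINTERS];
+ *     avcodec_align_dimensions2(avctx, &w, &h, linesize_align);
+ *     // allocate the picture with the padded w x h rather than the coded size
+ */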
+
+#if FF_API_OLD_DECODE_AUDIO
+/**
+ * Wrapper function which calls avcodec_decode_audio4.
+ *
+ * @deprecated Use avcodec_decode_audio4 instead.
+ *
+ * Decode the audio frame of size avpkt->size from avpkt->data into samples.
+ * Some decoders may support multiple frames in a single AVPacket; such
+ * decoders would then just decode the first frame. In this case,
+ * avcodec_decode_audio3 has to be called again with an AVPacket that contains
+ * the remaining data in order to decode the second frame, etc.
+ * If no frame could be output, frame_size_ptr is zero. Otherwise, it is the
+ * decompressed frame size in bytes.
+ *
+ * @warning You must set frame_size_ptr to the allocated size of the
+ * output buffer before calling avcodec_decode_audio3().
+ *
+ * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than
+ * the actual read bytes because some optimized bitstream readers read 32 or 64
+ * bits at once and could read over the end.
+ *
+ * @warning The end of the input buffer avpkt->data should be set to 0 to ensure that
+ * no overreading happens for damaged MPEG streams.
+ *
+ * @warning You must not provide a custom get_buffer() when using
+ * avcodec_decode_audio3(). Doing so will override it with
+ * avcodec_default_get_buffer. Use avcodec_decode_audio4() instead,
+ * which does allow the application to provide a custom get_buffer().
+ *
+ * @note You might have to align the input buffer avpkt->data and output buffer
+ * samples. The alignment requirements depend on the CPU: On some CPUs it isn't
+ * necessary at all, on others it won't work at all if not aligned and on others
+ * it will work but it will have an impact on performance.
+ *
+ * In practice, avpkt->data should have 4 byte alignment at minimum and
+ * samples should be 16 byte aligned unless the CPU doesn't need it
+ * (AltiVec and SSE do).
+ *
+ * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay
+ * between input and output, these need to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to return the remaining frames.
+ *
+ * @param avctx the codec context
+ * @param[out] samples the output buffer, sample type in avctx->sample_fmt
+ * If the sample format is planar, each channel plane will
+ * be the same size, with no padding between channels.
+ * @param[in,out] frame_size_ptr the output buffer size in bytes
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ * You can create such a packet with av_init_packet() and by then setting
+ * data and size; some decoders might in addition need other fields.
+ * All decoders are designed to use the fewest fields possible though.
+ * @return On error a negative value is returned, otherwise the number of bytes
+ * used or zero if no frame data was decompressed (used) from the input AVPacket.
+ */
+attribute_deprecated int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
+ int *frame_size_ptr,
+ AVPacket *avpkt);
+#endif
+
+/**
+ * Decode the audio frame of size avpkt->size from avpkt->data into frame.
+ *
+ * Some decoders may support multiple frames in a single AVPacket. Such
+ * decoders would then just decode the first frame. In this case,
+ * avcodec_decode_audio4 has to be called again with an AVPacket containing
+ * the remaining data in order to decode the second frame, etc...
+ * Even if no frames are returned, the packet needs to be fed to the decoder
+ * with remaining data until it is completely consumed or an error occurs.
+ *
+ * @warning The input buffer, avpkt->data must be FF_INPUT_BUFFER_PADDING_SIZE
+ * larger than the actual read bytes because some optimized bitstream
+ * readers read 32 or 64 bits at once and could read over the end.
+ *
+ * @note You might have to align the input buffer. The alignment requirements
+ * depend on the CPU and the decoder.
+ *
+ * @param avctx the codec context
+ * @param[out] frame The AVFrame in which to store decoded audio samples.
+ * Decoders request a buffer of a particular size by setting
+ * AVFrame.nb_samples prior to calling get_buffer(). The
+ * decoder may, however, only utilize part of the buffer by
+ * setting AVFrame.nb_samples to a smaller value in the
+ * output frame.
+ * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is
+ * non-zero.
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ * At least avpkt->data and avpkt->size should be set. Some
+ * decoders might also require additional fields to be set.
+ * @return A negative error code is returned if an error occurred during
+ * decoding, otherwise the number of bytes consumed from the input
+ * AVPacket is returned.
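+ *
+ * A minimal usage sketch (non-normative; avctx, pkt, and handle_samples() are
+ * placeholders assumed to exist in the caller):
+ * @code
+ * AVFrame *frm = avcodec_alloc_frame();
+ * while (pkt.size > 0) {
+ *     int got  = 0;
+ *     int used = avcodec_decode_audio4(avctx, frm, &got, &pkt);
+ *     if (used < 0)
+ *         break;                // decoding error
+ *     if (got)
+ *         handle_samples(frm);  // consume frm->nb_samples samples from frm->data
+ *     pkt.data += used;
+ *     pkt.size -= used;
+ * }
+ * @endcode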
+ */
+int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame,
+ int *got_frame_ptr, AVPacket *avpkt);
+
+/**
+ * Decode the video frame of size avpkt->size from avpkt->data into picture.
+ * Some decoders may support multiple frames in a single AVPacket; such
+ * decoders would then just decode the first frame.
+ *
+ * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than
+ * the actual number of bytes read, because some optimized bitstream readers
+ * read 32 or 64 bits at once and could read over the end.
+ *
+ * @warning The end of the input buffer avpkt->data should be zeroed to ensure
+ * that no over-reading happens for damaged MPEG streams.
+ *
+ * @note You might have to align the input buffer avpkt->data.
+ * The alignment requirements depend on the CPU: on some CPUs it isn't
+ * necessary at all, on others it won't work at all if not aligned, and on
+ * others it will work but with an impact on performance.
+ *
+ * In practice, avpkt->data should have 4 byte alignment at minimum.
+ *
+ * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay
+ * between input and output; these need to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to return the remaining frames.
+ *
+ * @param avctx the codec context
+ * @param[out] picture The AVFrame in which the decoded video frame will be stored.
+ * Use avcodec_alloc_frame() to get an AVFrame; the codec will
+ * allocate memory for the actual bitmap.
+ * With the default get/release_buffer(), the decoder frees/reuses the bitmap as it sees fit.
+ * With an overridden get/release_buffer() (which needs CODEC_CAP_DR1), the user decides into
+ * what buffer the decoder decodes, and the decoder tells the user once it does not need the
+ * data anymore; at this point the user app can free/reuse/keep the memory as it sees fit.
+ *
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ * You can create such a packet with av_init_packet() and then setting
+ * data and size; some decoders might in addition need other fields like
+ * flags&AV_PKT_FLAG_KEY. All decoders are designed to use as few
+ * fields as possible.
+ * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero.
+ * @return On error a negative value is returned, otherwise the number of bytes
+ * used or zero if no frame could be decompressed.
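+ *
+ * A minimal usage sketch (non-normative; avctx, pkt, and render() are
+ * placeholders assumed to exist in the caller):
+ * @code
+ * AVFrame *pic = avcodec_alloc_frame();
+ * int got = 0;
+ * if (avcodec_decode_video2(avctx, pic, &got, &pkt) >= 0 && got)
+ *     render(pic);   // pic->data / pic->linesize hold the decoded image
+ * // At end of stream, call again with pkt.data = NULL, pkt.size = 0
+ * // until got stays 0, to drain codecs with CODEC_CAP_DELAY.
+ * @endcode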
+ */
+int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
+ int *got_picture_ptr,
+ AVPacket *avpkt);
+
+/**
+ * Decode a subtitle message.
+ * Return a negative value on error, otherwise return the number of bytes used.
+ * If no subtitle could be decompressed, got_sub_ptr is zero.
+ * Otherwise, the subtitle is stored in *sub.
+ * Note that CODEC_CAP_DR1 is not available for subtitle codecs. This is for
+ * simplicity, because the performance difference is expected to be negligible
+ * and reusing a get_buffer written for video codecs would probably perform badly
+ * due to a potentially very different allocation pattern.
+ *
+ * @param avctx the codec context
+ * @param[out] sub The AVSubtitle in which the decoded subtitle will be stored; it must be
+ *                 freed with avsubtitle_free() if *got_sub_ptr is set.
+ * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero.
+ * @param[in] avpkt The input AVPacket containing the input buffer.
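+ *
+ * A minimal usage sketch (non-normative; avctx, pkt, and render_sub() are
+ * placeholders assumed to exist in the caller):
+ * @code
+ * AVSubtitle sub;
+ * int got_sub = 0;
+ * if (avcodec_decode_subtitle2(avctx, &sub, &got_sub, &pkt) >= 0 && got_sub) {
+ *     render_sub(&sub);
+ *     avsubtitle_free(&sub);
+ * }
+ * @endcode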
+ */
+int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
+ int *got_sub_ptr,
+ AVPacket *avpkt);
+
+/**
+ * @defgroup lavc_parsing Frame parsing
+ * @{
+ */
+
+typedef struct AVCodecParserContext {
+ void *priv_data;
+ struct AVCodecParser *parser;
+ int64_t frame_offset; /* offset of the current frame */
+ int64_t cur_offset; /* current offset
+ (incremented by each av_parser_parse()) */
+ int64_t next_frame_offset; /* offset of the next frame */
+ /* video info */
+ int pict_type; /* XXX: Put it back in AVCodecContext. */
+ /**
+ * This field is used for proper frame duration computation in lavf.
+ * It signals how much longer the frame duration of the current frame
+ * is compared to the normal frame duration.
+ *
+ * frame_duration = (1 + repeat_pict) * time_base
+ *
+ * It is used by codecs like H.264 to display telecined material.
+ */
+ int repeat_pict; /* XXX: Put it back in AVCodecContext. */
+ int64_t pts; /* pts of the current frame */
+ int64_t dts; /* dts of the current frame */
+
+ /* private data */
+ int64_t last_pts;
+ int64_t last_dts;
+ int fetch_timestamp;
+
+#define AV_PARSER_PTS_NB 4
+ int cur_frame_start_index;
+ int64_t cur_frame_offset[AV_PARSER_PTS_NB];
+ int64_t cur_frame_pts[AV_PARSER_PTS_NB];
+ int64_t cur_frame_dts[AV_PARSER_PTS_NB];
+
+ int flags;
+#define PARSER_FLAG_COMPLETE_FRAMES 0x0001
+#define PARSER_FLAG_ONCE 0x0002
+/// Set if the parser has a valid file offset
+#define PARSER_FLAG_FETCHED_OFFSET 0x0004
+
+ int64_t offset; ///< byte offset from starting packet start
+ int64_t cur_frame_end[AV_PARSER_PTS_NB];
+
+ /**
+ * Set by parser to 1 for key frames and 0 for non-key frames.
+ * It is initialized to -1, so if the parser doesn't set this flag,
+ * the old-style fallback that treats AV_PICTURE_TYPE_I pictures as key
+ * frames will be used.
+ */
+ int key_frame;
+
+ /**
+ * Time difference in stream time base units from the pts of this
+ * packet to the point at which the output from the decoder has converged
+ * independently of the availability of previous frames. That is, the
+ * frames are virtually identical whether decoding started from
+ * the very first frame or from this keyframe.
+ * Is AV_NOPTS_VALUE if unknown.
+ * This field is not the display duration of the current frame.
+ * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY
+ * set.
+ *
+ * The purpose of this field is to allow seeking in streams that have no
+ * keyframes in the conventional sense. It corresponds to the
+ * recovery point SEI in H.264 and match_time_delta in NUT. It is also
+ * essential for some types of subtitle streams to ensure that all
+ * subtitles are correctly displayed after seeking.
+ */
+ int64_t convergence_duration;
+
+ // Timestamp generation support:
+ /**
+ * Synchronization point for start of timestamp generation.
+ *
+ * Set to >0 for sync point, 0 for no sync point and <0 for undefined
+ * (default).
+ *
+ * For example, this corresponds to presence of H.264 buffering period
+ * SEI message.
+ */
+ int dts_sync_point;
+
+ /**
+ * Offset of the current timestamp against last timestamp sync point in
+ * units of AVCodecContext.time_base.
+ *
+ * Set to INT_MIN when dts_sync_point is unused. Otherwise, it must
+ * contain a valid timestamp offset.
+ *
+ * Note that the timestamp of a sync point usually has a nonzero
+ * dts_ref_dts_delta, which refers to the previous sync point. The offset of
+ * the next frame after a timestamp sync point will usually be 1.
+ *
+ * For example, this corresponds to H.264 cpb_removal_delay.
+ */
+ int dts_ref_dts_delta;
+
+ /**
+ * Presentation delay of current frame in units of AVCodecContext.time_base.
+ *
+ * Set to INT_MIN when dts_sync_point is unused. Otherwise, it must
+ * contain a valid non-negative timestamp delta (the presentation time of a
+ * frame must not lie in the past).
+ *
+ * This delay represents the difference between decoding and presentation
+ * time of the frame.
+ *
+ * For example, this corresponds to H.264 dpb_output_delay.
+ */
+ int pts_dts_delta;
+
+ /**
+ * Position of the packet in file.
+ *
+ * Analogous to cur_frame_pts/dts
+ */
+ int64_t cur_frame_pos[AV_PARSER_PTS_NB];
+
+ /**
+ * Byte position of currently parsed frame in stream.
+ */
+ int64_t pos;
+
+ /**
+ * Previous frame byte position.
+ */
+ int64_t last_pos;
+
+ /**
+ * Duration of the current frame.
+ * For audio, this is in units of 1 / AVCodecContext.sample_rate.
+ * For all other types, this is in units of AVCodecContext.time_base.
+ */
+ int duration;
+} AVCodecParserContext;
+
+typedef struct AVCodecParser {
+ int codec_ids[5]; /* several codec IDs are permitted */
+ int priv_data_size;
+ int (*parser_init)(AVCodecParserContext *s);
+ int (*parser_parse)(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ const uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size);
+ void (*parser_close)(AVCodecParserContext *s);
+ int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size);
+ struct AVCodecParser *next;
+} AVCodecParser;
+
+AVCodecParser *av_parser_next(AVCodecParser *c);
+
+void av_register_codec_parser(AVCodecParser *parser);
+AVCodecParserContext *av_parser_init(int codec_id);
+
+/**
+ * Parse a packet.
+ *
+ * @param s parser context.
+ * @param avctx codec context.
+ * @param poutbuf set to pointer to parsed buffer or NULL if not yet finished.
+ * @param poutbuf_size set to size of parsed buffer or zero if not yet finished.
+ * @param buf input buffer.
+ * @param buf_size input length; to signal EOF, this should be 0 (so that the last frame can be output).
+ * @param pts input presentation timestamp.
+ * @param dts input decoding timestamp.
+ * @param pos input byte position in stream.
+ * @return the number of bytes of the input bitstream used.
+ *
+ * Example:
+ * @code
+ * while(in_len){
+ * len = av_parser_parse2(myparser, avctx, &data, &size,
+ * in_data, in_len,
+ * pts, dts, pos);
+ * in_data += len;
+ * in_len -= len;
+ *
+ * if(size)
+ * decode_frame(data, size);
+ * }
+ * @endcode
+ */
+int av_parser_parse2(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size,
+ int64_t pts, int64_t dts,
+ int64_t pos);
+
+/**
+ * @return 0 if the output buffer is a subset of the input, 1 if it is allocated and must be freed
+ * @deprecated use AVBitstreamFilter
+ */
+int av_parser_change(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe);
+void av_parser_close(AVCodecParserContext *s);
+
+/**
+ * @}
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_encoding
+ * @{
+ */
+
+/**
+ * Find a registered encoder with a matching codec ID.
+ *
+ * @param id AVCodecID of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_encoder(enum AVCodecID id);
+
+/**
+ * Find a registered encoder with the specified name.
+ *
+ * @param name name of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_encoder_by_name(const char *name);
+
+#if FF_API_OLD_ENCODE_AUDIO
+/**
+ * Encode an audio frame from samples into buf.
+ *
+ * @deprecated Use avcodec_encode_audio2 instead.
+ *
+ * @note The output buffer should be at least FF_MIN_BUFFER_SIZE bytes large.
+ * However, for codecs with avctx->frame_size equal to 0 (e.g. PCM) the user
+ * will know how much space is needed because it depends on the value passed
+ * in buf_size as described below. In that case a lower value can be used.
+ *
+ * @param avctx the codec context
+ * @param[out] buf the output buffer
+ * @param[in] buf_size the output buffer size
+ * @param[in] samples the input buffer containing the samples
+ * The number of samples read from this buffer is frame_size*channels,
+ * both of which are defined in avctx.
+ * For codecs which have avctx->frame_size equal to 0 (e.g. PCM) the number of
+ * samples read from samples is equal to:
+ * buf_size * 8 / (avctx->channels * av_get_bits_per_sample(avctx->codec_id))
+ * This also implies that av_get_bits_per_sample() must not return 0 for these
+ * codecs.
+ * @return On error a negative value is returned, on success zero or the number
+ * of bytes used to encode the data read from the input buffer.
+ */
+int attribute_deprecated avcodec_encode_audio(AVCodecContext *avctx,
+ uint8_t *buf, int buf_size,
+ const short *samples);
+#endif
+
+/**
+ * Encode a frame of audio.
+ *
+ * Takes input samples from frame and writes the next output packet, if
+ * available, to avpkt. The output packet does not necessarily contain data for
+ * the most recent frame, as encoders can delay, split, and combine input frames
+ * internally as needed.
+ *
+ * @param avctx codec context
+ * @param avpkt output AVPacket.
+ * The user can supply an output buffer by setting
+ * avpkt->data and avpkt->size prior to calling the
+ * function, but if the size of the user-provided data is not
+ * large enough, encoding will fail. All other AVPacket fields
+ * will be reset by the encoder using av_init_packet(). If
+ * avpkt->data is NULL, the encoder will allocate it.
+ * The encoder will set avpkt->size to the size of the
+ * output packet.
+ *
+ * If this function fails or produces no output, avpkt will be
+ * freed using av_free_packet() (i.e. avpkt->destruct will be
+ * called to free the user supplied buffer).
+ * @param[in] frame AVFrame containing the raw audio data to be encoded.
+ * May be NULL when flushing an encoder that has the
+ * CODEC_CAP_DELAY capability set.
+ * If CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame
+ * can have any number of samples.
+ * If it is not set, frame->nb_samples must be equal to
+ * avctx->frame_size for all frames except the last.
+ * The final frame may be smaller than avctx->frame_size.
+ * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
+ * output packet is non-empty, and to 0 if it is
+ * empty. If the function returns an error, the
+ * packet can be assumed to be invalid, and the
+ * value of got_packet_ptr is undefined and should
+ * not be used.
+ * @return 0 on success, negative error code on failure
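+ *
+ * A minimal usage sketch (non-normative; avctx, frame, and write_packet() are
+ * placeholders assumed to exist in the caller):
+ * @code
+ * AVPacket pkt;
+ * int got = 0;
+ * av_init_packet(&pkt);
+ * pkt.data = NULL;    // let the encoder allocate the output buffer
+ * pkt.size = 0;
+ * if (avcodec_encode_audio2(avctx, &pkt, frame, &got) == 0 && got) {
+ *     write_packet(pkt.data, pkt.size);
+ *     av_free_packet(&pkt);
+ * }
+ * @endcode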
+ */
+int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr);
+
+#if FF_API_OLD_ENCODE_VIDEO
+/**
+ * @deprecated use avcodec_encode_video2() instead.
+ *
+ * Encode a video frame from pict into buf.
+ * The input picture should be
+ * stored using a specific format, namely avctx.pix_fmt.
+ *
+ * @param avctx the codec context
+ * @param[out] buf the output buffer for the bitstream of encoded frame
+ * @param[in] buf_size the size of the output buffer in bytes
+ * @param[in] pict the input picture to encode
+ * @return On error a negative value is returned, on success zero or the number
+ * of bytes used from the output buffer.
+ */
+attribute_deprecated
+int avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+ const AVFrame *pict);
+#endif
+
+/**
+ * Encode a frame of video.
+ *
+ * Takes input raw video data from frame and writes the next output packet, if
+ * available, to avpkt. The output packet does not necessarily contain data for
+ * the most recent frame, as encoders can delay and reorder input frames
+ * internally as needed.
+ *
+ * @param avctx codec context
+ * @param avpkt output AVPacket.
+ * The user can supply an output buffer by setting
+ * avpkt->data and avpkt->size prior to calling the
+ * function, but if the size of the user-provided data is not
+ * large enough, encoding will fail. All other AVPacket fields
+ * will be reset by the encoder using av_init_packet(). If
+ * avpkt->data is NULL, the encoder will allocate it.
+ * The encoder will set avpkt->size to the size of the
+ * output packet. The returned data (if any) belongs to the
+ * caller, who is responsible for freeing it.
+ *
+ * If this function fails or produces no output, avpkt will be
+ * freed using av_free_packet() (i.e. avpkt->destruct will be
+ * called to free the user supplied buffer).
+ * @param[in] frame AVFrame containing the raw video data to be encoded.
+ * May be NULL when flushing an encoder that has the
+ * CODEC_CAP_DELAY capability set.
+ * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
+ * output packet is non-empty, and to 0 if it is
+ * empty. If the function returns an error, the
+ * packet can be assumed to be invalid, and the
+ * value of got_packet_ptr is undefined and should
+ * not be used.
+ * @return 0 on success, negative error code on failure
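+ *
+ * A minimal usage sketch (non-normative; avctx, frame, and write_packet() are
+ * placeholders; pass frame = NULL repeatedly at the end to flush a delayed
+ * encoder):
+ * @code
+ * AVPacket pkt;
+ * int got = 0;
+ * av_init_packet(&pkt);
+ * pkt.data = NULL;    // let the encoder allocate the output buffer
+ * pkt.size = 0;
+ * if (avcodec_encode_video2(avctx, &pkt, frame, &got) == 0 && got) {
+ *     write_packet(pkt.data, pkt.size);
+ *     av_free_packet(&pkt);
+ * }
+ * @endcode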
+ */
+int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr);
+
+int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+ const AVSubtitle *sub);
+
+
+/**
+ * @}
+ */
+
+#if FF_API_AVCODEC_RESAMPLE
+/**
+ * @defgroup lavc_resample Audio resampling
+ * @ingroup libavc
+ * @deprecated use libavresample instead
+ *
+ * @{
+ */
+struct ReSampleContext;
+struct AVResampleContext;
+
+typedef struct ReSampleContext ReSampleContext;
+
+/**
+ * Initialize audio resampling context.
+ *
+ * @param output_channels number of output channels
+ * @param input_channels number of input channels
+ * @param output_rate output sample rate
+ * @param input_rate input sample rate
+ * @param sample_fmt_out requested output sample format
+ * @param sample_fmt_in input sample format
+ * @param filter_length length of each FIR filter in the filterbank relative to the cutoff frequency
+ * @param log2_phase_count log2 of the number of entries in the polyphase filterbank
+ * @param linear if 1 then the FIR filter used will be linearly interpolated
+ *               between the 2 closest; if 0 the closest will be used
+ * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate
+ * @return allocated ReSampleContext, NULL if error occurred
+ */
+attribute_deprecated
+ReSampleContext *av_audio_resample_init(int output_channels, int input_channels,
+ int output_rate, int input_rate,
+ enum AVSampleFormat sample_fmt_out,
+ enum AVSampleFormat sample_fmt_in,
+ int filter_length, int log2_phase_count,
+ int linear, double cutoff);
+
+attribute_deprecated
+int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples);
+
+/**
+ * Free resample context.
+ *
+ * @param s a non-NULL pointer to a resample context previously
+ * created with av_audio_resample_init()
+ */
+attribute_deprecated
+void audio_resample_close(ReSampleContext *s);
+
+
+/**
+ * Initialize an audio resampler.
+ * Note, if either rate is not an integer then simply scale both rates up so they are.
+ * @param filter_length length of each FIR filter in the filterbank relative to the cutoff freq
+ * @param log2_phase_count log2 of the number of entries in the polyphase filterbank
+ * @param linear If 1 then the FIR filter used will be linearly interpolated
+ *               between the 2 closest; if 0 the closest will be used
+ * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate
+ */
+attribute_deprecated
+struct AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_length, int log2_phase_count, int linear, double cutoff);
+
+/**
+ * Resample an array of samples using a previously configured context.
+ * @param src an array of unconsumed samples
+ * @param consumed the number of samples of src which have been consumed is returned here
+ * @param src_size the number of unconsumed samples available
+ * @param dst_size the amount of space in samples available in dst
+ * @param update_ctx If this is 0 then the context will not be modified; that way several channels can be resampled with the same context.
+ * @return the number of samples written in dst or -1 if an error occurred
+ */
+attribute_deprecated
+int av_resample(struct AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int dst_size, int update_ctx);
+
+
+/**
+ * Compensate samplerate/timestamp drift. The compensation is done by changing
+ * the resampler parameters, so no audible clicks or similar distortions occur.
+ * @param compensation_distance distance in output samples over which the compensation should be performed
+ * @param sample_delta number of samples by which the output should be shortened
+ *
+ * Example: av_resample_compensate(c, 10, 500)
+ * here, instead of 510 samples only 500 samples would be output
+ *
+ * Note: due to rounding, the actual compensation might be slightly different,
+ * especially if the compensation_distance is large and the in_rate used during init is small
+ */
+attribute_deprecated
+void av_resample_compensate(struct AVResampleContext *c, int sample_delta, int compensation_distance);
+attribute_deprecated
+void av_resample_close(struct AVResampleContext *c);
+
+/**
+ * @}
+ */
+#endif
+
+/**
+ * @addtogroup lavc_picture
+ * @{
+ */
+
+/**
+ * Allocate memory for a picture. Call avpicture_free() to free it.
+ *
+ * @see avpicture_fill()
+ *
+ * @param picture the picture to be filled in
+ * @param pix_fmt the format of the picture
+ * @param width the width of the picture
+ * @param height the height of the picture
+ * @return zero if successful, a negative value if not
+ */
+int avpicture_alloc(AVPicture *picture, enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * Free a picture previously allocated by avpicture_alloc().
+ * The data buffer used by the AVPicture is freed, but the AVPicture structure
+ * itself is not.
+ *
+ * @param picture the AVPicture to be freed
+ */
+void avpicture_free(AVPicture *picture);
+
+/**
+ * Fill in the AVPicture fields.
+ * The fields of the given AVPicture are filled in by using the 'ptr' address
+ * which points to the image data buffer. Depending on the specified picture
+ * format, one or multiple image data pointers and line sizes will be set.
+ * If a planar format is specified, several pointers will be set pointing to
+ * the different picture planes and the line sizes of the different planes
+ * will be stored in the linesize array.
+ * Call with ptr == NULL to get the required size for the ptr buffer.
+ *
+ * To allocate the buffer and fill in the AVPicture fields in one call,
+ * use avpicture_alloc().
+ *
+ * @param picture AVPicture whose fields are to be filled in
+ * @param ptr Buffer which will contain or contains the actual image data
+ * @param pix_fmt The format in which the picture data is stored.
+ * @param width the width of the image in pixels
+ * @param height the height of the image in pixels
+ * @return size of the image data in bytes
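+ *
+ * A minimal usage sketch (non-normative; w and h are placeholder dimensions):
+ * @code
+ * AVPicture pic;
+ * int size = avpicture_get_size(AV_PIX_FMT_YUV420P, w, h);
+ * uint8_t *buf = av_malloc(size);
+ * avpicture_fill(&pic, buf, AV_PIX_FMT_YUV420P, w, h);
+ * // pic.data[0..2] / pic.linesize[0..2] now describe the planes inside buf.
+ * @endcode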
+ */
+int avpicture_fill(AVPicture *picture, uint8_t *ptr,
+ enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * Copy pixel data from an AVPicture into a buffer.
+ * The data is stored compactly, without any gaps for alignment or padding
+ * which may be applied by avpicture_fill().
+ *
+ * @see avpicture_get_size()
+ *
+ * @param[in] src AVPicture containing image data
+ * @param[in] pix_fmt The format in which the picture data is stored.
+ * @param[in] width the width of the image in pixels.
+ * @param[in] height the height of the image in pixels.
+ * @param[out] dest A buffer into which picture data will be copied.
+ * @param[in] dest_size The size of 'dest'.
+ * @return The number of bytes written to dest, or a negative value (error code) on error.
+ */
+int avpicture_layout(const AVPicture* src, enum AVPixelFormat pix_fmt,
+ int width, int height,
+ unsigned char *dest, int dest_size);
+
+/**
+ * Calculate the size in bytes that a picture of the given width and height
+ * would occupy if stored in the given picture format.
+ * Note that this returns the size of a compact representation as generated
+ * by avpicture_layout(), which can be smaller than the size required for e.g.
+ * avpicture_fill().
+ *
+ * @param pix_fmt the given picture format
+ * @param width the width of the image
+ * @param height the height of the image
+ * @return Image data size in bytes or -1 on error (e.g. too large dimensions).
+ */
+int avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * Deinterlace the image; return -1 if not supported.
+ */
+int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
+ enum AVPixelFormat pix_fmt, int width, int height);
+/**
+ * Copy image src to dst. Wraps av_picture_data_copy() above.
+ */
+void av_picture_copy(AVPicture *dst, const AVPicture *src,
+ enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * Crop image top and left side.
+ */
+int av_picture_crop(AVPicture *dst, const AVPicture *src,
+ enum AVPixelFormat pix_fmt, int top_band, int left_band);
+
+/**
+ * Pad image.
+ */
+int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum AVPixelFormat pix_fmt,
+ int padtop, int padbottom, int padleft, int padright, int *color);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavc_misc Utility functions
+ * @ingroup libavc
+ *
+ * Miscellaneous utility functions related to both encoding and decoding
+ * (or neither).
+ * @{
+ */
+
+/**
+ * @defgroup lavc_misc_pixfmt Pixel formats
+ *
+ * Functions for working with pixel formats.
+ * @{
+ */
+
+/**
+ * @deprecated Use av_pix_fmt_get_chroma_sub_sample
+ */
+
+void attribute_deprecated avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift);
+
+/**
+ * Return a value representing the fourCC code associated with the
+ * pixel format pix_fmt, or 0 if no associated fourCC code can be
+ * found.
+ */
+unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt);
+
+#define FF_LOSS_RESOLUTION 0x0001 /**< loss due to resolution change */
+#define FF_LOSS_DEPTH 0x0002 /**< loss due to color depth change */
+#define FF_LOSS_COLORSPACE 0x0004 /**< loss due to color space conversion */
+#define FF_LOSS_ALPHA 0x0008 /**< loss of alpha bits */
+#define FF_LOSS_COLORQUANT 0x0010 /**< loss due to color quantization */
+#define FF_LOSS_CHROMA 0x0020 /**< loss of chroma (e.g. RGB to gray conversion) */
+
+/**
+ * Compute what kind of losses will occur when converting from one specific
+ * pixel format to another.
+ * When converting from one pixel format to another, information loss may occur.
+ * For example, when converting from RGB24 to GRAY, the color information will
+ * be lost. Similarly, other losses occur when converting from some formats to
+ * other formats. These losses can involve loss of chroma, but also loss of
+ * resolution, loss of color depth, loss due to the color space conversion, loss
+ * of the alpha bits or loss due to color quantization.
+ * avcodec_get_pix_fmt_loss() informs you about the various types of losses
+ * which will occur when converting from one pixel format to another.
+ *
+ * @param[in] dst_pix_fmt destination pixel format
+ * @param[in] src_pix_fmt source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @return Combination of flags informing you what kind of losses will occur.
+ */
+int avcodec_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, enum AVPixelFormat src_pix_fmt,
+ int has_alpha);
+
+#if FF_API_FIND_BEST_PIX_FMT
+/**
+ * @deprecated use avcodec_find_best_pix_fmt2() instead.
+ *
+ * Find the best pixel format to convert to given a certain source pixel
+ * format. When converting from one pixel format to another, information loss
+ * may occur. For example, when converting from RGB24 to GRAY, the color
+ * information will be lost. Similarly, other losses occur when converting from
+ * some formats to other formats. avcodec_find_best_pix_fmt() searches which of
+ * the given pixel formats should be used to suffer the least amount of loss.
+ * The pixel formats from which it chooses one, are determined by the
+ * pix_fmt_mask parameter.
+ *
+ * @code
+ * src_pix_fmt = AV_PIX_FMT_YUV420P;
+ * pix_fmt_mask = (1 << AV_PIX_FMT_YUV422P) | (1 << AV_PIX_FMT_RGB24);
+ * dst_pix_fmt = avcodec_find_best_pix_fmt(pix_fmt_mask, src_pix_fmt, alpha, &loss);
+ * @endcode
+ *
+ * @param[in] pix_fmt_mask bitmask determining which pixel format to choose from
+ * @param[in] src_pix_fmt source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur.
+ * @return The best pixel format to convert to or -1 if none was found.
+ */
+attribute_deprecated
+enum AVPixelFormat avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, enum AVPixelFormat src_pix_fmt,
+ int has_alpha, int *loss_ptr);
+#endif /* FF_API_FIND_BEST_PIX_FMT */
+
+/**
+ * Find the best pixel format to convert to given a certain source pixel
+ * format. When converting from one pixel format to another, information loss
+ * may occur. For example, when converting from RGB24 to GRAY, the color
+ * information will be lost. Similarly, other losses occur when converting from
+ * some formats to other formats. avcodec_find_best_pix_fmt2() searches which of
+ * the given pixel formats should be used to suffer the least amount of loss.
+ * The pixel formats from which it chooses one, are determined by the
+ * pix_fmt_list parameter.
+ *
+ *
+ * @param[in] pix_fmt_list AV_PIX_FMT_NONE terminated array of pixel formats to choose from
+ * @param[in] src_pix_fmt source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur.
+ * @return The best pixel format to convert to or -1 if none was found.
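+ *
+ * A minimal usage sketch (non-normative):
+ * @code
+ * enum AVPixelFormat candidates[] = { AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24,
+ *                                     AV_PIX_FMT_NONE };
+ * int loss;
+ * enum AVPixelFormat best =
+ *     avcodec_find_best_pix_fmt2(candidates, AV_PIX_FMT_YUV420P, 0, &loss);
+ * @endcode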
+ */
+enum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat *pix_fmt_list,
+ enum AVPixelFormat src_pix_fmt,
+ int has_alpha, int *loss_ptr);
+
+enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat * fmt);
+
+/**
+ * @}
+ */
+
+void avcodec_set_dimensions(AVCodecContext *s, int width, int height);
+
+/**
+ * Put a string representing the codec tag codec_tag in buf.
+ *
+ * @param buf_size size in bytes of buf
+ * @return the length of the string that would have been generated if
+ * enough space had been available, excluding the trailing null
+ */
+size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag);
+
+void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode);
+
+/**
+ * Return a name for the specified profile, if available.
+ *
+ * @param codec the codec that is searched for the given profile
+ * @param profile the profile value for which a name is requested
+ * @return A name for the profile if found, NULL otherwise.
+ */
+const char *av_get_profile_name(const AVCodec *codec, int profile);
+
+int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size);
+int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count);
+//FIXME func typedef
+
+/**
+ * Fill audio frame data and linesize.
+ * AVFrame extended_data channel pointers are allocated if necessary for
+ * planar audio.
+ *
+ * @param frame the AVFrame
+ * frame->nb_samples must be set prior to calling the
+ * function. This function fills in frame->data,
+ * frame->extended_data, frame->linesize[0].
+ * @param nb_channels channel count
+ * @param sample_fmt sample format
+ * @param buf buffer to use for frame data
+ * @param buf_size size of buffer
+ * @param align plane size sample alignment (0 = default)
+ * @return 0 on success, negative error code on failure
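+ *
+ * A minimal usage sketch (non-normative; assumes interleaved 16-bit stereo,
+ * an already-allocated frame, and av_samples_get_buffer_size() from libavutil):
+ * @code
+ * frame->nb_samples = 1024;
+ * int needed = av_samples_get_buffer_size(NULL, 2, frame->nb_samples,
+ *                                         AV_SAMPLE_FMT_S16, 0);
+ * uint8_t *buf = av_malloc(needed);
+ * avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_S16, buf, needed, 0);
+ * @endcode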
+ */
+int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
+ enum AVSampleFormat sample_fmt, const uint8_t *buf,
+ int buf_size, int align);
+
+/**
+ * Flush buffers, should be called when seeking or when switching to a different stream.
+ */
+void avcodec_flush_buffers(AVCodecContext *avctx);
+
+void avcodec_default_free_buffers(AVCodecContext *s);
+
+/**
+ * Return codec bits per sample.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_bits_per_sample(enum AVCodecID codec_id);
+
+/**
+ * Return codec bits per sample.
+ * Only return non-zero if the bits per sample is exactly correct, not an
+ * approximation.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_exact_bits_per_sample(enum AVCodecID codec_id);
+
+/**
+ * Return audio frame duration.
+ *
+ * @param avctx codec context
+ * @param frame_bytes size of the frame, or 0 if unknown
+ * @return frame duration, in samples, if known. 0 if not able to
+ * determine.
+ */
+int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes);
+
+
+typedef struct AVBitStreamFilterContext {
+ void *priv_data;
+ struct AVBitStreamFilter *filter;
+ AVCodecParserContext *parser;
+ struct AVBitStreamFilterContext *next;
+} AVBitStreamFilterContext;
+
+
+typedef struct AVBitStreamFilter {
+ const char *name;
+ int priv_data_size;
+ int (*filter)(AVBitStreamFilterContext *bsfc,
+ AVCodecContext *avctx, const char *args,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe);
+ void (*close)(AVBitStreamFilterContext *bsfc);
+ struct AVBitStreamFilter *next;
+} AVBitStreamFilter;
+
+void av_register_bitstream_filter(AVBitStreamFilter *bsf);
+AVBitStreamFilterContext *av_bitstream_filter_init(const char *name);
+int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc,
+ AVCodecContext *avctx, const char *args,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe);
+void av_bitstream_filter_close(AVBitStreamFilterContext *bsf);
+
+AVBitStreamFilter *av_bitstream_filter_next(AVBitStreamFilter *f);
+
+/* memory */
+
+/**
+ * Reallocate the given block if it is not large enough, otherwise do nothing.
+ *
+ * @see av_realloc
+ */
+void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Allocate a buffer, reusing the given one if large enough.
+ *
+ * Contrary to av_fast_realloc(), the current buffer contents might not be
+ * preserved, and on error the old buffer is freed; thus no special
+ * handling to avoid memory leaks is necessary.
+ *
+ * @param ptr pointer to pointer to already allocated buffer, overwritten with pointer to new buffer
+ * @param size size of the buffer *ptr points to
+ * @param min_size minimum size of *ptr buffer after returning, *ptr will be NULL and
+ * *size 0 if an error occurred.
+ */
+void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Allocate a buffer with padding, reusing the given one if large enough.
+ *
+ * Same behaviour as av_fast_malloc(), but the buffer has an additional
+ * FF_INPUT_BUFFER_PADDING_SIZE bytes at the end which will always be set to 0.
+ *
+ */
+void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Encode extradata length to a buffer. Used by xiph codecs.
+ *
+ * @param s buffer to write to; must be at least (v/255+1) bytes long
+ * @param v size of extradata in bytes
+ * @return number of bytes written to the buffer.
+ */
+unsigned int av_xiphlacing(unsigned char *s, unsigned int v);
+
+/**
+ * Log a generic warning message about a missing feature. This function is
+ * intended to be used internally by Libav (libavcodec, libavformat, etc.)
+ * only, and would normally not be used by applications.
+ * @param[in] avc a pointer to an arbitrary struct of which the first field is
+ * a pointer to an AVClass struct
+ * @param[in] feature string containing the name of the missing feature
+ * @param[in] want_sample indicates if samples are wanted which exhibit this feature.
+ * If want_sample is non-zero, additional verbiage will be added to the log
+ * message which tells the user how to report samples to the development
+ * mailing list.
+ */
+void av_log_missing_feature(void *avc, const char *feature, int want_sample);
+
+/**
+ * Log a generic warning message asking for a sample. This function is
+ * intended to be used internally by Libav (libavcodec, libavformat, etc.)
+ * only, and would normally not be used by applications.
+ * @param[in] avc a pointer to an arbitrary struct of which the first field is
+ * a pointer to an AVClass struct
+ * @param[in] msg string containing an optional message, or NULL if no message
+ */
+void av_log_ask_for_sample(void *avc, const char *msg, ...) av_printf_format(2, 3);
+
+/**
+ * Register the hardware accelerator hwaccel.
+ */
+void av_register_hwaccel(AVHWAccel *hwaccel);
+
+/**
+ * If hwaccel is NULL, returns the first registered hardware accelerator,
+ * if hwaccel is non-NULL, returns the next registered hardware accelerator
+ * after hwaccel, or NULL if hwaccel is the last one.
+ */
+AVHWAccel *av_hwaccel_next(AVHWAccel *hwaccel);
+
+
+/**
+ * Lock operation used by lockmgr
+ */
+enum AVLockOp {
+ AV_LOCK_CREATE, ///< Create a mutex
+ AV_LOCK_OBTAIN, ///< Lock the mutex
+ AV_LOCK_RELEASE, ///< Unlock the mutex
+ AV_LOCK_DESTROY, ///< Free mutex resources
+};
+
+/**
+ * Register a user provided lock manager supporting the operations
+ * specified by AVLockOp. mutex points to a (void *) where the
+ * lockmgr should store/get a pointer to a user allocated mutex. It's
+ * NULL upon AV_LOCK_CREATE and != NULL for all other ops.
+ *
+ * @param cb User defined callback. Note: Libav may invoke calls to this
+ * callback during the call to av_lockmgr_register().
+ * Thus, the application must be prepared to handle that.
+ * If cb is set to NULL the lockmgr will be unregistered.
+ * Also note that during unregistration the previously registered
+ * lockmgr callback may also be invoked.
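+ *
+ * A minimal sketch of such a callback (non-normative; assumes <pthread.h>):
+ * @code
+ * static int lock_cb(void **mtx, enum AVLockOp op)
+ * {
+ *     switch (op) {
+ *     case AV_LOCK_CREATE:
+ *         *mtx = malloc(sizeof(pthread_mutex_t));
+ *         return *mtx ? pthread_mutex_init(*mtx, NULL) : 1;
+ *     case AV_LOCK_OBTAIN:
+ *         return pthread_mutex_lock(*mtx);
+ *     case AV_LOCK_RELEASE:
+ *         return pthread_mutex_unlock(*mtx);
+ *     case AV_LOCK_DESTROY:
+ *         pthread_mutex_destroy(*mtx);
+ *         free(*mtx);
+ *         return 0;
+ *     }
+ *     return 1;
+ * }
+ * // av_lockmgr_register(lock_cb);
+ * @endcode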
+ */
+int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op));
+
+/**
+ * Get the type of the given codec.
+ */
+enum AVMediaType avcodec_get_type(enum AVCodecID codec_id);
+
+/**
+ * @return a positive value if s is open (i.e. avcodec_open2() was called on it
+ * with no corresponding avcodec_close()), 0 otherwise.
+ */
+int avcodec_is_open(AVCodecContext *s);
+
+/**
+ * @return a non-zero number if codec is an encoder, zero otherwise
+ */
+int av_codec_is_encoder(const AVCodec *codec);
+
+/**
+ * @return a non-zero number if codec is a decoder, zero otherwise
+ */
+int av_codec_is_decoder(const AVCodec *codec);
+
+/**
+ * @return descriptor for given codec ID or NULL if no descriptor exists.
+ */
+const AVCodecDescriptor *avcodec_descriptor_get(enum AVCodecID id);
+
+/**
+ * Iterate over all codec descriptors known to libavcodec.
+ *
+ * @param prev previous descriptor. NULL to get the first descriptor.
+ *
+ * @return next descriptor or NULL after the last descriptor
+ */
+const AVCodecDescriptor *avcodec_descriptor_next(const AVCodecDescriptor *prev);
+
+/**
+ * @return codec descriptor with the given name or NULL if no such descriptor
+ * exists.
+ */
+const AVCodecDescriptor *avcodec_descriptor_get_by_name(const char *name);
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_AVCODEC_H */
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavcodec/avfft.h b/dom/media/platforms/ffmpeg/libav54/include/libavcodec/avfft.h
new file mode 100644
index 000000000..b89618258
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavcodec/avfft.h
@@ -0,0 +1,116 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AVFFT_H
+#define AVCODEC_AVFFT_H
+
+/**
+ * @file
+ * @ingroup lavc_fft
+ * FFT functions
+ */
+
+/**
+ * @defgroup lavc_fft FFT functions
+ * @ingroup lavc_misc
+ *
+ * @{
+ */
+
+typedef float FFTSample;
+
+typedef struct FFTComplex {
+ FFTSample re, im;
+} FFTComplex;
+
+typedef struct FFTContext FFTContext;
+
+/**
+ * Set up a complex FFT.
+ * @param nbits log2 of the length of the input array
+ * @param inverse if 0 perform the forward transform, if 1 perform the inverse
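+ *
+ * A minimal usage sketch (non-normative; buf is a caller-provided
+ * FFTComplex[1 << nbits] array):
+ * @code
+ * FFTContext *fft = av_fft_init(10, 0);   // 1024-point forward transform
+ * av_fft_permute(fft, buf);               // reorder input as required
+ * av_fft_calc(fft, buf);                  // in-place FFT, no normalization
+ * av_fft_end(fft);
+ * @endcode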
+ */
+FFTContext *av_fft_init(int nbits, int inverse);
+
+/**
+ * Do the permutation needed BEFORE calling av_fft_calc().
+ */
+void av_fft_permute(FFTContext *s, FFTComplex *z);
+
+/**
+ * Do a complex FFT with the parameters defined in av_fft_init(). The
+ * input data must be permuted before. No 1.0/sqrt(n) normalization is done.
+ */
+void av_fft_calc(FFTContext *s, FFTComplex *z);
+
+void av_fft_end(FFTContext *s);
+
+FFTContext *av_mdct_init(int nbits, int inverse, double scale);
+void av_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input);
+void av_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input);
+void av_mdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input);
+void av_mdct_end(FFTContext *s);
+
+/* Real Discrete Fourier Transform */
+
+enum RDFTransformType {
+ DFT_R2C,
+ IDFT_C2R,
+ IDFT_R2C,
+ DFT_C2R,
+};
+
+typedef struct RDFTContext RDFTContext;
+
+/**
+ * Set up a real FFT.
+ * @param nbits log2 of the length of the input array
+ * @param trans the type of transform
+ */
+RDFTContext *av_rdft_init(int nbits, enum RDFTransformType trans);
+void av_rdft_calc(RDFTContext *s, FFTSample *data);
+void av_rdft_end(RDFTContext *s);
+
+/* Discrete Cosine Transform */
+
+typedef struct DCTContext DCTContext;
+
+enum DCTTransformType {
+ DCT_II = 0,
+ DCT_III,
+ DCT_I,
+ DST_I,
+};
+
+/**
+ * Set up DCT.
+ * @param nbits size of the input array:
+ * (1 << nbits) for DCT-II, DCT-III and DST-I
+ * (1 << nbits) + 1 for DCT-I
+ *
+ * @note the first element of the input of DST-I is ignored
+ */
+DCTContext *av_dct_init(int nbits, enum DCTTransformType type);
+void av_dct_calc(DCTContext *s, FFTSample *data);
+void av_dct_end (DCTContext *s);
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_AVFFT_H */
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavcodec/dxva2.h b/dom/media/platforms/ffmpeg/libav54/include/libavcodec/dxva2.h
new file mode 100644
index 000000000..c06f1f333
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavcodec/dxva2.h
@@ -0,0 +1,88 @@
+/*
+ * DXVA2 HW acceleration
+ *
+ * copyright (c) 2009 Laurent Aimar
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_DXVA_H
+#define AVCODEC_DXVA_H
+
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_dxva2
+ * Public libavcodec DXVA2 header.
+ */
+
+#include <stdint.h>
+
+#include <d3d9.h>
+#include <dxva2api.h>
+
+/**
+ * @defgroup lavc_codec_hwaccel_dxva2 DXVA2
+ * @ingroup lavc_codec_hwaccel
+ *
+ * @{
+ */
+
+#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Workaround for DXVA2 and old UVD/UVD+ ATI video cards
+
+/**
+ * This structure is used to provide the necessary configurations and data
+ * to the DXVA2 Libav HWAccel implementation.
+ *
+ * The application must make it available as AVCodecContext.hwaccel_context.
+ */
+struct dxva_context {
+ /**
+ * DXVA2 decoder object
+ */
+ IDirectXVideoDecoder *decoder;
+
+ /**
+ * DXVA2 configuration used to create the decoder
+ */
+ const DXVA2_ConfigPictureDecode *cfg;
+
+ /**
+ * The number of surfaces in the surface array
+ */
+ unsigned surface_count;
+
+ /**
+ * The array of Direct3D surfaces used to create the decoder
+ */
+ LPDIRECT3DSURFACE9 *surface;
+
+ /**
+ * A bit field configuring the workarounds needed for using the decoder
+ */
+ uint64_t workaround;
+
+ /**
+ * Private to the Libav AVHWAccel implementation
+ */
+ unsigned report_id;
+};
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_DXVA_H */
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavcodec/old_codec_ids.h b/dom/media/platforms/ffmpeg/libav54/include/libavcodec/old_codec_ids.h
new file mode 100644
index 000000000..2b72e38d2
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavcodec/old_codec_ids.h
@@ -0,0 +1,366 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_OLD_CODEC_IDS_H
+#define AVCODEC_OLD_CODEC_IDS_H
+
+/*
+ * This header exists to prevent new codec IDs from being accidentally added to
+ * the deprecated list.
+ * Do not include it directly. It will be removed on the next major bump.
+ *
+ * Do not add new items to this list. Use the AVCodecID enum instead.
+ */
+
+ CODEC_ID_NONE = AV_CODEC_ID_NONE,
+
+ /* video codecs */
+ CODEC_ID_MPEG1VIDEO,
+ CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding
+ CODEC_ID_MPEG2VIDEO_XVMC,
+ CODEC_ID_H261,
+ CODEC_ID_H263,
+ CODEC_ID_RV10,
+ CODEC_ID_RV20,
+ CODEC_ID_MJPEG,
+ CODEC_ID_MJPEGB,
+ CODEC_ID_LJPEG,
+ CODEC_ID_SP5X,
+ CODEC_ID_JPEGLS,
+ CODEC_ID_MPEG4,
+ CODEC_ID_RAWVIDEO,
+ CODEC_ID_MSMPEG4V1,
+ CODEC_ID_MSMPEG4V2,
+ CODEC_ID_MSMPEG4V3,
+ CODEC_ID_WMV1,
+ CODEC_ID_WMV2,
+ CODEC_ID_H263P,
+ CODEC_ID_H263I,
+ CODEC_ID_FLV1,
+ CODEC_ID_SVQ1,
+ CODEC_ID_SVQ3,
+ CODEC_ID_DVVIDEO,
+ CODEC_ID_HUFFYUV,
+ CODEC_ID_CYUV,
+ CODEC_ID_H264,
+ CODEC_ID_INDEO3,
+ CODEC_ID_VP3,
+ CODEC_ID_THEORA,
+ CODEC_ID_ASV1,
+ CODEC_ID_ASV2,
+ CODEC_ID_FFV1,
+ CODEC_ID_4XM,
+ CODEC_ID_VCR1,
+ CODEC_ID_CLJR,
+ CODEC_ID_MDEC,
+ CODEC_ID_ROQ,
+ CODEC_ID_INTERPLAY_VIDEO,
+ CODEC_ID_XAN_WC3,
+ CODEC_ID_XAN_WC4,
+ CODEC_ID_RPZA,
+ CODEC_ID_CINEPAK,
+ CODEC_ID_WS_VQA,
+ CODEC_ID_MSRLE,
+ CODEC_ID_MSVIDEO1,
+ CODEC_ID_IDCIN,
+ CODEC_ID_8BPS,
+ CODEC_ID_SMC,
+ CODEC_ID_FLIC,
+ CODEC_ID_TRUEMOTION1,
+ CODEC_ID_VMDVIDEO,
+ CODEC_ID_MSZH,
+ CODEC_ID_ZLIB,
+ CODEC_ID_QTRLE,
+ CODEC_ID_SNOW,
+ CODEC_ID_TSCC,
+ CODEC_ID_ULTI,
+ CODEC_ID_QDRAW,
+ CODEC_ID_VIXL,
+ CODEC_ID_QPEG,
+ CODEC_ID_PNG,
+ CODEC_ID_PPM,
+ CODEC_ID_PBM,
+ CODEC_ID_PGM,
+ CODEC_ID_PGMYUV,
+ CODEC_ID_PAM,
+ CODEC_ID_FFVHUFF,
+ CODEC_ID_RV30,
+ CODEC_ID_RV40,
+ CODEC_ID_VC1,
+ CODEC_ID_WMV3,
+ CODEC_ID_LOCO,
+ CODEC_ID_WNV1,
+ CODEC_ID_AASC,
+ CODEC_ID_INDEO2,
+ CODEC_ID_FRAPS,
+ CODEC_ID_TRUEMOTION2,
+ CODEC_ID_BMP,
+ CODEC_ID_CSCD,
+ CODEC_ID_MMVIDEO,
+ CODEC_ID_ZMBV,
+ CODEC_ID_AVS,
+ CODEC_ID_SMACKVIDEO,
+ CODEC_ID_NUV,
+ CODEC_ID_KMVC,
+ CODEC_ID_FLASHSV,
+ CODEC_ID_CAVS,
+ CODEC_ID_JPEG2000,
+ CODEC_ID_VMNC,
+ CODEC_ID_VP5,
+ CODEC_ID_VP6,
+ CODEC_ID_VP6F,
+ CODEC_ID_TARGA,
+ CODEC_ID_DSICINVIDEO,
+ CODEC_ID_TIERTEXSEQVIDEO,
+ CODEC_ID_TIFF,
+ CODEC_ID_GIF,
+ CODEC_ID_DXA,
+ CODEC_ID_DNXHD,
+ CODEC_ID_THP,
+ CODEC_ID_SGI,
+ CODEC_ID_C93,
+ CODEC_ID_BETHSOFTVID,
+ CODEC_ID_PTX,
+ CODEC_ID_TXD,
+ CODEC_ID_VP6A,
+ CODEC_ID_AMV,
+ CODEC_ID_VB,
+ CODEC_ID_PCX,
+ CODEC_ID_SUNRAST,
+ CODEC_ID_INDEO4,
+ CODEC_ID_INDEO5,
+ CODEC_ID_MIMIC,
+ CODEC_ID_RL2,
+ CODEC_ID_ESCAPE124,
+ CODEC_ID_DIRAC,
+ CODEC_ID_BFI,
+ CODEC_ID_CMV,
+ CODEC_ID_MOTIONPIXELS,
+ CODEC_ID_TGV,
+ CODEC_ID_TGQ,
+ CODEC_ID_TQI,
+ CODEC_ID_AURA,
+ CODEC_ID_AURA2,
+ CODEC_ID_V210X,
+ CODEC_ID_TMV,
+ CODEC_ID_V210,
+ CODEC_ID_DPX,
+ CODEC_ID_MAD,
+ CODEC_ID_FRWU,
+ CODEC_ID_FLASHSV2,
+ CODEC_ID_CDGRAPHICS,
+ CODEC_ID_R210,
+ CODEC_ID_ANM,
+ CODEC_ID_BINKVIDEO,
+ CODEC_ID_IFF_ILBM,
+ CODEC_ID_IFF_BYTERUN1,
+ CODEC_ID_KGV1,
+ CODEC_ID_YOP,
+ CODEC_ID_VP8,
+ CODEC_ID_PICTOR,
+ CODEC_ID_ANSI,
+ CODEC_ID_A64_MULTI,
+ CODEC_ID_A64_MULTI5,
+ CODEC_ID_R10K,
+ CODEC_ID_MXPEG,
+ CODEC_ID_LAGARITH,
+ CODEC_ID_PRORES,
+ CODEC_ID_JV,
+ CODEC_ID_DFA,
+ CODEC_ID_WMV3IMAGE,
+ CODEC_ID_VC1IMAGE,
+ CODEC_ID_UTVIDEO,
+ CODEC_ID_BMV_VIDEO,
+ CODEC_ID_VBLE,
+ CODEC_ID_DXTORY,
+ CODEC_ID_V410,
+ CODEC_ID_XWD,
+ CODEC_ID_CDXL,
+ CODEC_ID_XBM,
+ CODEC_ID_ZEROCODEC,
+ CODEC_ID_MSS1,
+ CODEC_ID_MSA1,
+ CODEC_ID_TSCC2,
+ CODEC_ID_MTS2,
+ CODEC_ID_CLLC,
+
+ /* various PCM "codecs" */
+ CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
+ CODEC_ID_PCM_S16LE = 0x10000,
+ CODEC_ID_PCM_S16BE,
+ CODEC_ID_PCM_U16LE,
+ CODEC_ID_PCM_U16BE,
+ CODEC_ID_PCM_S8,
+ CODEC_ID_PCM_U8,
+ CODEC_ID_PCM_MULAW,
+ CODEC_ID_PCM_ALAW,
+ CODEC_ID_PCM_S32LE,
+ CODEC_ID_PCM_S32BE,
+ CODEC_ID_PCM_U32LE,
+ CODEC_ID_PCM_U32BE,
+ CODEC_ID_PCM_S24LE,
+ CODEC_ID_PCM_S24BE,
+ CODEC_ID_PCM_U24LE,
+ CODEC_ID_PCM_U24BE,
+ CODEC_ID_PCM_S24DAUD,
+ CODEC_ID_PCM_ZORK,
+ CODEC_ID_PCM_S16LE_PLANAR,
+ CODEC_ID_PCM_DVD,
+ CODEC_ID_PCM_F32BE,
+ CODEC_ID_PCM_F32LE,
+ CODEC_ID_PCM_F64BE,
+ CODEC_ID_PCM_F64LE,
+ CODEC_ID_PCM_BLURAY,
+ CODEC_ID_PCM_LXF,
+ CODEC_ID_S302M,
+ CODEC_ID_PCM_S8_PLANAR,
+
+ /* various ADPCM codecs */
+ CODEC_ID_ADPCM_IMA_QT = 0x11000,
+ CODEC_ID_ADPCM_IMA_WAV,
+ CODEC_ID_ADPCM_IMA_DK3,
+ CODEC_ID_ADPCM_IMA_DK4,
+ CODEC_ID_ADPCM_IMA_WS,
+ CODEC_ID_ADPCM_IMA_SMJPEG,
+ CODEC_ID_ADPCM_MS,
+ CODEC_ID_ADPCM_4XM,
+ CODEC_ID_ADPCM_XA,
+ CODEC_ID_ADPCM_ADX,
+ CODEC_ID_ADPCM_EA,
+ CODEC_ID_ADPCM_G726,
+ CODEC_ID_ADPCM_CT,
+ CODEC_ID_ADPCM_SWF,
+ CODEC_ID_ADPCM_YAMAHA,
+ CODEC_ID_ADPCM_SBPRO_4,
+ CODEC_ID_ADPCM_SBPRO_3,
+ CODEC_ID_ADPCM_SBPRO_2,
+ CODEC_ID_ADPCM_THP,
+ CODEC_ID_ADPCM_IMA_AMV,
+ CODEC_ID_ADPCM_EA_R1,
+ CODEC_ID_ADPCM_EA_R3,
+ CODEC_ID_ADPCM_EA_R2,
+ CODEC_ID_ADPCM_IMA_EA_SEAD,
+ CODEC_ID_ADPCM_IMA_EA_EACS,
+ CODEC_ID_ADPCM_EA_XAS,
+ CODEC_ID_ADPCM_EA_MAXIS_XA,
+ CODEC_ID_ADPCM_IMA_ISS,
+ CODEC_ID_ADPCM_G722,
+ CODEC_ID_ADPCM_IMA_APC,
+
+ /* AMR */
+ CODEC_ID_AMR_NB = 0x12000,
+ CODEC_ID_AMR_WB,
+
+ /* RealAudio codecs*/
+ CODEC_ID_RA_144 = 0x13000,
+ CODEC_ID_RA_288,
+
+ /* various DPCM codecs */
+ CODEC_ID_ROQ_DPCM = 0x14000,
+ CODEC_ID_INTERPLAY_DPCM,
+ CODEC_ID_XAN_DPCM,
+ CODEC_ID_SOL_DPCM,
+
+ /* audio codecs */
+ CODEC_ID_MP2 = 0x15000,
+ CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3
+ CODEC_ID_AAC,
+ CODEC_ID_AC3,
+ CODEC_ID_DTS,
+ CODEC_ID_VORBIS,
+ CODEC_ID_DVAUDIO,
+ CODEC_ID_WMAV1,
+ CODEC_ID_WMAV2,
+ CODEC_ID_MACE3,
+ CODEC_ID_MACE6,
+ CODEC_ID_VMDAUDIO,
+ CODEC_ID_FLAC,
+ CODEC_ID_MP3ADU,
+ CODEC_ID_MP3ON4,
+ CODEC_ID_SHORTEN,
+ CODEC_ID_ALAC,
+ CODEC_ID_WESTWOOD_SND1,
+ CODEC_ID_GSM, ///< as in Berlin toast format
+ CODEC_ID_QDM2,
+ CODEC_ID_COOK,
+ CODEC_ID_TRUESPEECH,
+ CODEC_ID_TTA,
+ CODEC_ID_SMACKAUDIO,
+ CODEC_ID_QCELP,
+ CODEC_ID_WAVPACK,
+ CODEC_ID_DSICINAUDIO,
+ CODEC_ID_IMC,
+ CODEC_ID_MUSEPACK7,
+ CODEC_ID_MLP,
+ CODEC_ID_GSM_MS, /* as found in WAV */
+ CODEC_ID_ATRAC3,
+ CODEC_ID_VOXWARE,
+ CODEC_ID_APE,
+ CODEC_ID_NELLYMOSER,
+ CODEC_ID_MUSEPACK8,
+ CODEC_ID_SPEEX,
+ CODEC_ID_WMAVOICE,
+ CODEC_ID_WMAPRO,
+ CODEC_ID_WMALOSSLESS,
+ CODEC_ID_ATRAC3P,
+ CODEC_ID_EAC3,
+ CODEC_ID_SIPR,
+ CODEC_ID_MP1,
+ CODEC_ID_TWINVQ,
+ CODEC_ID_TRUEHD,
+ CODEC_ID_MP4ALS,
+ CODEC_ID_ATRAC1,
+ CODEC_ID_BINKAUDIO_RDFT,
+ CODEC_ID_BINKAUDIO_DCT,
+ CODEC_ID_AAC_LATM,
+ CODEC_ID_QDMC,
+ CODEC_ID_CELT,
+ CODEC_ID_G723_1,
+ CODEC_ID_G729,
+ CODEC_ID_8SVX_EXP,
+ CODEC_ID_8SVX_FIB,
+ CODEC_ID_BMV_AUDIO,
+ CODEC_ID_RALF,
+ CODEC_ID_IAC,
+ CODEC_ID_ILBC,
+
+ /* subtitle codecs */
+ CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
+ CODEC_ID_DVD_SUBTITLE = 0x17000,
+ CODEC_ID_DVB_SUBTITLE,
+ CODEC_ID_TEXT, ///< raw UTF-8 text
+ CODEC_ID_XSUB,
+ CODEC_ID_SSA,
+ CODEC_ID_MOV_TEXT,
+ CODEC_ID_HDMV_PGS_SUBTITLE,
+ CODEC_ID_DVB_TELETEXT,
+ CODEC_ID_SRT,
+
+ /* other specific kind of codecs (generally used for attachments) */
+ CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs.
+ CODEC_ID_TTF = 0x18000,
+
+ CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like CODEC_ID_NONE) but lavf should attempt to identify it
+
+ CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS
+ * stream (only used by libavformat) */
+ CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems
+ * stream (only used by libavformat) */
+ CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information.
+
+#endif /* AVCODEC_OLD_CODEC_IDS_H */
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavcodec/vaapi.h b/dom/media/platforms/ffmpeg/libav54/include/libavcodec/vaapi.h
new file mode 100644
index 000000000..39e88259d
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavcodec/vaapi.h
@@ -0,0 +1,173 @@
+/*
+ * Video Acceleration API (shared data between Libav and the video player)
+ * HW decode acceleration for MPEG-2, MPEG-4, H.264 and VC-1
+ *
+ * Copyright (C) 2008-2009 Splitted-Desktop Systems
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VAAPI_H
+#define AVCODEC_VAAPI_H
+
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_vaapi
+ * Public libavcodec VA API header.
+ */
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavc_codec_hwaccel_vaapi VA API Decoding
+ * @ingroup lavc_codec_hwaccel
+ * @{
+ */
+
+/**
+ * This structure is used to share data between the Libav library and
+ * the client video application.
+ * This shall be zero-allocated and available as
+ * AVCodecContext.hwaccel_context. All user members can be set once
+ * during initialization or through each AVCodecContext.get_buffer()
+ * function call. In any case, they must be valid prior to calling
+ * decoding functions.
+ */
+struct vaapi_context {
+ /**
+ * Window system dependent data
+ *
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ void *display;
+
+ /**
+ * Configuration ID
+ *
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ uint32_t config_id;
+
+ /**
+ * Context ID (video decode pipeline)
+ *
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ uint32_t context_id;
+
+ /**
+ * VAPictureParameterBuffer ID
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ uint32_t pic_param_buf_id;
+
+ /**
+ * VAIQMatrixBuffer ID
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ uint32_t iq_matrix_buf_id;
+
+ /**
+ * VABitPlaneBuffer ID (for VC-1 decoding)
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ uint32_t bitplane_buf_id;
+
+ /**
+ * Slice parameter/data buffer IDs
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ uint32_t *slice_buf_ids;
+
+ /**
+ * Number of effective slice buffer IDs to send to the HW
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ unsigned int n_slice_buf_ids;
+
+ /**
+ * Size of pre-allocated slice_buf_ids
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ unsigned int slice_buf_ids_alloc;
+
+ /**
+ * Pointer to VASliceParameterBuffers
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ void *slice_params;
+
+ /**
+ * Size of a VASliceParameterBuffer element
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ unsigned int slice_param_size;
+
+ /**
+ * Size of pre-allocated slice_params
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ unsigned int slice_params_alloc;
+
+ /**
+ * Number of slices currently filled in
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ unsigned int slice_count;
+
+ /**
+ * Pointer to slice data buffer base
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ const uint8_t *slice_data;
+
+ /**
+ * Current size of slice data
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ uint32_t slice_data_size;
+};
+
+/* @} */
+
+#endif /* AVCODEC_VAAPI_H */
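
A minimal sketch of the application side of the handshake described above: the user fills in the window-system fields and hands the struct to the decoder, after which libavcodec populates the buffer IDs. The libva setup calls and the AVCodecContext (declared in avcodec.h, not in this header) are assumed to exist elsewhere; this is illustrative, not a complete integration.

  #include <stdlib.h>
  #include <libavcodec/avcodec.h>   /* assumed: declares AVCodecContext */
  #include <libavcodec/vaapi.h>

  /* display/config_id/context_id are assumed to come from the usual libva
   * calls (vaInitialize, vaCreateConfig, vaCreateContext), not shown here. */
  static int attach_vaapi(AVCodecContext *avctx, void *va_display,
                          uint32_t config_id, uint32_t context_id)
  {
      struct vaapi_context *vactx = calloc(1, sizeof(*vactx)); /* zero-allocated, as required */
      if (!vactx)
          return -1;
      vactx->display    = va_display;   /* set by user */
      vactx->config_id  = config_id;    /* set by user */
      vactx->context_id = context_id;   /* set by user */
      avctx->hwaccel_context = vactx;   /* remaining fields are filled by libavcodec */
      return 0;
  }
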
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavcodec/vda.h b/dom/media/platforms/ffmpeg/libav54/include/libavcodec/vda.h
new file mode 100644
index 000000000..f0ec2bfec
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavcodec/vda.h
@@ -0,0 +1,217 @@
+/*
+ * VDA HW acceleration
+ *
+ * copyright (c) 2011 Sebastien Zwickert
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VDA_H
+#define AVCODEC_VDA_H
+
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_vda
+ * Public libavcodec VDA header.
+ */
+
+#include "libavcodec/version.h"
+
+#if FF_API_VDA_ASYNC
+#include <pthread.h>
+#endif
+
+#include <stdint.h>
+
+// emmintrin.h is unable to compile with -std=c99 -Werror=missing-prototypes
+// http://openradar.appspot.com/8026390
+#undef __GNUC_STDC_INLINE__
+
+#define Picture QuickdrawPicture
+#include <VideoDecodeAcceleration/VDADecoder.h>
+#undef Picture
+
+/**
+ * @defgroup lavc_codec_hwaccel_vda VDA
+ * @ingroup lavc_codec_hwaccel
+ *
+ * @{
+ */
+
+#if FF_API_VDA_ASYNC
+/**
+ * This structure is used to store decoded frame information and data.
+ *
+ * @deprecated Use synchronous decoding mode.
+ */
+typedef struct vda_frame {
+ /**
+ * The PTS of the frame.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by libavcodec.
+ */
+ int64_t pts;
+
+ /**
+ * The CoreVideo buffer that contains the decoded data.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by libavcodec.
+ */
+ CVPixelBufferRef cv_buffer;
+
+ /**
+ * A pointer to the next frame.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by libavcodec.
+ */
+ struct vda_frame *next_frame;
+} vda_frame;
+#endif
+
+/**
+ * This structure is used to provide the necessary configurations and data
+ * to the VDA Libav HWAccel implementation.
+ *
+ * The application must make it available as AVCodecContext.hwaccel_context.
+ */
+struct vda_context {
+ /**
+ * VDA decoder object.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by libavcodec.
+ */
+ VDADecoder decoder;
+
+ /**
+ * The Core Video pixel buffer that contains the current image data.
+ *
+     * - encoding: unused
+     * - decoding: Set by libavcodec. Unset by user.
+ */
+ CVPixelBufferRef cv_buffer;
+
+ /**
+ * Use the hardware decoder in synchronous mode.
+ *
+     * - encoding: unused
+     * - decoding: Set by user.
+ */
+ int use_sync_decoding;
+
+#if FF_API_VDA_ASYNC
+ /**
+ * VDA frames queue ordered by presentation timestamp.
+ *
+ * @deprecated Use synchronous decoding mode.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by libavcodec.
+ */
+ vda_frame *queue;
+
+ /**
+ * Mutex for locking queue operations.
+ *
+ * @deprecated Use synchronous decoding mode.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by libavcodec.
+ */
+ pthread_mutex_t queue_mutex;
+#endif
+
+ /**
+ * The frame width.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by user.
+ */
+ int width;
+
+ /**
+ * The frame height.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by user.
+ */
+ int height;
+
+ /**
+ * The frame format.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by user.
+ */
+ int format;
+
+ /**
+ * The pixel format for output image buffers.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by user.
+ */
+ OSType cv_pix_fmt_type;
+
+ /**
+ * The current bitstream buffer.
+ */
+ uint8_t *priv_bitstream;
+
+ /**
+ * The current size of the bitstream.
+ */
+ int priv_bitstream_size;
+
+ /**
+ * The reference size used for fast reallocation.
+ */
+ int priv_allocated_size;
+};
+
+/** Create the video decoder. */
+int ff_vda_create_decoder(struct vda_context *vda_ctx,
+ uint8_t *extradata,
+ int extradata_size);
+
+/** Destroy the video decoder. */
+int ff_vda_destroy_decoder(struct vda_context *vda_ctx);
+
+#if FF_API_VDA_ASYNC
+/**
+ * Return the top frame of the queue.
+ *
+ * @deprecated Use synchronous decoding mode.
+ */
+vda_frame *ff_vda_queue_pop(struct vda_context *vda_ctx);
+
+/**
+ * Release the given frame.
+ *
+ * @deprecated Use synchronous decoding mode.
+ */
+void ff_vda_release_vda_frame(vda_frame *frame);
+#endif
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_VDA_H */
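
A hedged sketch of the synchronous setup that the use_sync_decoding field above points at. The 'avc1' FourCC, the CoreVideo pixel format constant, and the AVCodecContext fields are assumptions drawn from typical H.264-over-VDA usage, not from this header.

  #include <string.h>
  #include <libavcodec/avcodec.h>   /* assumed: AVCodecContext, extradata */
  #include <libavcodec/vda.h>

  static int setup_vda(struct vda_context *ctx, AVCodecContext *avctx)
  {
      memset(ctx, 0, sizeof(*ctx));
      ctx->use_sync_decoding = 1;                  /* set by user */
      ctx->width             = avctx->width;       /* set by user */
      ctx->height            = avctx->height;      /* set by user */
      ctx->format            = 'avc1';             /* assumed H.264 FourCC */
      ctx->cv_pix_fmt_type   = kCVPixelFormatType_422YpCbCr8;  /* assumed output format */

      avctx->hwaccel_context = ctx;
      return ff_vda_create_decoder(ctx, avctx->extradata, avctx->extradata_size);
  }
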
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavcodec/vdpau.h b/dom/media/platforms/ffmpeg/libav54/include/libavcodec/vdpau.h
new file mode 100644
index 000000000..241ff1905
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavcodec/vdpau.h
@@ -0,0 +1,94 @@
+/*
+ * The Video Decode and Presentation API for UNIX (VDPAU) is used for
+ * hardware-accelerated decoding of MPEG-1/2, H.264 and VC-1.
+ *
+ * Copyright (C) 2008 NVIDIA
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VDPAU_H
+#define AVCODEC_VDPAU_H
+
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_vdpau
+ * Public libavcodec VDPAU header.
+ */
+
+
+/**
+ * @defgroup lavc_codec_hwaccel_vdpau VDPAU Decoder and Renderer
+ * @ingroup lavc_codec_hwaccel
+ *
+ * VDPAU hardware acceleration has two modules
+ * - VDPAU decoding
+ * - VDPAU presentation
+ *
+ * The VDPAU decoding module parses all headers using Libav
+ * parsing mechanisms and uses VDPAU for the actual decoding.
+ *
+ * As per the current implementation, the actual decoding
+ * and rendering (API calls) are done as part of the VDPAU
+ * presentation (vo_vdpau.c) module.
+ *
+ * @{
+ */
+
+#include <vdpau/vdpau.h>
+#include <vdpau/vdpau_x11.h>
+
+/** @brief The videoSurface is used for rendering. */
+#define FF_VDPAU_STATE_USED_FOR_RENDER 1
+
+/**
+ * @brief The videoSurface is needed for reference/prediction.
+ * The codec manipulates this.
+ */
+#define FF_VDPAU_STATE_USED_FOR_REFERENCE 2
+
+/**
+ * @brief This structure is used as a callback between the Libav
+ * decoder (vd_) and presentation (vo_) module.
+ * This is used for defining a video frame containing surface,
+ * picture parameter, bitstream information etc which are passed
+ * between the Libav decoder and its clients.
+ */
+struct vdpau_render_state {
+ VdpVideoSurface surface; ///< Used as rendered surface, never changed.
+
+ int state; ///< Holds FF_VDPAU_STATE_* values.
+
+ /** picture parameter information for all supported codecs */
+ union VdpPictureInfo {
+ VdpPictureInfoH264 h264;
+ VdpPictureInfoMPEG1Or2 mpeg;
+ VdpPictureInfoVC1 vc1;
+ VdpPictureInfoMPEG4Part2 mpeg4;
+ } info;
+
+ /** Describe size/location of the compressed video data.
+ Set to 0 when freeing bitstream_buffers. */
+ int bitstream_buffers_allocated;
+ int bitstream_buffers_used;
+ /** The user is responsible for freeing this buffer using av_freep(). */
+ VdpBitstreamBuffer *bitstream_buffers;
+};
+
+/* @}*/
+
+#endif /* AVCODEC_VDPAU_H */
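
The struct above is plain shared state between decoder and presenter; the sketch below shows how a presentation module might consume it, with the actual VDPAU render call left where the comment marks it. av_freep() is the routine named in the bitstream_buffers comment and lives in libavutil's memory API, not in this header.

  #include <libavcodec/vdpau.h>
  #include <libavutil/mem.h>   /* av_freep() */

  static void present_and_release(struct vdpau_render_state *rs)
  {
      if (rs->state & FF_VDPAU_STATE_USED_FOR_RENDER) {
          /* hand rs->surface to the VDPAU presentation queue here */
          rs->state &= ~FF_VDPAU_STATE_USED_FOR_RENDER;
      }
      if (!(rs->state & FF_VDPAU_STATE_USED_FOR_REFERENCE)) {
          av_freep(&rs->bitstream_buffers);         /* user-owned buffer */
          rs->bitstream_buffers_allocated = 0;      /* set to 0 when freeing, per above */
          rs->bitstream_buffers_used      = 0;
      }
  }
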
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavcodec/version.h b/dom/media/platforms/ffmpeg/libav54/include/libavcodec/version.h
new file mode 100644
index 000000000..348ce99f2
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavcodec/version.h
@@ -0,0 +1,95 @@
+/*
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VERSION_H
+#define AVCODEC_VERSION_H
+
+/**
+ * @file
+ * @ingroup libavc
+ * Libavcodec version macros.
+ */
+
+#define LIBAVCODEC_VERSION_MAJOR 54
+#define LIBAVCODEC_VERSION_MINOR 35
+#define LIBAVCODEC_VERSION_MICRO 0
+
+#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
+ LIBAVCODEC_VERSION_MINOR, \
+ LIBAVCODEC_VERSION_MICRO)
+#define LIBAVCODEC_VERSION AV_VERSION(LIBAVCODEC_VERSION_MAJOR, \
+ LIBAVCODEC_VERSION_MINOR, \
+ LIBAVCODEC_VERSION_MICRO)
+#define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT
+
+#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION)
+
+/**
+ * FF_API_* defines may be placed below to indicate public API that will be
+ * dropped at a future version bump. The defines themselves are not part of
+ * the public API and may change, break or disappear at any time.
+ */
+
+#ifndef FF_API_REQUEST_CHANNELS
+#define FF_API_REQUEST_CHANNELS (LIBAVCODEC_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_OLD_DECODE_AUDIO
+#define FF_API_OLD_DECODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_OLD_ENCODE_AUDIO
+#define FF_API_OLD_ENCODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_OLD_ENCODE_VIDEO
+#define FF_API_OLD_ENCODE_VIDEO (LIBAVCODEC_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_MPV_GLOBAL_OPTS
+#define FF_API_MPV_GLOBAL_OPTS (LIBAVCODEC_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_COLOR_TABLE_ID
+#define FF_API_COLOR_TABLE_ID (LIBAVCODEC_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_INTER_THRESHOLD
+#define FF_API_INTER_THRESHOLD (LIBAVCODEC_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_SUB_ID
+#define FF_API_SUB_ID (LIBAVCODEC_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_DSP_MASK
+#define FF_API_DSP_MASK (LIBAVCODEC_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_FIND_BEST_PIX_FMT
+#define FF_API_FIND_BEST_PIX_FMT (LIBAVCODEC_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_CODEC_ID
+#define FF_API_CODEC_ID (LIBAVCODEC_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_VDA_ASYNC
+#define FF_API_VDA_ASYNC (LIBAVCODEC_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_AVCODEC_RESAMPLE
+#define FF_API_AVCODEC_RESAMPLE (LIBAVCODEC_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_LIBMPEG2
+#define FF_API_LIBMPEG2 (LIBAVCODEC_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_MMI
+#define FF_API_MMI (LIBAVCODEC_VERSION_MAJOR < 55)
+#endif
+
+#endif /* AVCODEC_VERSION_H */
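
These macros are intended for exactly this kind of compile-time and run-time gating in client code. A hedged sketch; avcodec_version() is declared in avcodec.h rather than here, and the guarded block at the end is only a placeholder.

  #include <libavcodec/version.h>
  #include <libavcodec/avcodec.h>   /* assumed: declares avcodec_version() */

  #if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(54, 35, 0)
  #   error "libavcodec 54.35 or newer is required"
  #endif

  static int lavc_matches_headers(void)
  {
      /* Major version of the loaded library must match the headers we built with. */
      return (avcodec_version() >> 16) == LIBAVCODEC_VERSION_MAJOR;
  }

  #if FF_API_CODEC_ID
  /* Old CodecID spellings are still visible while the major version is < 55;
     keeping their uses behind this guard makes the eventual removal painless. */
  #endif
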
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavcodec/xvmc.h b/dom/media/platforms/ffmpeg/libav54/include/libavcodec/xvmc.h
new file mode 100644
index 000000000..1f77e4efc
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavcodec/xvmc.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2003 Ivan Kalvachev
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_XVMC_H
+#define AVCODEC_XVMC_H
+
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_xvmc
+ * Public libavcodec XvMC header.
+ */
+
+#include <X11/extensions/XvMC.h>
+
+#include "avcodec.h"
+
+/**
+ * @defgroup lavc_codec_hwaccel_xvmc XvMC
+ * @ingroup lavc_codec_hwaccel
+ *
+ * @{
+ */
+
+#define AV_XVMC_ID                    0x1DC711C0  /**< special value to ensure that regular pixel routines haven't corrupted the struct;
+                                                       the number is 1337-speak for the letters IDCT MCo (motion compensation) */
+
+struct xvmc_pix_fmt {
+ /** The field contains the special constant value AV_XVMC_ID.
+ It is used as a test that the application correctly uses the API,
+ and that there is no corruption caused by pixel routines.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ int xvmc_id;
+
+ /** Pointer to the block array allocated by XvMCCreateBlocks().
+ The array has to be freed by XvMCDestroyBlocks().
+ Each group of 64 values represents one data block of differential
+ pixel information (in MoCo mode) or coefficients for IDCT.
+ - application - set the pointer during initialization
+ - libavcodec - fills coefficients/pixel data into the array
+ */
+ short* data_blocks;
+
+ /** Pointer to the macroblock description array allocated by
+ XvMCCreateMacroBlocks() and freed by XvMCDestroyMacroBlocks().
+ - application - set the pointer during initialization
+ - libavcodec - fills description data into the array
+ */
+ XvMCMacroBlock* mv_blocks;
+
+ /** Number of macroblock descriptions that can be stored in the mv_blocks
+ array.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ int allocated_mv_blocks;
+
+ /** Number of blocks that can be stored at once in the data_blocks array.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ int allocated_data_blocks;
+
+ /** Indicate that the hardware would interpret data_blocks as IDCT
+ coefficients and perform IDCT on them.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ int idct;
+
+ /** In MoCo mode it indicates that intra macroblocks are assumed to be in
+ unsigned format; same as the XVMC_INTRA_UNSIGNED flag.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ int unsigned_intra;
+
+ /** Pointer to the surface allocated by XvMCCreateSurface().
+ It has to be freed by XvMCDestroySurface() on application exit.
+ It identifies the frame and its state on the video hardware.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ XvMCSurface* p_surface;
+
+/** Set by the decoder before calling ff_draw_horiz_band(),
+ needed by the XvMCRenderSurface function. */
+//@{
+ /** Pointer to the surface used as past reference
+ - application - unchanged
+ - libavcodec - set
+ */
+ XvMCSurface* p_past_surface;
+
+ /** Pointer to the surface used as future reference
+ - application - unchanged
+ - libavcodec - set
+ */
+ XvMCSurface* p_future_surface;
+
+ /** top/bottom field or frame
+ - application - unchanged
+ - libavcodec - set
+ */
+ unsigned int picture_structure;
+
+ /** XVMC_SECOND_FIELD - 1st or 2nd field in the sequence
+ - application - unchanged
+ - libavcodec - set
+ */
+ unsigned int flags;
+//@}
+
+ /** Number of macroblock descriptions in the mv_blocks array
+ that have already been passed to the hardware.
+ - application - zeroes it on get_buffer().
+ A successful ff_draw_horiz_band() may increment it
+                   with filled_mv_blocks_num or zero both.
+ - libavcodec - unchanged
+ */
+ int start_mv_blocks_num;
+
+ /** Number of new macroblock descriptions in the mv_blocks array (after
+ start_mv_blocks_num) that are filled by libavcodec and have to be
+ passed to the hardware.
+ - application - zeroes it on get_buffer() or after successful
+ ff_draw_horiz_band().
+       - libavcodec  - incremented by one for each stored MB
+ */
+ int filled_mv_blocks_num;
+
+ /** Number of the next free data block; one data block consists of
+ 64 short values in the data_blocks array.
+ All blocks before this one have already been claimed by placing their
+ position into the corresponding block description structure field,
+ that are part of the mv_blocks array.
+ - application - zeroes it on get_buffer().
+ A successful ff_draw_horiz_band() may zero it together
+                   with start_mv_blocks_num.
+ - libavcodec - each decoded macroblock increases it by the number
+ of coded blocks it contains.
+ */
+ int next_free_data_block_num;
+};
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_XVMC_H */
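
A hedged initialization sketch for the shared struct above. The block arrays and the surface are assumed to have been created with the XvMC calls named in the field comments (XvMCCreateBlocks, XvMCCreateMacroBlocks, XvMCCreateSurface); only the bookkeeping that libavcodec expects from the application is shown.

  #include <string.h>
  #include <libavcodec/xvmc.h>

  static void init_xvmc_render(struct xvmc_pix_fmt *render,
                               short *data_blocks, int n_data_blocks,
                               XvMCMacroBlock *mv_blocks, int n_mv_blocks,
                               XvMCSurface *surface, int hw_idct)
  {
      memset(render, 0, sizeof(*render));
      render->xvmc_id               = AV_XVMC_ID;   /* corruption-check value */
      render->data_blocks           = data_blocks;
      render->allocated_data_blocks = n_data_blocks;
      render->mv_blocks             = mv_blocks;
      render->allocated_mv_blocks   = n_mv_blocks;
      render->p_surface             = surface;
      render->idct                  = hw_idct;      /* hardware IDCT vs. MoCo-only */
      render->unsigned_intra        = 1;            /* XVMC_INTRA_UNSIGNED semantics */
  }
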
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/adler32.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/adler32.h
new file mode 100644
index 000000000..a8ff6f9d4
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/adler32.h
@@ -0,0 +1,43 @@
+/*
+ * copyright (c) 2006 Mans Rullgard
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_ADLER32_H
+#define AVUTIL_ADLER32_H
+
+#include <stdint.h>
+#include "attributes.h"
+
+/**
+ * @ingroup lavu_crypto
+ * Calculate the Adler32 checksum of a buffer.
+ *
+ * Passing the return value to a subsequent av_adler32_update() call
+ * allows the checksum of multiple buffers to be calculated as though
+ * they were concatenated.
+ *
+ * @param adler initial checksum value
+ * @param buf pointer to input buffer
+ * @param len size of input buffer
+ * @return updated checksum
+ */
+unsigned long av_adler32_update(unsigned long adler, const uint8_t *buf,
+ unsigned int len) av_pure;
+
+#endif /* AVUTIL_ADLER32_H */
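
A short sketch of the chaining described above. The initial seed of 1 is the conventional Adler-32 starting value; the header itself does not state it, so treat that as an assumption.

  #include <libavutil/adler32.h>

  static unsigned long checksum_two_parts(const uint8_t *a, unsigned int a_len,
                                          const uint8_t *b, unsigned int b_len)
  {
      unsigned long sum = 1;                    /* conventional Adler-32 seed (assumed) */
      sum = av_adler32_update(sum, a, a_len);   /* first buffer */
      sum = av_adler32_update(sum, b, b_len);   /* continues as if concatenated */
      return sum;
  }
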
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/aes.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/aes.h
new file mode 100644
index 000000000..edff275b7
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/aes.h
@@ -0,0 +1,67 @@
+/*
+ * copyright (c) 2007 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AES_H
+#define AVUTIL_AES_H
+
+#include <stdint.h>
+
+#include "attributes.h"
+#include "version.h"
+
+/**
+ * @defgroup lavu_aes AES
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+#if FF_API_CONTEXT_SIZE
+extern attribute_deprecated const int av_aes_size;
+#endif
+
+struct AVAES;
+
+/**
+ * Allocate an AVAES context.
+ */
+struct AVAES *av_aes_alloc(void);
+
+/**
+ * Initialize an AVAES context.
+ * @param key_bits 128, 192 or 256
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+int av_aes_init(struct AVAES *a, const uint8_t *key, int key_bits, int decrypt);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context.
+ * @param count number of 16 byte blocks
+ * @param dst destination array, can be equal to src
+ * @param src source array, can be equal to dst
+ * @param iv initialization vector for CBC mode, if NULL then ECB will be used
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_aes_crypt(struct AVAES *a, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_AES_H */
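
A minimal AES-128-CBC encryption sketch over the three calls above. Freeing the context with av_free() is an assumption, since the allocator lives in libavutil's memory API rather than this header.

  #include <libavutil/aes.h>
  #include <libavutil/mem.h>   /* assumed: av_free() */

  /* Encrypt `blocks` 16-byte blocks from src into dst; a non-NULL iv selects CBC. */
  static int encrypt_cbc_128(uint8_t *dst, const uint8_t *src, int blocks,
                             const uint8_t key[16], uint8_t iv[16])
  {
      struct AVAES *aes = av_aes_alloc();
      if (!aes)
          return -1;
      if (av_aes_init(aes, key, 128, 0) < 0) {      /* 0 = encryption */
          av_free(aes);
          return -1;
      }
      av_aes_crypt(aes, dst, src, blocks, iv, 0);   /* count is in 16-byte blocks */
      av_free(aes);
      return 0;
  }
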
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/attributes.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/attributes.h
new file mode 100644
index 000000000..292a0a1a8
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/attributes.h
@@ -0,0 +1,122 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Macro definitions for various function/variable attributes
+ */
+
+#ifndef AVUTIL_ATTRIBUTES_H
+#define AVUTIL_ATTRIBUTES_H
+
+#ifdef __GNUC__
+# define AV_GCC_VERSION_AT_LEAST(x,y) (__GNUC__ > x || __GNUC__ == x && __GNUC_MINOR__ >= y)
+#else
+# define AV_GCC_VERSION_AT_LEAST(x,y) 0
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_always_inline __attribute__((always_inline)) inline
+#elif defined(_MSC_VER)
+# define av_always_inline __forceinline
+#else
+# define av_always_inline inline
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_noinline __attribute__((noinline))
+#else
+# define av_noinline
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_pure __attribute__((pure))
+#else
+# define av_pure
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(2,6)
+# define av_const __attribute__((const))
+#else
+# define av_const
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4,3)
+# define av_cold __attribute__((cold))
+#else
+# define av_cold
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4,1)
+# define av_flatten __attribute__((flatten))
+#else
+# define av_flatten
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define attribute_deprecated __attribute__((deprecated))
+#else
+# define attribute_deprecated
+#endif
+
+#if defined(__GNUC__)
+# define av_unused __attribute__((unused))
+#else
+# define av_unused
+#endif
+
+/**
+ * Mark a variable as used and prevent the compiler from optimizing it
+ * away. This is useful for variables accessed only from inline
+ * assembler without the compiler being aware.
+ */
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_used __attribute__((used))
+#else
+# define av_used
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,3)
+# define av_alias __attribute__((may_alias))
+#else
+# define av_alias
+#endif
+
+#if defined(__GNUC__) && !defined(__ICC)
+# define av_uninit(x) x=x
+#else
+# define av_uninit(x) x
+#endif
+
+#ifdef __GNUC__
+# define av_builtin_constant_p __builtin_constant_p
+# define av_printf_format(fmtpos, attrpos) __attribute__((__format__(__printf__, fmtpos, attrpos)))
+#else
+# define av_builtin_constant_p(x) 0
+# define av_printf_format(fmtpos, attrpos)
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(2,5)
+# define av_noreturn __attribute__((noreturn))
+#else
+# define av_noreturn
+#endif
+
+#endif /* AVUTIL_ATTRIBUTES_H */
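
The macros above decorate ordinary declarations; a small illustration with made-up functions (the names are placeholders, not part of any libav API).

  #include <stdint.h>
  #include <libavutil/attributes.h>

  /* Reads only its argument, so av_pure lets the compiler reuse results. */
  static av_always_inline av_pure int clip_uint8(int v)
  {
      return v < 0 ? 0 : v > 255 ? 255 : v;
  }

  /* Every caller gets a compile-time warning on GCC 3.1 or newer;
   * the attribute expands to nothing on other compilers. */
  attribute_deprecated int old_scale_row(uint8_t *dst, const uint8_t *src, int len);
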
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/audio_fifo.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/audio_fifo.h
new file mode 100644
index 000000000..8c7638825
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/audio_fifo.h
@@ -0,0 +1,146 @@
+/*
+ * Audio FIFO
+ * Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Audio FIFO Buffer
+ */
+
+#ifndef AVUTIL_AUDIO_FIFO_H
+#define AVUTIL_AUDIO_FIFO_H
+
+#include "avutil.h"
+#include "fifo.h"
+#include "samplefmt.h"
+
+/**
+ * @addtogroup lavu_audio
+ * @{
+ */
+
+/**
+ * Context for an Audio FIFO Buffer.
+ *
+ * - Operates at the sample level rather than the byte level.
+ * - Supports multiple channels with either planar or packed sample format.
+ * - Automatic reallocation when writing to a full buffer.
+ */
+typedef struct AVAudioFifo AVAudioFifo;
+
+/**
+ * Free an AVAudioFifo.
+ *
+ * @param af AVAudioFifo to free
+ */
+void av_audio_fifo_free(AVAudioFifo *af);
+
+/**
+ * Allocate an AVAudioFifo.
+ *
+ * @param sample_fmt sample format
+ * @param channels number of channels
+ * @param nb_samples initial allocation size, in samples
+ * @return newly allocated AVAudioFifo, or NULL on error
+ */
+AVAudioFifo *av_audio_fifo_alloc(enum AVSampleFormat sample_fmt, int channels,
+ int nb_samples);
+
+/**
+ * Reallocate an AVAudioFifo.
+ *
+ * @param af AVAudioFifo to reallocate
+ * @param nb_samples new allocation size, in samples
+ * @return 0 if OK, or negative AVERROR code on failure
+ */
+int av_audio_fifo_realloc(AVAudioFifo *af, int nb_samples);
+
+/**
+ * Write data to an AVAudioFifo.
+ *
+ * The AVAudioFifo will be reallocated automatically if the available space
+ * is less than nb_samples.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param af AVAudioFifo to write to
+ * @param data audio data plane pointers
+ * @param nb_samples number of samples to write
+ * @return number of samples actually written, or negative AVERROR
+ * code on failure.
+ */
+int av_audio_fifo_write(AVAudioFifo *af, void **data, int nb_samples);
+
+/**
+ * Read data from an AVAudioFifo.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param af AVAudioFifo to read from
+ * @param data audio data plane pointers
+ * @param nb_samples number of samples to read
+ * @return number of samples actually read, or negative AVERROR code
+ * on failure.
+ */
+int av_audio_fifo_read(AVAudioFifo *af, void **data, int nb_samples);
+
+/**
+ * Drain data from an AVAudioFifo.
+ *
+ * Removes the data without reading it.
+ *
+ * @param af AVAudioFifo to drain
+ * @param nb_samples number of samples to drain
+ * @return 0 if OK, or negative AVERROR code on failure
+ */
+int av_audio_fifo_drain(AVAudioFifo *af, int nb_samples);
+
+/**
+ * Reset the AVAudioFifo buffer.
+ *
+ * This empties all data in the buffer.
+ *
+ * @param af AVAudioFifo to reset
+ */
+void av_audio_fifo_reset(AVAudioFifo *af);
+
+/**
+ * Get the current number of samples in the AVAudioFifo available for reading.
+ *
+ * @param af the AVAudioFifo to query
+ * @return number of samples available for reading
+ */
+int av_audio_fifo_size(AVAudioFifo *af);
+
+/**
+ * Get the current number of samples in the AVAudioFifo available for writing.
+ *
+ * @param af the AVAudioFifo to query
+ * @return number of samples available for writing
+ */
+int av_audio_fifo_space(AVAudioFifo *af);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_AUDIO_FIFO_H */
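
A hedged end-to-end sketch of the FIFO calls above, using packed 16-bit stereo; the initial size and sample counts are arbitrary, and the caller is assumed to provide correctly laid-out plane pointers.

  #include <libavutil/audio_fifo.h>

  static int bounce_through_fifo(void **in_planes, int in_samples,
                                 void **out_planes, int out_samples)
  {
      int ret = 0;
      /* Packed S16 stereo; the initial size is only a hint, writes can grow it. */
      AVAudioFifo *fifo = av_audio_fifo_alloc(AV_SAMPLE_FMT_S16, 2, 1024);
      if (!fifo)
          return -1;

      if (av_audio_fifo_write(fifo, in_planes, in_samples) < in_samples)
          ret = -1;
      else if (av_audio_fifo_size(fifo) < out_samples ||
               av_audio_fifo_read(fifo, out_planes, out_samples) < out_samples)
          ret = -1;

      av_audio_fifo_free(fifo);
      return ret;
  }
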
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/audioconvert.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/audioconvert.h
new file mode 100644
index 000000000..300a67cd3
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/audioconvert.h
@@ -0,0 +1,6 @@
+
+#include "version.h"
+
+#if FF_API_AUDIOCONVERT
+#include "channel_layout.h"
+#endif
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/avassert.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/avassert.h
new file mode 100644
index 000000000..b223d26e8
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/avassert.h
@@ -0,0 +1,66 @@
+/*
+ * copyright (c) 2010 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * simple assert() macros that are a bit more flexible than ISO C assert().
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVUTIL_AVASSERT_H
+#define AVUTIL_AVASSERT_H
+
+#include <stdlib.h>
+#include "avutil.h"
+#include "log.h"
+
+/**
+ * assert() equivalent, that is always enabled.
+ */
+#define av_assert0(cond) do { \
+ if (!(cond)) { \
+ av_log(NULL, AV_LOG_FATAL, "Assertion %s failed at %s:%d\n", \
+ AV_STRINGIFY(cond), __FILE__, __LINE__); \
+ abort(); \
+ } \
+} while (0)
+
+
+/**
+ * assert() equivalent, that does not lie in speed-critical code.
+ * These asserts can thus be enabled without fearing a loss of speed.
+ */
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 0
+#define av_assert1(cond) av_assert0(cond)
+#else
+#define av_assert1(cond) ((void)0)
+#endif
+
+
+/**
+ * assert() equivalent, that does lie in speed-critical code.
+ */
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1
+#define av_assert2(cond) av_assert0(cond)
+#else
+#define av_assert2(cond) ((void)0)
+#endif
+
+#endif /* AVUTIL_AVASSERT_H */
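
A short illustration of the three levels: only av_assert0 is active in a default build, while av_assert1/av_assert2 need ASSERT_LEVEL (for example -DASSERT_LEVEL=2) at compile time. The function itself is a made-up example.

  #include <libavutil/avassert.h>

  static int sum_even_indices(const int *values, int count)
  {
      int sum = 0, i;
      av_assert0(values || count == 0);   /* always checked; logs and abort()s */
      av_assert1(count >= 0);             /* checked only if ASSERT_LEVEL > 0 */
      for (i = 0; i < count; i += 2) {
          av_assert2(i < count);          /* hot-loop check, needs ASSERT_LEVEL > 1 */
          sum += values[i];
      }
      return sum;
  }
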
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/avconfig.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/avconfig.h
new file mode 100644
index 000000000..f10aa6186
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/avconfig.h
@@ -0,0 +1,6 @@
+/* Generated by ffconf */
+#ifndef AVUTIL_AVCONFIG_H
+#define AVUTIL_AVCONFIG_H
+#define AV_HAVE_BIGENDIAN 0
+#define AV_HAVE_FAST_UNALIGNED 1
+#endif /* AVUTIL_AVCONFIG_H */
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/avstring.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/avstring.h
new file mode 100644
index 000000000..acd6610d3
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/avstring.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2007 Mans Rullgard
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AVSTRING_H
+#define AVUTIL_AVSTRING_H
+
+#include <stddef.h>
+#include "attributes.h"
+
+/**
+ * @addtogroup lavu_string
+ * @{
+ */
+
+/**
+ * Return non-zero if pfx is a prefix of str. If it is, *ptr is set to
+ * the address of the first character in str after the prefix.
+ *
+ * @param str input string
+ * @param pfx prefix to test
+ * @param ptr updated if the prefix is matched inside str
+ * @return non-zero if the prefix matches, zero otherwise
+ */
+int av_strstart(const char *str, const char *pfx, const char **ptr);
+
+/**
+ * Return non-zero if pfx is a prefix of str independent of case. If
+ * it is, *ptr is set to the address of the first character in str
+ * after the prefix.
+ *
+ * @param str input string
+ * @param pfx prefix to test
+ * @param ptr updated if the prefix is matched inside str
+ * @return non-zero if the prefix matches, zero otherwise
+ */
+int av_stristart(const char *str, const char *pfx, const char **ptr);
+
+/**
+ * Locate the first case-independent occurrence in the string haystack
+ * of the string needle. A zero-length string needle is considered to
+ * match at the start of haystack.
+ *
+ * This function is a case-insensitive version of the standard strstr().
+ *
+ * @param haystack string to search in
+ * @param needle string to search for
+ * @return pointer to the located match within haystack
+ * or a null pointer if no match
+ */
+char *av_stristr(const char *haystack, const char *needle);
+
+/**
+ * Copy the string src to dst, but no more than size - 1 bytes, and
+ * null-terminate dst.
+ *
+ * This function is the same as BSD strlcpy().
+ *
+ * @param dst destination buffer
+ * @param src source string
+ * @param size size of destination buffer
+ * @return the length of src
+ *
+ * @warning since the return value is the length of src, src absolutely
+ * _must_ be a properly 0-terminated string, otherwise this will read beyond
+ * the end of the buffer and possibly crash.
+ */
+size_t av_strlcpy(char *dst, const char *src, size_t size);
+
+/**
+ * Append the string src to the string dst, but to a total length of
+ * no more than size - 1 bytes, and null-terminate dst.
+ *
+ * This function is similar to BSD strlcat(), but differs when
+ * size <= strlen(dst).
+ *
+ * @param dst destination buffer
+ * @param src source string
+ * @param size size of destination buffer
+ * @return the total length of src and dst
+ *
+ * @warning since the return value uses the lengths of src and dst, these
+ * absolutely _must_ be properly 0-terminated strings, otherwise this
+ * will read beyond the end of the buffer and possibly crash.
+ */
+size_t av_strlcat(char *dst, const char *src, size_t size);
+
+/**
+ * Append output to a string, according to a format. Never write out of
+ * the destination buffer, and always put a terminating 0 within
+ * the buffer.
+ * @param dst destination buffer (string to which the output is
+ * appended)
+ * @param size total size of the destination buffer
+ * @param fmt printf-compatible format string, specifying how the
+ * following parameters are used
+ * @return the length of the string that would have been generated
+ * if enough space had been available
+ */
+size_t av_strlcatf(char *dst, size_t size, const char *fmt, ...) av_printf_format(3, 4);
+
+/**
+ * Convert a number to a av_malloced string.
+ */
+char *av_d2str(double d);
+
+/**
+ * Unescape the given string until a non-escaped terminating char,
+ * and return the token corresponding to the unescaped string.
+ *
+ * The normal \ and ' escaping is supported. Leading and trailing
+ * whitespace is removed, unless it is escaped with '\' or enclosed
+ * between ''.
+ *
+ * @param buf the buffer to parse, buf will be updated to point to the
+ * terminating char
+ * @param term a 0-terminated list of terminating chars
+ * @return the malloced unescaped string, which must be av_freed by
+ * the user, NULL in case of allocation failure
+ */
+char *av_get_token(const char **buf, const char *term);
+
+/**
+ * Locale-independent conversion of ASCII characters to uppercase.
+ */
+static inline int av_toupper(int c)
+{
+ if (c >= 'a' && c <= 'z')
+ c ^= 0x20;
+ return c;
+}
+
+/**
+ * Locale-independent conversion of ASCII characters to lowercase.
+ */
+static inline int av_tolower(int c)
+{
+ if (c >= 'A' && c <= 'Z')
+ c ^= 0x20;
+ return c;
+}
+
+/**
+ * Locale-independent case-insensitive compare.
+ * @note This means only ASCII-range characters are case-insensitive
+ */
+int av_strcasecmp(const char *a, const char *b);
+
+/**
+ * Locale-independent case-insensitive compare.
+ * @note This means only ASCII-range characters are case-insensitive
+ */
+int av_strncasecmp(const char *a, const char *b, size_t n);
+
+
+/**
+ * Thread safe basename.
+ * @param path the path, on DOS both \ and / are considered separators.
+ * @return pointer to the basename substring.
+ */
+const char *av_basename(const char *path);
+
+/**
+ * Thread safe dirname.
+ * @param path the path, on DOS both \ and / are considered separators.
+ * @return the path with the separator replaced by the string terminator or ".".
+ * @note the function may change the input string.
+ */
+const char *av_dirname(char *path);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_AVSTRING_H */
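
A small sketch exercising the bounded copy/append helpers and the case-insensitive prefix test documented above; the buffer and URI pieces are placeholders.

  #include <stdio.h>
  #include <libavutil/avstring.h>

  static void build_and_inspect(char *out, size_t out_size,
                                const char *scheme, const char *host, int port)
  {
      const char *rest;

      av_strlcpy(out, scheme, out_size);        /* always 0-terminates dst */
      av_strlcat(out, "://", out_size);
      av_strlcat(out, host, out_size);
      av_strlcatf(out, out_size, ":%d", port);  /* printf-style bounded append */

      if (av_stristart(out, "HTTP://", &rest))  /* case-insensitive prefix test */
          printf("http resource, remainder: %s\n", rest);
  }
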
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/avutil.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/avutil.h
new file mode 100644
index 000000000..33f9bea72
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/avutil.h
@@ -0,0 +1,275 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AVUTIL_H
+#define AVUTIL_AVUTIL_H
+
+/**
+ * @file
+ * external API header
+ */
+
+/**
+ * @mainpage
+ *
+ * @section libav_intro Introduction
+ *
+ * This document describes the usage of the different libraries
+ * provided by Libav.
+ *
+ * @li @ref libavc "libavcodec" encoding/decoding library
+ * @li @subpage libavfilter graph based frame editing library
+ * @li @ref libavf "libavformat" I/O and muxing/demuxing library
+ * @li @ref lavd "libavdevice" special devices muxing/demuxing library
+ * @li @ref lavu "libavutil" common utility library
+ * @li @ref lavr "libavresample" audio resampling, format conversion and mixing
+ * @li @subpage libswscale color conversion and scaling library
+ */
+
+/**
+ * @defgroup lavu Common utility functions
+ *
+ * @brief
+ * libavutil contains the code shared across all the other Libav
+ * libraries
+ *
+ * @note In order to use the functions provided by avutil you must include
+ * the specific header.
+ *
+ * @{
+ *
+ * @defgroup lavu_crypto Crypto and Hashing
+ *
+ * @{
+ * @}
+ *
+ * @defgroup lavu_math Maths
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_string String Manipulation
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_mem Memory Management
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_data Data Structures
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_audio Audio related
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_error Error Codes
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_misc Other
+ *
+ * @{
+ *
+ * @defgroup lavu_internal Internal
+ *
+ * Not exported functions, for internal usage only
+ *
+ * @{
+ *
+ * @}
+ */
+
+
+/**
+ * @defgroup preproc_misc Preprocessor String Macros
+ *
+ * String manipulation macros
+ *
+ * @{
+ */
+
+#define AV_STRINGIFY(s) AV_TOSTRING(s)
+#define AV_TOSTRING(s) #s
+
+#define AV_GLUE(a, b) a ## b
+#define AV_JOIN(a, b) AV_GLUE(a, b)
+
+#define AV_PRAGMA(s) _Pragma(#s)
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup version_utils Library Version Macros
+ *
+ * Useful to check and match library version in order to maintain
+ * backward compatibility.
+ *
+ * @{
+ */
+
+#define AV_VERSION_INT(a, b, c) (a<<16 | b<<8 | c)
+#define AV_VERSION_DOT(a, b, c) a ##.## b ##.## c
+#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c)
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavu_ver
+ * @{
+ */
+
+/**
+ * Return the LIBAVUTIL_VERSION_INT constant.
+ */
+unsigned avutil_version(void);
+
+/**
+ * Return the libavutil build-time configuration.
+ */
+const char *avutil_configuration(void);
+
+/**
+ * Return the libavutil license.
+ */
+const char *avutil_license(void);
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavu_media Media Type
+ * @brief Media Type
+ */
+
+enum AVMediaType {
+ AVMEDIA_TYPE_UNKNOWN = -1, ///< Usually treated as AVMEDIA_TYPE_DATA
+ AVMEDIA_TYPE_VIDEO,
+ AVMEDIA_TYPE_AUDIO,
+ AVMEDIA_TYPE_DATA, ///< Opaque data information usually continuous
+ AVMEDIA_TYPE_SUBTITLE,
+ AVMEDIA_TYPE_ATTACHMENT, ///< Opaque data information usually sparse
+ AVMEDIA_TYPE_NB
+};
+
+/**
+ * @defgroup lavu_const Constants
+ * @{
+ *
+ * @defgroup lavu_enc Encoding specific
+ *
+ * @note those definitions should move to avcodec
+ * @{
+ */
+
+#define FF_LAMBDA_SHIFT 7
+#define FF_LAMBDA_SCALE (1<<FF_LAMBDA_SHIFT)
+#define FF_QP2LAMBDA 118 ///< factor to convert from H.263 QP to lambda
+#define FF_LAMBDA_MAX (256*128-1)
+
+#define FF_QUALITY_SCALE FF_LAMBDA_SCALE //FIXME maybe remove
+
+/**
+ * @}
+ * @defgroup lavu_time Timestamp specific
+ *
+ * Libav internal timebase and timestamp definitions
+ *
+ * @{
+ */
+
+/**
+ * @brief Undefined timestamp value
+ *
+ * Usually reported by demuxers that work on containers that do not provide
+ * either pts or dts.
+ */
+
+#define AV_NOPTS_VALUE INT64_C(0x8000000000000000)
+
+/**
+ * Internal time base represented as integer
+ */
+
+#define AV_TIME_BASE 1000000
+
+/**
+ * Internal time base represented as fractional value
+ */
+
+#define AV_TIME_BASE_Q (AVRational){1, AV_TIME_BASE}
+
+/**
+ * @}
+ * @}
+ * @defgroup lavu_picture Image related
+ *
+ * AVPicture types, pixel formats and basic image planes manipulation.
+ *
+ * @{
+ */
+
+enum AVPictureType {
+ AV_PICTURE_TYPE_I = 1, ///< Intra
+ AV_PICTURE_TYPE_P, ///< Predicted
+ AV_PICTURE_TYPE_B, ///< Bi-dir predicted
+ AV_PICTURE_TYPE_S, ///< S(GMC)-VOP MPEG4
+ AV_PICTURE_TYPE_SI, ///< Switching Intra
+ AV_PICTURE_TYPE_SP, ///< Switching Predicted
+ AV_PICTURE_TYPE_BI, ///< BI type
+};
+
+/**
+ * Return a single letter to describe the given picture type
+ * pict_type.
+ *
+ * @param[in] pict_type the picture type @return a single character
+ * representing the picture type, '?' if pict_type is unknown
+ */
+char av_get_picture_type_char(enum AVPictureType pict_type);
+
+/**
+ * @}
+ */
+
+#include "error.h"
+#include "version.h"
+
+/**
+ * @}
+ * @}
+ */
+
+#endif /* AVUTIL_AVUTIL_H */
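
A hedged sketch tying together the version helpers, AV_NOPTS_VALUE and the picture-type lookup declared above; the unpacking of the version integer follows the AV_VERSION_INT() layout (a<<16 | b<<8 | c).

  #include <stdint.h>
  #include <stdio.h>
  #include <libavutil/avutil.h>

  static void report_frame(int64_t pts, enum AVPictureType type)
  {
      unsigned v = avutil_version();
      printf("libavutil %u.%u.%u, license: %s\n",
             v >> 16, (v >> 8) & 0xff, v & 0xff, avutil_license());

      if (pts == AV_NOPTS_VALUE)
          printf("%c-frame with no timestamp\n", av_get_picture_type_char(type));
      else
          printf("%c-frame at %.3f s\n", av_get_picture_type_char(type),
                 pts / (double)AV_TIME_BASE);
  }
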
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/base64.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/base64.h
new file mode 100644
index 000000000..4750cf5c7
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/base64.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2006 Ryan Martell. (rdm4@martellventures.com)
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_BASE64_H
+#define AVUTIL_BASE64_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_base64 Base64
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+
+/**
+ * Decode a base64-encoded string.
+ *
+ * @param out buffer for decoded data
+ * @param in null-terminated input string
+ * @param out_size size in bytes of the out buffer, must be at
+ * least 3/4 of the length of in
+ * @return number of bytes written, or a negative value in case of
+ * invalid input
+ */
+int av_base64_decode(uint8_t *out, const char *in, int out_size);
+
+/**
+ * Encode data to base64 and null-terminate.
+ *
+ * @param out buffer for encoded data
+ * @param out_size size in bytes of the output buffer, must be at
+ * least AV_BASE64_SIZE(in_size)
+ * @param in_size size in bytes of the 'in' buffer
+ * @return 'out' or NULL in case of error
+ */
+char *av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size);
+
+/**
+ * Calculate the output size needed to base64-encode x bytes.
+ */
+#define AV_BASE64_SIZE(x) (((x)+2) / 3 * 4 + 1)
+
+ /**
+ * @}
+ */
+
+#endif /* AVUTIL_BASE64_H */
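
A round-trip sketch of the two calls above: AV_BASE64_SIZE() gives the worst-case encoded size including the terminating NUL, and passing the original size as the decode buffer size satisfies the 3/4 rule stated in the decode docs.

  #include <stdlib.h>
  #include <string.h>
  #include <libavutil/base64.h>

  static int base64_roundtrip(const uint8_t *data, int size)
  {
      int ok = -1;
      char    *b64  = malloc(AV_BASE64_SIZE(size));   /* worst case + NUL */
      uint8_t *back = malloc(size);

      if (b64 && back &&
          av_base64_encode(b64, AV_BASE64_SIZE(size), data, size) &&
          av_base64_decode(back, b64, size) == size &&
          memcmp(back, data, size) == 0)
          ok = 0;

      free(b64);
      free(back);
      return ok;
  }
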
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/blowfish.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/blowfish.h
new file mode 100644
index 000000000..8c29536cf
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/blowfish.h
@@ -0,0 +1,76 @@
+/*
+ * Blowfish algorithm
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_BLOWFISH_H
+#define AVUTIL_BLOWFISH_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_blowfish Blowfish
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+#define AV_BF_ROUNDS 16
+
+typedef struct AVBlowfish {
+ uint32_t p[AV_BF_ROUNDS + 2];
+ uint32_t s[4][256];
+} AVBlowfish;
+
+/**
+ * Initialize an AVBlowfish context.
+ *
+ * @param ctx an AVBlowfish context
+ * @param key a key
+ * @param key_len length of the key
+ */
+void av_blowfish_init(struct AVBlowfish *ctx, const uint8_t *key, int key_len);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context.
+ *
+ * @param ctx an AVBlowfish context
+ * @param xl left four-byte half of the input to be encrypted
+ * @param xr right four-byte half of the input to be encrypted
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_blowfish_crypt_ecb(struct AVBlowfish *ctx, uint32_t *xl, uint32_t *xr,
+ int decrypt);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context.
+ *
+ * @param ctx an AVBlowfish context
+ * @param dst destination array, can be equal to src
+ * @param src source array, can be equal to dst
+ * @param count number of 8 byte blocks
+ * @param iv initialization vector for CBC mode, if NULL ECB will be used
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_blowfish_crypt(struct AVBlowfish *ctx, uint8_t *dst, const uint8_t *src,
+ int count, uint8_t *iv, int decrypt);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_BLOWFISH_H */
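
A minimal ECB sketch of the two core calls above; xl/xr are simply the two 32-bit halves of one 8-byte block, and clearing the expanded key afterwards is only illustrative hygiene.

  #include <string.h>
  #include <libavutil/blowfish.h>

  static void blowfish_one_block(uint32_t *xl, uint32_t *xr,
                                 const uint8_t *key, int key_len, int decrypt)
  {
      AVBlowfish ctx;
      av_blowfish_init(&ctx, key, key_len);
      av_blowfish_crypt_ecb(&ctx, xl, xr, decrypt);   /* 0 = encrypt, 1 = decrypt */
      memset(&ctx, 0, sizeof(ctx));                   /* drop the key schedule */
  }
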
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/bswap.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/bswap.h
new file mode 100644
index 000000000..8a350e1cd
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/bswap.h
@@ -0,0 +1,109 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * byte swapping routines
+ */
+
+#ifndef AVUTIL_BSWAP_H
+#define AVUTIL_BSWAP_H
+
+#include <stdint.h>
+#include "libavutil/avconfig.h"
+#include "attributes.h"
+
+#ifdef HAVE_AV_CONFIG_H
+
+#include "config.h"
+
+#if ARCH_ARM
+# include "arm/bswap.h"
+#elif ARCH_AVR32
+# include "avr32/bswap.h"
+#elif ARCH_BFIN
+# include "bfin/bswap.h"
+#elif ARCH_SH4
+# include "sh4/bswap.h"
+#elif ARCH_X86
+# include "x86/bswap.h"
+#endif
+
+#endif /* HAVE_AV_CONFIG_H */
+
+#define AV_BSWAP16C(x) (((x) << 8 & 0xff00) | ((x) >> 8 & 0x00ff))
+#define AV_BSWAP32C(x) (AV_BSWAP16C(x) << 16 | AV_BSWAP16C((x) >> 16))
+#define AV_BSWAP64C(x) (AV_BSWAP32C(x) << 32 | AV_BSWAP32C((x) >> 32))
+
+#define AV_BSWAPC(s, x) AV_BSWAP##s##C(x)
+
+#ifndef av_bswap16
+static av_always_inline av_const uint16_t av_bswap16(uint16_t x)
+{
+ x= (x>>8) | (x<<8);
+ return x;
+}
+#endif
+
+#ifndef av_bswap32
+static av_always_inline av_const uint32_t av_bswap32(uint32_t x)
+{
+ return AV_BSWAP32C(x);
+}
+#endif
+
+#ifndef av_bswap64
+static inline uint64_t av_const av_bswap64(uint64_t x)
+{
+ return (uint64_t)av_bswap32(x) << 32 | av_bswap32(x >> 32);
+}
+#endif
+
+// be2ne ... big-endian to native-endian
+// le2ne ... little-endian to native-endian
+
+#if AV_HAVE_BIGENDIAN
+#define av_be2ne16(x) (x)
+#define av_be2ne32(x) (x)
+#define av_be2ne64(x) (x)
+#define av_le2ne16(x) av_bswap16(x)
+#define av_le2ne32(x) av_bswap32(x)
+#define av_le2ne64(x) av_bswap64(x)
+#define AV_BE2NEC(s, x) (x)
+#define AV_LE2NEC(s, x) AV_BSWAPC(s, x)
+#else
+#define av_be2ne16(x) av_bswap16(x)
+#define av_be2ne32(x) av_bswap32(x)
+#define av_be2ne64(x) av_bswap64(x)
+#define av_le2ne16(x) (x)
+#define av_le2ne32(x) (x)
+#define av_le2ne64(x) (x)
+#define AV_BE2NEC(s, x) AV_BSWAPC(s, x)
+#define AV_LE2NEC(s, x) (x)
+#endif
+
+#define AV_BE2NE16C(x) AV_BE2NEC(16, x)
+#define AV_BE2NE32C(x) AV_BE2NEC(32, x)
+#define AV_BE2NE64C(x) AV_BE2NEC(64, x)
+#define AV_LE2NE16C(x) AV_LE2NEC(16, x)
+#define AV_LE2NE32C(x) AV_LE2NEC(32, x)
+#define AV_LE2NE64C(x) AV_LE2NEC(64, x)
+
+#endif /* AVUTIL_BSWAP_H */
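
A short sketch of the runtime swaps and the *C compile-time variants above, reading a little-endian 32-bit field from a byte buffer; the memcpy keeps the load alignment-safe.

  #include <string.h>
  #include <libavutil/bswap.h>

  static uint32_t read_le32(const uint8_t *p)
  {
      uint32_t v;
      memcpy(&v, p, sizeof(v));   /* native-endian load */
      return av_le2ne32(v);       /* no-op on LE hosts, av_bswap32() on BE hosts */
  }

  /* Constant expressions use the *C macros so they fold at compile time. */
  enum { SWAPPED_TAG = AV_BSWAP32C(0x00010203) };   /* == 0x03020100 */
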
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/channel_layout.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/channel_layout.h
new file mode 100644
index 000000000..15b6887a6
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/channel_layout.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2008 Peter Ross
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CHANNEL_LAYOUT_H
+#define AVUTIL_CHANNEL_LAYOUT_H
+
+#include <stdint.h>
+
+/**
+ * @file
+ * audio channel layout utility functions
+ */
+
+/**
+ * @addtogroup lavu_audio
+ * @{
+ */
+
+/**
+ * @defgroup channel_masks Audio channel masks
+ * @{
+ */
+#define AV_CH_FRONT_LEFT 0x00000001
+#define AV_CH_FRONT_RIGHT 0x00000002
+#define AV_CH_FRONT_CENTER 0x00000004
+#define AV_CH_LOW_FREQUENCY 0x00000008
+#define AV_CH_BACK_LEFT 0x00000010
+#define AV_CH_BACK_RIGHT 0x00000020
+#define AV_CH_FRONT_LEFT_OF_CENTER 0x00000040
+#define AV_CH_FRONT_RIGHT_OF_CENTER 0x00000080
+#define AV_CH_BACK_CENTER 0x00000100
+#define AV_CH_SIDE_LEFT 0x00000200
+#define AV_CH_SIDE_RIGHT 0x00000400
+#define AV_CH_TOP_CENTER 0x00000800
+#define AV_CH_TOP_FRONT_LEFT 0x00001000
+#define AV_CH_TOP_FRONT_CENTER 0x00002000
+#define AV_CH_TOP_FRONT_RIGHT 0x00004000
+#define AV_CH_TOP_BACK_LEFT 0x00008000
+#define AV_CH_TOP_BACK_CENTER 0x00010000
+#define AV_CH_TOP_BACK_RIGHT 0x00020000
+#define AV_CH_STEREO_LEFT 0x20000000 ///< Stereo downmix.
+#define AV_CH_STEREO_RIGHT 0x40000000 ///< See AV_CH_STEREO_LEFT.
+#define AV_CH_WIDE_LEFT 0x0000000080000000ULL
+#define AV_CH_WIDE_RIGHT 0x0000000100000000ULL
+#define AV_CH_SURROUND_DIRECT_LEFT 0x0000000200000000ULL
+#define AV_CH_SURROUND_DIRECT_RIGHT 0x0000000400000000ULL
+#define AV_CH_LOW_FREQUENCY_2 0x0000000800000000ULL
+
+/** Channel mask value used for AVCodecContext.request_channel_layout
+ to indicate that the user requests the channel order of the decoder output
+ to be the native codec channel order. */
+#define AV_CH_LAYOUT_NATIVE 0x8000000000000000ULL
+
+/**
+ * @}
+ * @defgroup channel_mask_c Audio channel convenience macros
+ * @{
+ * */
+#define AV_CH_LAYOUT_MONO (AV_CH_FRONT_CENTER)
+#define AV_CH_LAYOUT_STEREO (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT)
+#define AV_CH_LAYOUT_2POINT1 (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_2_1 (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_SURROUND (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER)
+#define AV_CH_LAYOUT_3POINT1 (AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_4POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_4POINT1 (AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_2_2 (AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)
+#define AV_CH_LAYOUT_QUAD (AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_5POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)
+#define AV_CH_LAYOUT_5POINT1 (AV_CH_LAYOUT_5POINT0|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_5POINT0_BACK (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_5POINT1_BACK (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_6POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT0_FRONT (AV_CH_LAYOUT_2_2|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_HEXAGONAL (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1_FRONT (AV_CH_LAYOUT_6POINT0_FRONT|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_7POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_7POINT0_FRONT (AV_CH_LAYOUT_5POINT0|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_7POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_7POINT1_WIDE (AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_OCTAGONAL (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_STEREO_DOWNMIX (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT)
+
+enum AVMatrixEncoding {
+ AV_MATRIX_ENCODING_NONE,
+ AV_MATRIX_ENCODING_DOLBY,
+ AV_MATRIX_ENCODING_DPLII,
+ AV_MATRIX_ENCODING_NB
+};
+
+/**
+ * @}
+ */
+
+/**
+ * Return a channel layout id that matches name, or 0 if no match is found.
+ *
+ * name can be one or several of the following notations,
+ * separated by '+' or '|':
+ * - the name of a usual channel layout (mono, stereo, 4.0, quad, 5.0,
+ * 5.0(side), 5.1, 5.1(side), 7.1, 7.1(wide), downmix);
+ * - the name of a single channel (FL, FR, FC, LFE, BL, BR, FLC, FRC, BC,
+ * SL, SR, TC, TFL, TFC, TFR, TBL, TBC, TBR, DL, DR);
+ * - a number of channels, in decimal, optionally followed by 'c', yielding
+ * the default channel layout for that number of channels (@see
+ * av_get_default_channel_layout);
+ * - a channel layout mask, in hexadecimal starting with "0x" (see the
+ * AV_CH_* macros).
+ *
+ * Example: "stereo+FC" = "2+FC" = "2c+1c" = "0x7"
+ */
+uint64_t av_get_channel_layout(const char *name);
+
+/**
+ * Return a description of a channel layout.
+ * If nb_channels is <= 0, it is guessed from the channel_layout.
+ *
+ * @param buf buffer where the string describing the channel layout is written
+ * @param buf_size size in bytes of the buffer
+ */
+void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout);
+
+/**
+ * Return the number of channels in the channel layout.
+ */
+int av_get_channel_layout_nb_channels(uint64_t channel_layout);
+
+/**
+ * Return default channel layout for a given number of channels.
+ */
+uint64_t av_get_default_channel_layout(int nb_channels);
+
+/**
+ * Get the index of a channel in channel_layout.
+ *
+ * @param channel a channel layout describing exactly one channel which must be
+ * present in channel_layout.
+ *
+ * @return index of channel in channel_layout on success, a negative AVERROR
+ * on error.
+ */
+int av_get_channel_layout_channel_index(uint64_t channel_layout,
+ uint64_t channel);
+
+/**
+ * Get the channel with the given index in channel_layout.
+ */
+uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index);
+
+/**
+ * Get the name of a given channel.
+ *
+ * @return channel name on success, NULL on error.
+ */
+const char *av_get_channel_name(uint64_t channel);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_CHANNEL_LAYOUT_H */
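
A usage sketch, not part of the upstream header: it resolves a layout by name and walks its channels with the helpers declared above. It assumes the vendored libavutil headers are on the include path and that libavutil is linked; error handling is minimal.

    #include <stdio.h>
    #include <inttypes.h>
    #include "libavutil/channel_layout.h"

    int main(void)
    {
        uint64_t layout = av_get_channel_layout("5.1");
        int nb_channels;
        char buf[64];
        int i;

        if (!layout)
            return 1;
        nb_channels = av_get_channel_layout_nb_channels(layout);
        av_get_channel_layout_string(buf, sizeof(buf), nb_channels, layout);
        printf("mask 0x%" PRIx64 ", %d channels, described as \"%s\"\n",
               layout, nb_channels, buf);

        /* Extract each channel from the mask and print its name. */
        for (i = 0; i < nb_channels; i++) {
            uint64_t ch = av_channel_layout_extract_channel(layout, i);
            const char *name = av_get_channel_name(ch);
            printf("  channel %d: %s\n", i, name ? name : "unknown");
        }
        return 0;
    }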
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/common.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/common.h
new file mode 100644
index 000000000..cc4df16e4
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/common.h
@@ -0,0 +1,406 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * common internal and external API header
+ */
+
+#ifndef AVUTIL_COMMON_H
+#define AVUTIL_COMMON_H
+
+#include <ctype.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "attributes.h"
+#include "version.h"
+#include "libavutil/avconfig.h"
+
+#if AV_HAVE_BIGENDIAN
+# define AV_NE(be, le) (be)
+#else
+# define AV_NE(be, le) (le)
+#endif
+
+//rounded division & shift
+#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b))
+/* assume b>0 */
+#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))
+#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))
+#define FFSIGN(a) ((a) > 0 ? 1 : -1)
+
+#define FFMAX(a,b) ((a) > (b) ? (a) : (b))
+#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c)
+#define FFMIN(a,b) ((a) > (b) ? (b) : (a))
+#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c)
+
+#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0)
+#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))
+#define FFALIGN(x, a) (((x)+(a)-1)&~((a)-1))
+
+/* misc math functions */
+
+#if FF_API_AV_REVERSE
+extern attribute_deprecated const uint8_t av_reverse[256];
+#endif
+
+#ifdef HAVE_AV_CONFIG_H
+# include "config.h"
+# include "intmath.h"
+#endif
+
+/* Pull in unguarded fallback defines at the end of this file. */
+#include "common.h"
+
+#ifndef av_log2
+av_const int av_log2(unsigned v);
+#endif
+
+#ifndef av_log2_16bit
+av_const int av_log2_16bit(unsigned v);
+#endif
+
+/**
+ * Clip a signed integer value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const int av_clip_c(int a, int amin, int amax)
+{
+ if (a < amin) return amin;
+ else if (a > amax) return amax;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the 0-255 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const uint8_t av_clip_uint8_c(int a)
+{
+ if (a&(~0xFF)) return (-a)>>31;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the -128,127 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int8_t av_clip_int8_c(int a)
+{
+ if ((a+0x80) & ~0xFF) return (a>>31) ^ 0x7F;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the 0-65535 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const uint16_t av_clip_uint16_c(int a)
+{
+ if (a&(~0xFFFF)) return (-a)>>31;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the -32768,32767 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int16_t av_clip_int16_c(int a)
+{
+ if ((a+0x8000) & ~0xFFFF) return (a>>31) ^ 0x7FFF;
+ else return a;
+}
+
+/**
+ * Clip a signed 64-bit integer value into the -2147483648,2147483647 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int32_t av_clipl_int32_c(int64_t a)
+{
+ if ((a+0x80000000u) & ~UINT64_C(0xFFFFFFFF)) return (a>>63) ^ 0x7FFFFFFF;
+ else return a;
+}
+
+/**
+ * Clip a signed integer to an unsigned power of two range.
+ * @param a value to clip
+ * @param p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
+{
+ if (a & ~((1<<p) - 1)) return -a >> 31 & ((1<<p) - 1);
+ else return a;
+}
+
+/**
+ * Add two signed 32-bit values with saturation.
+ *
+ * @param a one value
+ * @param b another value
+ * @return sum with signed saturation
+ */
+static av_always_inline int av_sat_add32_c(int a, int b)
+{
+ return av_clipl_int32((int64_t)a + b);
+}
+
+/**
+ * Add a doubled value to another value with saturation at both stages.
+ *
+ * @param a first value
+ * @param b value doubled and added to a
+ * @return sum with signed saturation
+ */
+static av_always_inline int av_sat_dadd32_c(int a, int b)
+{
+ return av_sat_add32(a, av_sat_add32(b, b));
+}
+
+/**
+ * Clip a float value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const float av_clipf_c(float a, float amin, float amax)
+{
+ if (a < amin) return amin;
+ else if (a > amax) return amax;
+ else return a;
+}
+
+/** Compute ceil(log2(x)).
+ * @param x value used to compute ceil(log2(x))
+ * @return computed ceiling of log2(x)
+ */
+static av_always_inline av_const int av_ceil_log2_c(int x)
+{
+ return av_log2((x - 1) << 1);
+}
+
+/**
+ * Count number of bits set to one in x
+ * @param x value to count bits of
+ * @return the number of bits set to one in x
+ */
+static av_always_inline av_const int av_popcount_c(uint32_t x)
+{
+ x -= (x >> 1) & 0x55555555;
+ x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+ x = (x + (x >> 4)) & 0x0F0F0F0F;
+ x += x >> 8;
+ return (x + (x >> 16)) & 0x3F;
+}
+
+/**
+ * Count number of bits set to one in x
+ * @param x value to count bits of
+ * @return the number of bits set to one in x
+ */
+static av_always_inline av_const int av_popcount64_c(uint64_t x)
+{
+ return av_popcount(x) + av_popcount(x >> 32);
+}
+
+#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))
+#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24))
+
+/**
+ * Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.
+ *
+ * @param val Output value, must be an lvalue of type uint32_t.
+ * @param GET_BYTE Expression reading one byte from the input.
+ * Evaluated up to 7 times (4 for the currently
+ * assigned Unicode range). With a memory buffer
+ * input, this could be *ptr++.
+ * @param ERROR Expression to be evaluated on invalid input,
+ * typically a goto statement.
+ */
+#define GET_UTF8(val, GET_BYTE, ERROR)\
+ val= GET_BYTE;\
+ {\
+ uint32_t top = (val & 128) >> 1;\
+ if ((val & 0xc0) == 0x80)\
+ ERROR\
+ while (val & top) {\
+ int tmp= GET_BYTE - 128;\
+ if(tmp>>6)\
+ ERROR\
+ val= (val<<6) + tmp;\
+ top <<= 5;\
+ }\
+ val &= (top << 1) - 1;\
+ }
+
+/**
+ * Convert a UTF-16 character (2 or 4 bytes) to its 32-bit UCS-4 encoded form.
+ *
+ * @param val Output value, must be an lvalue of type uint32_t.
+ * @param GET_16BIT Expression returning two bytes of UTF-16 data converted
+ * to native byte order. Evaluated one or two times.
+ * @param ERROR Expression to be evaluated on invalid input,
+ * typically a goto statement.
+ */
+#define GET_UTF16(val, GET_16BIT, ERROR)\
+ val = GET_16BIT;\
+ {\
+ unsigned int hi = val - 0xD800;\
+ if (hi < 0x800) {\
+ val = GET_16BIT - 0xDC00;\
+ if (val > 0x3FFU || hi > 0x3FFU)\
+ ERROR\
+ val += (hi<<10) + 0x10000;\
+ }\
+ }\
+
+/**
+ * @def PUT_UTF8(val, tmp, PUT_BYTE)
+ * Convert a 32-bit Unicode character to its UTF-8 encoded form (up to 4 bytes long).
+ * @param val is an input-only argument and should be of type uint32_t. It holds
+ * a UCS-4 encoded Unicode character that is to be converted to UTF-8. If
+ * val is given as a function it is executed only once.
+ * @param tmp is a temporary variable and should be of type uint8_t. It
+ * represents an intermediate value during conversion that is to be
+ * output by PUT_BYTE.
+ * @param PUT_BYTE writes the converted UTF-8 bytes to any proper destination.
+ * It could be a function or a statement, and uses tmp as the input byte.
+ * For example, PUT_BYTE could be "*output++ = tmp;" PUT_BYTE will be
+ * executed up to 4 times for values in the valid UTF-8 range and up to
+ * 7 times in the general case, depending on the length of the converted
+ * Unicode character.
+ */
+#define PUT_UTF8(val, tmp, PUT_BYTE)\
+ {\
+ int bytes, shift;\
+ uint32_t in = val;\
+ if (in < 0x80) {\
+ tmp = in;\
+ PUT_BYTE\
+ } else {\
+ bytes = (av_log2(in) + 4) / 5;\
+ shift = (bytes - 1) * 6;\
+ tmp = (256 - (256 >> bytes)) | (in >> shift);\
+ PUT_BYTE\
+ while (shift >= 6) {\
+ shift -= 6;\
+ tmp = 0x80 | ((in >> shift) & 0x3f);\
+ PUT_BYTE\
+ }\
+ }\
+ }
+
+/**
+ * @def PUT_UTF16(val, tmp, PUT_16BIT)
+ * Convert a 32-bit Unicode character to its UTF-16 encoded form (2 or 4 bytes).
+ * @param val is an input-only argument and should be of type uint32_t. It holds
+ * a UCS-4 encoded Unicode character that is to be converted to UTF-16. If
+ * val is given as a function it is executed only once.
+ * @param tmp is a temporary variable and should be of type uint16_t. It
+ * represents an intermediate value during conversion that is to be
+ * output by PUT_16BIT.
+ * @param PUT_16BIT writes the converted UTF-16 data to any proper destination
+ * in desired endianness. It could be a function or a statement, and uses tmp
+ * as the input byte. For example, PUT_BYTE could be "*output++ = tmp;"
+ * PUT_BYTE will be executed 1 or 2 times depending on input character.
+ */
+#define PUT_UTF16(val, tmp, PUT_16BIT)\
+ {\
+ uint32_t in = val;\
+ if (in < 0x10000) {\
+ tmp = in;\
+ PUT_16BIT\
+ } else {\
+ tmp = 0xD800 | ((in - 0x10000) >> 10);\
+ PUT_16BIT\
+ tmp = 0xDC00 | ((in - 0x10000) & 0x3FF);\
+ PUT_16BIT\
+ }\
+ }\
+
+
+
+#include "mem.h"
+
+#ifdef HAVE_AV_CONFIG_H
+# include "internal.h"
+#endif /* HAVE_AV_CONFIG_H */
+
+#endif /* AVUTIL_COMMON_H */
+
+/*
+ * The following definitions are outside the multiple inclusion guard
+ * to ensure they are immediately available in intmath.h.
+ */
+
+#ifndef av_ceil_log2
+# define av_ceil_log2 av_ceil_log2_c
+#endif
+#ifndef av_clip
+# define av_clip av_clip_c
+#endif
+#ifndef av_clip_uint8
+# define av_clip_uint8 av_clip_uint8_c
+#endif
+#ifndef av_clip_int8
+# define av_clip_int8 av_clip_int8_c
+#endif
+#ifndef av_clip_uint16
+# define av_clip_uint16 av_clip_uint16_c
+#endif
+#ifndef av_clip_int16
+# define av_clip_int16 av_clip_int16_c
+#endif
+#ifndef av_clipl_int32
+# define av_clipl_int32 av_clipl_int32_c
+#endif
+#ifndef av_clip_uintp2
+# define av_clip_uintp2 av_clip_uintp2_c
+#endif
+#ifndef av_sat_add32
+# define av_sat_add32 av_sat_add32_c
+#endif
+#ifndef av_sat_dadd32
+# define av_sat_dadd32 av_sat_dadd32_c
+#endif
+#ifndef av_clipf
+# define av_clipf av_clipf_c
+#endif
+#ifndef av_popcount
+# define av_popcount av_popcount_c
+#endif
+#ifndef av_popcount64
+# define av_popcount64 av_popcount64_c
+#endif
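
A usage sketch, not part of the upstream header, exercising the clipping and bit-twiddling helpers above in isolation; it only assumes that the libavutil include directory is available.

    #include <stdio.h>
    #include "libavutil/common.h"

    int main(void)
    {
        static const int samples[] = { -300, -5, 100, 70000 };
        int i;

        /* Saturate a few out-of-range values into 8- and 16-bit ranges. */
        for (i = 0; i < (int)FF_ARRAY_ELEMS(samples); i++)
            printf("%6d clips to uint8 %3d, int16 %6d\n",
                   samples[i], av_clip_uint8(samples[i]), av_clip_int16(samples[i]));

        printf("FFALIGN(1917, 16)   = %d\n", FFALIGN(1917, 16));
        printf("av_popcount(0xF0F0) = %d\n", av_popcount(0xF0F0));
        return 0;
    }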
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/cpu.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/cpu.h
new file mode 100644
index 000000000..4929512c6
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/cpu.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CPU_H
+#define AVUTIL_CPU_H
+
+#include "version.h"
+
+#define AV_CPU_FLAG_FORCE 0x80000000 /* force usage of selected flags (OR) */
+
+ /* lower 16 bits - CPU features */
+#define AV_CPU_FLAG_MMX 0x0001 ///< standard MMX
+#define AV_CPU_FLAG_MMXEXT 0x0002 ///< SSE integer functions or AMD MMX ext
+#if FF_API_CPU_FLAG_MMX2
+#define AV_CPU_FLAG_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext
+#endif
+#define AV_CPU_FLAG_3DNOW 0x0004 ///< AMD 3DNOW
+#define AV_CPU_FLAG_SSE 0x0008 ///< SSE functions
+#define AV_CPU_FLAG_SSE2 0x0010 ///< PIV SSE2 functions
+#define AV_CPU_FLAG_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster
+#define AV_CPU_FLAG_3DNOWEXT 0x0020 ///< AMD 3DNowExt
+#define AV_CPU_FLAG_SSE3 0x0040 ///< Prescott SSE3 functions
+#define AV_CPU_FLAG_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster
+#define AV_CPU_FLAG_SSSE3 0x0080 ///< Conroe SSSE3 functions
+#define AV_CPU_FLAG_ATOM 0x10000000 ///< Atom processor, some SSSE3 instructions are slower
+#define AV_CPU_FLAG_SSE4 0x0100 ///< Penryn SSE4.1 functions
+#define AV_CPU_FLAG_SSE42 0x0200 ///< Nehalem SSE4.2 functions
+#define AV_CPU_FLAG_AVX 0x4000 ///< AVX functions: requires OS support even if YMM registers aren't used
+#define AV_CPU_FLAG_XOP 0x0400 ///< Bulldozer XOP functions
+#define AV_CPU_FLAG_FMA4 0x0800 ///< Bulldozer FMA4 functions
+#define AV_CPU_FLAG_CMOV 0x1000 ///< i686 cmov
+
+#define AV_CPU_FLAG_ALTIVEC 0x0001 ///< standard
+
+#define AV_CPU_FLAG_ARMV5TE (1 << 0)
+#define AV_CPU_FLAG_ARMV6 (1 << 1)
+#define AV_CPU_FLAG_ARMV6T2 (1 << 2)
+#define AV_CPU_FLAG_VFP (1 << 3)
+#define AV_CPU_FLAG_VFPV3 (1 << 4)
+#define AV_CPU_FLAG_NEON (1 << 5)
+
+/**
+ * Return the flags which specify extensions supported by the CPU.
+ */
+int av_get_cpu_flags(void);
+
+/**
+ * Set a mask on flags returned by av_get_cpu_flags().
+ * This function is mainly useful for testing.
+ *
+ * @warning this function is not thread safe.
+ */
+void av_set_cpu_flags_mask(int mask);
+
+/**
+ * Parse CPU flags from a string.
+ *
+ * @return a combination of AV_CPU_* flags, negative on error.
+ */
+int av_parse_cpu_flags(const char *s);
+
+/* The following CPU-specific functions shall not be called directly. */
+int ff_get_cpu_flags_arm(void);
+int ff_get_cpu_flags_ppc(void);
+int ff_get_cpu_flags_x86(void);
+
+#endif /* AVUTIL_CPU_H */
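
A usage sketch, not part of the upstream header: querying the detected CPU flags and masking them for testing, as the declarations above describe. The x86 flag names are only meaningful on x86 builds, hence the guard.

    #include <stdio.h>
    #include "libavutil/cpu.h"

    int main(void)
    {
        int flags = av_get_cpu_flags();

        printf("detected cpu flags: 0x%08x\n", (unsigned)flags);
    #if defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)
        if (flags & AV_CPU_FLAG_SSE2)
            printf("SSE2 code paths usable\n");
        if (flags & AV_CPU_FLAG_AVX)
            printf("AVX code paths usable\n");
    #endif

        /* For testing, pretend no extensions are present (not thread safe). */
        av_set_cpu_flags_mask(0);
        printf("after masking: 0x%08x\n", (unsigned)av_get_cpu_flags());
        return 0;
    }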
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/crc.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/crc.h
new file mode 100644
index 000000000..0540619d2
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/crc.h
@@ -0,0 +1,74 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CRC_H
+#define AVUTIL_CRC_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include "attributes.h"
+
+typedef uint32_t AVCRC;
+
+typedef enum {
+ AV_CRC_8_ATM,
+ AV_CRC_16_ANSI,
+ AV_CRC_16_CCITT,
+ AV_CRC_32_IEEE,
+    AV_CRC_32_IEEE_LE, /**< reversed bit-order version of AV_CRC_32_IEEE */
+    AV_CRC_MAX,        /**< Not part of public API! Do not use outside libavutil. */
+}AVCRCId;
+
+/**
+ * Initialize a CRC table.
+ * @param ctx must be an array of size sizeof(AVCRC)*257 or sizeof(AVCRC)*1024
+ * @param le If 1, the lowest bit represents the coefficient for the highest
+ * exponent of the corresponding polynomial (both for poly and
+ * actual CRC).
+ * If 0, you must swap the CRC parameter and the result of av_crc
+ * if you need the standard representation (can be simplified in
+ * most cases to e.g. bswap16):
+ * av_bswap32(crc << (32-bits))
+ * @param bits number of bits for the CRC
+ * @param poly generator polynomial without the x**bits coefficient, in the
+ * representation as specified by le
+ * @param ctx_size size of ctx in bytes
+ * @return <0 on failure
+ */
+int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size);
+
+/**
+ * Get an initialized standard CRC table.
+ * @param crc_id ID of a standard CRC
+ * @return a pointer to the CRC table or NULL on failure
+ */
+const AVCRC *av_crc_get_table(AVCRCId crc_id);
+
+/**
+ * Calculate the CRC of a block.
+ * @param crc CRC of previous blocks if any or initial value for CRC
+ * @return CRC updated with the data from the given block
+ *
+ * @see av_crc_init() "le" parameter
+ */
+uint32_t av_crc(const AVCRC *ctx, uint32_t crc,
+ const uint8_t *buffer, size_t length) av_pure;
+
+#endif /* AVUTIL_CRC_H */
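
A usage sketch, not part of the upstream header, showing the call pattern of the table-driven CRC API above. Note that the raw av_crc() result is not pre/post-conditioned, so it only matches other tools' CRC-32 conventions if the caller applies the usual initial value and final XOR.

    #include <stdio.h>
    #include "libavutil/crc.h"

    int main(void)
    {
        static const uint8_t data[] = "123456789";
        const AVCRC *table = av_crc_get_table(AV_CRC_32_IEEE_LE);
        uint32_t crc;

        if (!table)
            return 1;
        /* Feed the whole buffer in one go; the return value can be passed back
         * as the "crc" argument to continue over further blocks. */
        crc = av_crc(table, 0, data, sizeof(data) - 1);
        printf("raw CRC (IEEE, LE table) of \"123456789\": 0x%08x\n", crc);
        return 0;
    }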
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/dict.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/dict.h
new file mode 100644
index 000000000..492da9a41
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/dict.h
@@ -0,0 +1,129 @@
+/*
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Public dictionary API.
+ */
+
+#ifndef AVUTIL_DICT_H
+#define AVUTIL_DICT_H
+
+/**
+ * @addtogroup lavu_dict AVDictionary
+ * @ingroup lavu_data
+ *
+ * @brief Simple key:value store
+ *
+ * @{
+ * Dictionaries are used for storing key:value pairs. To create
+ * an AVDictionary, simply pass an address of a NULL pointer to
+ * av_dict_set(). NULL can be used as an empty dictionary wherever
+ * a pointer to an AVDictionary is required.
+ * Use av_dict_get() to retrieve an entry or iterate over all
+ * entries and finally av_dict_free() to free the dictionary
+ * and all its contents.
+ *
+ * @code
+ * AVDictionary *d = NULL; // "create" an empty dictionary
+ * av_dict_set(&d, "foo", "bar", 0); // add an entry
+ *
+ * char *k = av_strdup("key"); // if your strings are already allocated,
+ * char *v = av_strdup("value"); // you can avoid copying them like this
+ * av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
+ *
+ * AVDictionaryEntry *t = NULL;
+ * while (t = av_dict_get(d, "", t, AV_DICT_IGNORE_SUFFIX)) {
+ * <....> // iterate over all entries in d
+ * }
+ *
+ * av_dict_free(&d);
+ * @endcode
+ *
+ */
+
+#define AV_DICT_MATCH_CASE 1
+#define AV_DICT_IGNORE_SUFFIX 2
+#define AV_DICT_DONT_STRDUP_KEY 4 /**< Take ownership of a key that's been
+ allocated with av_malloc() and children. */
+#define AV_DICT_DONT_STRDUP_VAL 8   /**< Take ownership of a value that's been
+                                         allocated with av_malloc() and children. */
+#define AV_DICT_DONT_OVERWRITE 16 ///< Don't overwrite existing entries.
+#define AV_DICT_APPEND 32 /**< If the entry already exists, append to it. Note that no
+ delimiter is added, the strings are simply concatenated. */
+
+typedef struct AVDictionaryEntry {
+ char *key;
+ char *value;
+} AVDictionaryEntry;
+
+typedef struct AVDictionary AVDictionary;
+
+/**
+ * Get a dictionary entry with matching key.
+ *
+ * @param prev Set to the previous matching element to find the next.
+ * If set to NULL the first matching element is returned.
+ * @param flags Allows case as well as suffix-insensitive comparisons.
+ * @return Found entry or NULL; changing key or value leads to undefined behavior.
+ */
+AVDictionaryEntry *
+av_dict_get(AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags);
+
+/**
+ * Get number of entries in dictionary.
+ *
+ * @param m dictionary
+ * @return number of entries in dictionary
+ */
+int av_dict_count(const AVDictionary *m);
+
+/**
+ * Set the given entry in *pm, overwriting an existing entry.
+ *
+ * @param pm pointer to a pointer to a dictionary struct. If *pm is NULL
+ * a dictionary struct is allocated and put in *pm.
+ * @param key entry key to add to *pm (will be av_strduped depending on flags)
+ * @param value entry value to add to *pm (will be av_strduped depending on flags).
+ * Passing a NULL value will cause an existing entry to be deleted.
+ * @return >= 0 on success otherwise an error code <0
+ */
+int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags);
+
+/**
+ * Copy entries from one AVDictionary struct into another.
+ * @param dst pointer to a pointer to a AVDictionary struct. If *dst is NULL,
+ * this function will allocate a struct for you and put it in *dst
+ * @param src pointer to source AVDictionary struct
+ * @param flags flags to use when setting entries in *dst
+ * @note metadata is read using the AV_DICT_IGNORE_SUFFIX flag
+ */
+void av_dict_copy(AVDictionary **dst, AVDictionary *src, int flags);
+
+/**
+ * Free all the memory allocated for an AVDictionary struct
+ * and all keys and values.
+ */
+void av_dict_free(AVDictionary **m);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_DICT_H */
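
A usage sketch, not part of the upstream header, complementing the example embedded in the documentation above: it shows av_dict_count() and av_dict_copy(), then iterates over the copy. The option names are arbitrary placeholders.

    #include <stdio.h>
    #include "libavutil/dict.h"

    int main(void)
    {
        AVDictionary *opts = NULL, *copy = NULL;
        AVDictionaryEntry *e = NULL;

        av_dict_set(&opts, "threads", "4", 0);
        av_dict_set(&opts, "preset", "fast", 0);
        printf("%d entries in opts\n", av_dict_count(opts));

        /* Duplicate every entry into a second dictionary. */
        av_dict_copy(&copy, opts, 0);
        while ((e = av_dict_get(copy, "", e, AV_DICT_IGNORE_SUFFIX)))
            printf("%s=%s\n", e->key, e->value);

        av_dict_free(&opts);
        av_dict_free(&copy);
        return 0;
    }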
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/error.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/error.h
new file mode 100644
index 000000000..3dfd8807f
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/error.h
@@ -0,0 +1,83 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * error code definitions
+ */
+
+#ifndef AVUTIL_ERROR_H
+#define AVUTIL_ERROR_H
+
+#include <errno.h>
+#include <stddef.h>
+#include "avutil.h"
+
+/**
+ * @addtogroup lavu_error
+ *
+ * @{
+ */
+
+
+/* error handling */
+#if EDOM > 0
+#define AVERROR(e) (-(e)) ///< Returns a negative error code from a POSIX error code, to return from library functions.
+#define AVUNERROR(e) (-(e)) ///< Returns a POSIX error code from a library function error return value.
+#else
+/* Some platforms have E* and errno already negated. */
+#define AVERROR(e) (e)
+#define AVUNERROR(e) (e)
+#endif
+
+#define AVERROR_BSF_NOT_FOUND (-0x39acbd08) ///< Bitstream filter not found
+#define AVERROR_DECODER_NOT_FOUND (-0x3cbabb08) ///< Decoder not found
+#define AVERROR_DEMUXER_NOT_FOUND (-0x32babb08) ///< Demuxer not found
+#define AVERROR_ENCODER_NOT_FOUND (-0x3cb1ba08) ///< Encoder not found
+#define AVERROR_EOF (-0x5fb9b0bb) ///< End of file
+#define AVERROR_EXIT (-0x2bb6a7bb) ///< Immediate exit was requested; the called function should not be restarted
+#define AVERROR_FILTER_NOT_FOUND (-0x33b6b908) ///< Filter not found
+#define AVERROR_INVALIDDATA (-0x3ebbb1b7) ///< Invalid data found when processing input
+#define AVERROR_MUXER_NOT_FOUND (-0x27aab208) ///< Muxer not found
+#define AVERROR_OPTION_NOT_FOUND (-0x2bafb008) ///< Option not found
+#define AVERROR_PATCHWELCOME (-0x3aa8beb0) ///< Not yet implemented in Libav, patches welcome
+#define AVERROR_PROTOCOL_NOT_FOUND (-0x30adaf08) ///< Protocol not found
+#define AVERROR_STREAM_NOT_FOUND (-0x2dabac08) ///< Stream not found
+#define AVERROR_BUG (-0x5fb8aabe) ///< Bug detected, please report the issue
+#define AVERROR_UNKNOWN (-0x31b4b1ab) ///< Unknown error, typically from an external library
+#define AVERROR_EXPERIMENTAL (-0x2bb2afa8) ///< Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it.
+
+/**
+ * Put a description of the AVERROR code errnum in errbuf.
+ * In case of failure the global variable errno is set to indicate the
+ * error. Even in case of failure av_strerror() will write a generic
+ * error message for the provided errnum into errbuf.
+ *
+ * @param errnum error code to describe
+ * @param errbuf buffer to which description is written
+ * @param errbuf_size the size in bytes of errbuf
+ * @return 0 on success, a negative value if a description for errnum
+ * cannot be found
+ */
+int av_strerror(int errnum, char *errbuf, size_t errbuf_size);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_ERROR_H */
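
A usage sketch, not part of the upstream header: wrapping a POSIX errno value with AVERROR() and turning error codes back into strings with av_strerror().

    #include <errno.h>
    #include <stdio.h>
    #include "libavutil/error.h"

    int main(void)
    {
        char msg[128];
        int err = AVERROR(ENOMEM);   /* wrap a POSIX error code */

        if (av_strerror(err, msg, sizeof(msg)) < 0)
            snprintf(msg, sizeof(msg), "unknown error %d", err);
        printf("AVERROR(ENOMEM) = %d: %s\n", err, msg);

        if (av_strerror(AVERROR_EOF, msg, sizeof(msg)) >= 0)
            printf("AVERROR_EOF: %s\n", msg);
        return 0;
    }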
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/eval.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/eval.h
new file mode 100644
index 000000000..ccb29e7a3
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/eval.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * simple arithmetic expression evaluator
+ */
+
+#ifndef AVUTIL_EVAL_H
+#define AVUTIL_EVAL_H
+
+#include "avutil.h"
+
+typedef struct AVExpr AVExpr;
+
+/**
+ * Parse and evaluate an expression.
+ * Note, this is significantly slower than av_expr_eval().
+ *
+ * @param res a pointer to a double where the result value of the
+ * expression is put, or NAN in case of error
+ * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)"
+ * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0}
+ * @param const_values a zero terminated array of values for the identifiers from const_names
+ * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers
+ * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument
+ * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers
+ * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments
+ * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2
+ * @param log_ctx parent logging context
+ * @return 0 in case of success, a negative value corresponding to an
+ * AVERROR code otherwise
+ */
+int av_expr_parse_and_eval(double *res, const char *s,
+ const char * const *const_names, const double *const_values,
+ const char * const *func1_names, double (* const *funcs1)(void *, double),
+ const char * const *func2_names, double (* const *funcs2)(void *, double, double),
+ void *opaque, int log_offset, void *log_ctx);
+
+/**
+ * Parse an expression.
+ *
+ * @param expr a pointer where an AVExpr containing the parsed value is put
+ * in case of successful parsing, or NULL otherwise.
+ * The pointed-to AVExpr must be freed with av_expr_free() by the user
+ * when it is no longer needed.
+ * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)"
+ * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0}
+ * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers
+ * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument
+ * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers
+ * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments
+ * @param log_ctx parent logging context
+ * @return 0 in case of success, a negative value corresponding to an
+ * AVERROR code otherwise
+ */
+int av_expr_parse(AVExpr **expr, const char *s,
+ const char * const *const_names,
+ const char * const *func1_names, double (* const *funcs1)(void *, double),
+ const char * const *func2_names, double (* const *funcs2)(void *, double, double),
+ int log_offset, void *log_ctx);
+
+/**
+ * Evaluate a previously parsed expression.
+ *
+ * @param const_values a zero terminated array of values for the identifiers from av_expr_parse() const_names
+ * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2
+ * @return the value of the expression
+ */
+double av_expr_eval(AVExpr *e, const double *const_values, void *opaque);
+
+/**
+ * Free a parsed expression previously created with av_expr_parse().
+ */
+void av_expr_free(AVExpr *e);
+
+/**
+ * Parse the string in numstr and return its value as a double. If
+ * the string is empty, contains only whitespaces, or does not contain
+ * an initial substring that has the expected syntax for a
+ * floating-point number, no conversion is performed. In this case,
+ * returns a value of zero and the value returned in tail is the value
+ * of numstr.
+ *
+ * @param numstr a string representing a number, may contain one of
+ * the International System number postfixes, for example 'K', 'M',
+ * 'G'. If 'i' is appended after the postfix, powers of 2 are used
+ * instead of powers of 10. The 'B' postfix multiplies the value by
+ * 8, and can be appended after another postfix or used alone. This
+ * allows using, for example, 'KB', 'MiB', 'G' and 'B' as postfixes.
+ * @param tail if non-NULL puts here the pointer to the char next
+ * after the last parsed character
+ */
+double av_strtod(const char *numstr, char **tail);
+
+#endif /* AVUTIL_EVAL_H */
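
A usage sketch, not part of the upstream header, of the one-shot av_expr_parse_and_eval() path with two user constants and no custom functions. Passing NULL for the funcs1/funcs2 tables is an assumption about the implementation, not something this header spells out.

    #include <stdio.h>
    #include "libavutil/eval.h"

    int main(void)
    {
        static const char * const names[] = { "w", "h", NULL };
        static const double values[]      = { 1920, 1080 };
        double res;
        int ret;

        /* Parse and evaluate in one call; see the documentation above. */
        ret = av_expr_parse_and_eval(&res, "w*h + sin(0)",
                                     names, values,
                                     NULL, NULL, NULL, NULL,
                                     NULL, 0, NULL);
        if (ret < 0)
            return 1;
        printf("result: %f\n", res);
        return 0;
    }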
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/fifo.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/fifo.h
new file mode 100644
index 000000000..ea30f5d2b
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/fifo.h
@@ -0,0 +1,131 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * a very simple circular buffer FIFO implementation
+ */
+
+#ifndef AVUTIL_FIFO_H
+#define AVUTIL_FIFO_H
+
+#include <stdint.h>
+#include "avutil.h"
+#include "attributes.h"
+
+typedef struct AVFifoBuffer {
+ uint8_t *buffer;
+ uint8_t *rptr, *wptr, *end;
+ uint32_t rndx, wndx;
+} AVFifoBuffer;
+
+/**
+ * Initialize an AVFifoBuffer.
+ * @param size size of the FIFO in bytes
+ * @return AVFifoBuffer or NULL in case of memory allocation failure
+ */
+AVFifoBuffer *av_fifo_alloc(unsigned int size);
+
+/**
+ * Free an AVFifoBuffer.
+ * @param f AVFifoBuffer to free
+ */
+void av_fifo_free(AVFifoBuffer *f);
+
+/**
+ * Reset the AVFifoBuffer to the state right after av_fifo_alloc(); in particular, it is emptied.
+ * @param f AVFifoBuffer to reset
+ */
+void av_fifo_reset(AVFifoBuffer *f);
+
+/**
+ * Return the amount of data in bytes in the AVFifoBuffer, that is the
+ * amount of data you can read from it.
+ * @param f AVFifoBuffer to read from
+ * @return size
+ */
+int av_fifo_size(AVFifoBuffer *f);
+
+/**
+ * Return the amount of space in bytes in the AVFifoBuffer, that is the
+ * amount of data you can write into it.
+ * @param f AVFifoBuffer to write into
+ * @return size
+ */
+int av_fifo_space(AVFifoBuffer *f);
+
+/**
+ * Feed data from an AVFifoBuffer to a user-supplied callback.
+ * @param f AVFifoBuffer to read from
+ * @param buf_size number of bytes to read
+ * @param func generic read function
+ * @param dest data destination
+ */
+int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int));
+
+/**
+ * Feed data from a user-supplied callback to an AVFifoBuffer.
+ * @param f AVFifoBuffer to write to
+ * @param src data source; non-const since it may be used as a
+ * modifiable context by the function defined in func
+ * @param size number of bytes to write
+ * @param func generic write function; the first parameter is src,
+ * the second is dest_buf, the third is dest_buf_size.
+ * func must return the number of bytes written to dest_buf, or <= 0 to
+ * indicate no more data available to write.
+ * If func is NULL, src is interpreted as a simple byte array for source data.
+ * @return the number of bytes written to the FIFO
+ */
+int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int));
+
+/**
+ * Resize an AVFifoBuffer.
+ * @param f AVFifoBuffer to resize
+ * @param size new AVFifoBuffer size in bytes
+ * @return <0 for failure, >=0 otherwise
+ */
+int av_fifo_realloc2(AVFifoBuffer *f, unsigned int size);
+
+/**
+ * Read and discard the specified amount of data from an AVFifoBuffer.
+ * @param f AVFifoBuffer to read from
+ * @param size amount of data to read in bytes
+ */
+void av_fifo_drain(AVFifoBuffer *f, int size);
+
+/**
+ * Return a pointer to the data stored in a FIFO buffer at a certain offset.
+ * The FIFO buffer is not modified.
+ *
+ * @param f AVFifoBuffer to peek at, f must be non-NULL
+ * @param offs an offset in bytes; its absolute value must be less
+ *             than the used buffer size or the returned pointer will
+ *             point outside the buffer data.
+ * The used buffer size can be checked with av_fifo_size().
+ */
+static inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs)
+{
+ uint8_t *ptr = f->rptr + offs;
+ if (ptr >= f->end)
+ ptr = f->buffer + (ptr - f->end);
+ else if (ptr < f->buffer)
+ ptr = f->end - (f->buffer - ptr);
+ return ptr;
+}
+
+#endif /* AVUTIL_FIFO_H */
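
A usage sketch, not part of the upstream header. Writing with a NULL callback treats src as a plain byte array, as documented above; reading with a NULL callback copying straight into the destination is an assumption about the libav implementation rather than something this header promises.

    #include <stdio.h>
    #include "libavutil/fifo.h"

    int main(void)
    {
        AVFifoBuffer *f = av_fifo_alloc(16);
        char out[7] = { 0 };

        if (!f)
            return 1;

        /* NULL write callback: src is a byte array (see the docs above). */
        av_fifo_generic_write(f, (void *)"abcdef", 6, NULL);
        printf("%d bytes buffered, %d bytes free\n", av_fifo_size(f), av_fifo_space(f));

        /* NULL read callback: assumed to copy straight into "out". */
        av_fifo_generic_read(f, out, 6, NULL);
        printf("read back \"%s\"\n", out);

        av_fifo_free(f);
        return 0;
    }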
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/file.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/file.h
new file mode 100644
index 000000000..e3f02a830
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/file.h
@@ -0,0 +1,54 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_FILE_H
+#define AVUTIL_FILE_H
+
+#include <stdint.h>
+
+#include "avutil.h"
+
+/**
+ * @file
+ * Misc file utilities.
+ */
+
+/**
+ * Read the file with name filename, and put its content in a newly
+ * allocated buffer or map it with mmap() when available.
+ * In case of success set *bufptr to the read or mmapped buffer, and
+ * *size to the size in bytes of the buffer in *bufptr.
+ * The returned buffer must be released with av_file_unmap().
+ *
+ * @param log_offset loglevel offset used for logging
+ * @param log_ctx context used for logging
+ * @return a non negative number in case of success, a negative value
+ * corresponding to an AVERROR error code in case of failure
+ */
+int av_file_map(const char *filename, uint8_t **bufptr, size_t *size,
+ int log_offset, void *log_ctx);
+
+/**
+ * Unmap or free the buffer bufptr created by av_file_map().
+ *
+ * @param size size in bytes of bufptr, must be the same as returned
+ * by av_file_map()
+ */
+void av_file_unmap(uint8_t *bufptr, size_t size);
+
+#endif /* AVUTIL_FILE_H */
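
A usage sketch, not part of the upstream header: mapping a file named on the command line and releasing it again with av_file_unmap().

    #include <stdio.h>
    #include "libavutil/file.h"

    int main(int argc, char **argv)
    {
        uint8_t *buf;
        size_t size;
        int ret;

        if (argc < 2)
            return 1;
        ret = av_file_map(argv[1], &buf, &size, 0, NULL);
        if (ret < 0) {
            fprintf(stderr, "mapping %s failed\n", argv[1]);
            return 1;
        }
        printf("%s: %zu bytes, first byte 0x%02x\n",
               argv[1], size, size ? buf[0] : 0);
        av_file_unmap(buf, size);
        return 0;
    }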
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/imgutils.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/imgutils.h
new file mode 100644
index 000000000..71510132a
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/imgutils.h
@@ -0,0 +1,138 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_IMGUTILS_H
+#define AVUTIL_IMGUTILS_H
+
+/**
+ * @file
+ * misc image utilities
+ *
+ * @addtogroup lavu_picture
+ * @{
+ */
+
+#include "avutil.h"
+#include "pixdesc.h"
+
+/**
+ * Compute the max pixel step for each plane of an image with a
+ * format described by pixdesc.
+ *
+ * The pixel step is the distance in bytes between the first byte of
+ * the group of bytes which describe a pixel component and the first
+ * byte of the successive group in the same plane for the same
+ * component.
+ *
+ * @param max_pixsteps an array which is filled with the max pixel step
+ * for each plane. Since a plane may contain different pixel
+ * components, the computed max_pixsteps[plane] is relative to the
+ * component in the plane with the max pixel step.
+ * @param max_pixstep_comps an array which is filled with the component
+ * for each plane which has the max pixel step. May be NULL.
+ */
+void av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4],
+ const AVPixFmtDescriptor *pixdesc);
+
+/**
+ * Compute the size of an image line with format pix_fmt and width
+ * width for the plane plane.
+ *
+ * @return the computed size in bytes
+ */
+int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane);
+
+/**
+ * Fill plane linesizes for an image with pixel format pix_fmt and
+ * width width.
+ *
+ * @param linesizes array to be filled with the linesize for each plane
+ * @return >= 0 in case of success, a negative error code otherwise
+ */
+int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width);
+
+/**
+ * Fill plane data pointers for an image with pixel format pix_fmt and
+ * height height.
+ *
+ * @param data pointers array to be filled with the pointer for each image plane
+ * @param ptr the pointer to a buffer which will contain the image
+ * @param linesizes the array containing the linesize for each
+ * plane, should be filled by av_image_fill_linesizes()
+ * @return the size in bytes required for the image buffer, a negative
+ * error code in case of failure
+ */
+int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height,
+ uint8_t *ptr, const int linesizes[4]);
+
+/**
+ * Allocate an image with size w and h and pixel format pix_fmt, and
+ * fill pointers and linesizes accordingly.
+ * The allocated image buffer has to be freed by using
+ * av_freep(&pointers[0]).
+ *
+ * @param align the value to use for buffer size alignment
+ * @return the size in bytes required for the image buffer, a negative
+ * error code in case of failure
+ */
+int av_image_alloc(uint8_t *pointers[4], int linesizes[4],
+ int w, int h, enum AVPixelFormat pix_fmt, int align);
+
+/**
+ * Copy image plane from src to dst.
+ * That is, copy "height" number of lines of "bytewidth" bytes each.
+ * The first byte of each successive line is separated by *_linesize
+ * bytes.
+ *
+ * @param dst_linesize linesize for the image plane in dst
+ * @param src_linesize linesize for the image plane in src
+ */
+void av_image_copy_plane(uint8_t *dst, int dst_linesize,
+ const uint8_t *src, int src_linesize,
+ int bytewidth, int height);
+
+/**
+ * Copy image in src_data to dst_data.
+ *
+ * @param dst_linesizes linesizes for the image in dst_data
+ * @param src_linesizes linesizes for the image in src_data
+ */
+void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4],
+ const uint8_t *src_data[4], const int src_linesizes[4],
+ enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * Check if the given dimension of an image is valid, meaning that all
+ * bytes of the image can be addressed with a signed int.
+ *
+ * @param w the width of the picture
+ * @param h the height of the picture
+ * @param log_offset the offset to sum to the log level for logging with log_ctx
+ * @param log_ctx the parent logging context, it may be NULL
+ * @return >= 0 if valid, a negative error code otherwise
+ */
+int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx);
+
+int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt);
+
+/**
+ * @}
+ */
+
+
+#endif /* AVUTIL_IMGUTILS_H */
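
A usage sketch, not part of the upstream header: allocating a YUV 4:2:0 image and freeing it with av_freep() as the av_image_alloc() documentation above prescribes. AV_PIX_FMT_YUV420P and av_freep() come from pixfmt.h and mem.h, which are assumed to ship alongside this header.

    #include <stdio.h>
    #include <string.h>
    #include "libavutil/imgutils.h"
    #include "libavutil/mem.h"

    int main(void)
    {
        uint8_t *data[4];
        int linesize[4];
        int ret;

        /* 640x480 planar YUV 4:2:0, rows aligned to 16 bytes. */
        ret = av_image_alloc(data, linesize, 640, 480, AV_PIX_FMT_YUV420P, 16);
        if (ret < 0)
            return 1;
        printf("image buffer: %d bytes, luma stride %d, chroma stride %d\n",
               ret, linesize[0], linesize[1]);

        /* Blank the luma plane; the 480 rows are linesize[0] bytes apart. */
        memset(data[0], 0, (size_t)linesize[0] * 480);

        av_freep(&data[0]);   /* frees the buffer backing all planes (see above) */
        return 0;
    }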
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/intfloat.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/intfloat.h
new file mode 100644
index 000000000..38d26ad87
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/intfloat.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2011 Mans Rullgard
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_INTFLOAT_H
+#define AVUTIL_INTFLOAT_H
+
+#include <stdint.h>
+#include "attributes.h"
+
+union av_intfloat32 {
+ uint32_t i;
+ float f;
+};
+
+union av_intfloat64 {
+ uint64_t i;
+ double f;
+};
+
+/**
+ * Reinterpret a 32-bit integer as a float.
+ */
+static av_always_inline float av_int2float(uint32_t i)
+{
+ union av_intfloat32 v;
+ v.i = i;
+ return v.f;
+}
+
+/**
+ * Reinterpret a float as a 32-bit integer.
+ */
+static av_always_inline uint32_t av_float2int(float f)
+{
+ union av_intfloat32 v;
+ v.f = f;
+ return v.i;
+}
+
+/**
+ * Reinterpret a 64-bit integer as a double.
+ */
+static av_always_inline double av_int2double(uint64_t i)
+{
+ union av_intfloat64 v;
+ v.i = i;
+ return v.f;
+}
+
+/**
+ * Reinterpret a double as a 64-bit integer.
+ */
+static av_always_inline uint64_t av_double2int(double f)
+{
+ union av_intfloat64 v;
+ v.f = f;
+ return v.i;
+}
+
+#endif /* AVUTIL_INTFLOAT_H */
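
A usage sketch, not part of the upstream header: bit-exact float/integer punning with the helpers above, for example to inspect the sign bit or serialize a float.

    #include <stdio.h>
    #include "libavutil/intfloat.h"

    int main(void)
    {
        float f = -0.15625f;
        uint32_t bits = av_float2int(f);

        /* The unions expose the raw IEEE-754 representation. */
        printf("%f -> 0x%08x (sign bit %u)\n", f, bits, bits >> 31);
        printf("round trip: %f\n", av_int2float(bits));

        printf("double 1.0 -> 0x%016llx\n",
               (unsigned long long)av_double2int(1.0));
        return 0;
    }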
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/intfloat_readwrite.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/intfloat_readwrite.h
new file mode 100644
index 000000000..f093b92cd
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/intfloat_readwrite.h
@@ -0,0 +1,40 @@
+/*
+ * copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_INTFLOAT_READWRITE_H
+#define AVUTIL_INTFLOAT_READWRITE_H
+
+#include <stdint.h>
+#include "attributes.h"
+
+/* IEEE 80 bits extended float */
+typedef struct AVExtFloat {
+ uint8_t exponent[2];
+ uint8_t mantissa[8];
+} AVExtFloat;
+
+attribute_deprecated double av_int2dbl(int64_t v) av_const;
+attribute_deprecated float av_int2flt(int32_t v) av_const;
+attribute_deprecated double av_ext2dbl(const AVExtFloat ext) av_const;
+attribute_deprecated int64_t av_dbl2int(double d) av_const;
+attribute_deprecated int32_t av_flt2int(float d) av_const;
+attribute_deprecated AVExtFloat av_dbl2ext(double d) av_const;
+
+#endif /* AVUTIL_INTFLOAT_READWRITE_H */
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/intreadwrite.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/intreadwrite.h
new file mode 100644
index 000000000..f77fd60f3
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/intreadwrite.h
@@ -0,0 +1,549 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_INTREADWRITE_H
+#define AVUTIL_INTREADWRITE_H
+
+#include <stdint.h>
+#include "libavutil/avconfig.h"
+#include "attributes.h"
+#include "bswap.h"
+
+typedef union {
+ uint64_t u64;
+ uint32_t u32[2];
+ uint16_t u16[4];
+ uint8_t u8 [8];
+ double f64;
+ float f32[2];
+} av_alias av_alias64;
+
+typedef union {
+ uint32_t u32;
+ uint16_t u16[2];
+ uint8_t u8 [4];
+ float f32;
+} av_alias av_alias32;
+
+typedef union {
+ uint16_t u16;
+ uint8_t u8 [2];
+} av_alias av_alias16;
+
+/*
+ * Arch-specific headers can provide any combination of
+ * AV_[RW][BLN](16|24|32|64) and AV_(COPY|SWAP|ZERO)(64|128) macros.
+ * Preprocessor symbols must be defined, even if these are implemented
+ * as inline functions.
+ */
+
+#ifdef HAVE_AV_CONFIG_H
+
+#include "config.h"
+
+#if ARCH_ARM
+# include "arm/intreadwrite.h"
+#elif ARCH_AVR32
+# include "avr32/intreadwrite.h"
+#elif ARCH_MIPS
+# include "mips/intreadwrite.h"
+#elif ARCH_PPC
+# include "ppc/intreadwrite.h"
+#elif ARCH_TOMI
+# include "tomi/intreadwrite.h"
+#elif ARCH_X86
+# include "x86/intreadwrite.h"
+#endif
+
+#endif /* HAVE_AV_CONFIG_H */
+
+/*
+ * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers.
+ */
+
+#if AV_HAVE_BIGENDIAN
+
+# if defined(AV_RN16) && !defined(AV_RB16)
+# define AV_RB16(p) AV_RN16(p)
+# elif !defined(AV_RN16) && defined(AV_RB16)
+# define AV_RN16(p) AV_RB16(p)
+# endif
+
+# if defined(AV_WN16) && !defined(AV_WB16)
+# define AV_WB16(p, v) AV_WN16(p, v)
+# elif !defined(AV_WN16) && defined(AV_WB16)
+# define AV_WN16(p, v) AV_WB16(p, v)
+# endif
+
+# if defined(AV_RN24) && !defined(AV_RB24)
+# define AV_RB24(p) AV_RN24(p)
+# elif !defined(AV_RN24) && defined(AV_RB24)
+# define AV_RN24(p) AV_RB24(p)
+# endif
+
+# if defined(AV_WN24) && !defined(AV_WB24)
+# define AV_WB24(p, v) AV_WN24(p, v)
+# elif !defined(AV_WN24) && defined(AV_WB24)
+# define AV_WN24(p, v) AV_WB24(p, v)
+# endif
+
+# if defined(AV_RN32) && !defined(AV_RB32)
+# define AV_RB32(p) AV_RN32(p)
+# elif !defined(AV_RN32) && defined(AV_RB32)
+# define AV_RN32(p) AV_RB32(p)
+# endif
+
+# if defined(AV_WN32) && !defined(AV_WB32)
+# define AV_WB32(p, v) AV_WN32(p, v)
+# elif !defined(AV_WN32) && defined(AV_WB32)
+# define AV_WN32(p, v) AV_WB32(p, v)
+# endif
+
+# if defined(AV_RN64) && !defined(AV_RB64)
+# define AV_RB64(p) AV_RN64(p)
+# elif !defined(AV_RN64) && defined(AV_RB64)
+# define AV_RN64(p) AV_RB64(p)
+# endif
+
+# if defined(AV_WN64) && !defined(AV_WB64)
+# define AV_WB64(p, v) AV_WN64(p, v)
+# elif !defined(AV_WN64) && defined(AV_WB64)
+# define AV_WN64(p, v) AV_WB64(p, v)
+# endif
+
+#else /* AV_HAVE_BIGENDIAN */
+
+# if defined(AV_RN16) && !defined(AV_RL16)
+# define AV_RL16(p) AV_RN16(p)
+# elif !defined(AV_RN16) && defined(AV_RL16)
+# define AV_RN16(p) AV_RL16(p)
+# endif
+
+# if defined(AV_WN16) && !defined(AV_WL16)
+# define AV_WL16(p, v) AV_WN16(p, v)
+# elif !defined(AV_WN16) && defined(AV_WL16)
+# define AV_WN16(p, v) AV_WL16(p, v)
+# endif
+
+# if defined(AV_RN24) && !defined(AV_RL24)
+# define AV_RL24(p) AV_RN24(p)
+# elif !defined(AV_RN24) && defined(AV_RL24)
+# define AV_RN24(p) AV_RL24(p)
+# endif
+
+# if defined(AV_WN24) && !defined(AV_WL24)
+# define AV_WL24(p, v) AV_WN24(p, v)
+# elif !defined(AV_WN24) && defined(AV_WL24)
+# define AV_WN24(p, v) AV_WL24(p, v)
+# endif
+
+# if defined(AV_RN32) && !defined(AV_RL32)
+# define AV_RL32(p) AV_RN32(p)
+# elif !defined(AV_RN32) && defined(AV_RL32)
+# define AV_RN32(p) AV_RL32(p)
+# endif
+
+# if defined(AV_WN32) && !defined(AV_WL32)
+# define AV_WL32(p, v) AV_WN32(p, v)
+# elif !defined(AV_WN32) && defined(AV_WL32)
+# define AV_WN32(p, v) AV_WL32(p, v)
+# endif
+
+# if defined(AV_RN64) && !defined(AV_RL64)
+# define AV_RL64(p) AV_RN64(p)
+# elif !defined(AV_RN64) && defined(AV_RL64)
+# define AV_RN64(p) AV_RL64(p)
+# endif
+
+# if defined(AV_WN64) && !defined(AV_WL64)
+# define AV_WL64(p, v) AV_WN64(p, v)
+# elif !defined(AV_WN64) && defined(AV_WL64)
+# define AV_WN64(p, v) AV_WL64(p, v)
+# endif
+
+#endif /* !AV_HAVE_BIGENDIAN */
+
+/*
+ * Define AV_[RW]N helper macros to simplify definitions not provided
+ * by per-arch headers.
+ */
+
+#if defined(__GNUC__) && !defined(__TI_COMPILER_VERSION__)
+
+union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias;
+union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias;
+union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias;
+
+# define AV_RN(s, p) (((const union unaligned_##s *) (p))->l)
+# define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v))
+
+#elif defined(__DECC)
+
+# define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p)))
+# define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v))
+
+#elif AV_HAVE_FAST_UNALIGNED
+
+# define AV_RN(s, p) (((const av_alias##s*)(p))->u##s)
+# define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v))
+
+#else
+
+#ifndef AV_RB16
+# define AV_RB16(x) \
+ ((((const uint8_t*)(x))[0] << 8) | \
+ ((const uint8_t*)(x))[1])
+#endif
+#ifndef AV_WB16
+# define AV_WB16(p, d) do { \
+ ((uint8_t*)(p))[1] = (d); \
+ ((uint8_t*)(p))[0] = (d)>>8; \
+ } while(0)
+#endif
+
+#ifndef AV_RL16
+# define AV_RL16(x) \
+ ((((const uint8_t*)(x))[1] << 8) | \
+ ((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL16
+# define AV_WL16(p, d) do { \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ } while(0)
+#endif
+
+#ifndef AV_RB32
+# define AV_RB32(x) \
+ (((uint32_t)((const uint8_t*)(x))[0] << 24) | \
+ (((const uint8_t*)(x))[1] << 16) | \
+ (((const uint8_t*)(x))[2] << 8) | \
+ ((const uint8_t*)(x))[3])
+#endif
+#ifndef AV_WB32
+# define AV_WB32(p, d) do { \
+ ((uint8_t*)(p))[3] = (d); \
+ ((uint8_t*)(p))[2] = (d)>>8; \
+ ((uint8_t*)(p))[1] = (d)>>16; \
+ ((uint8_t*)(p))[0] = (d)>>24; \
+ } while(0)
+#endif
+
+#ifndef AV_RL32
+# define AV_RL32(x) \
+ (((uint32_t)((const uint8_t*)(x))[3] << 24) | \
+ (((const uint8_t*)(x))[2] << 16) | \
+ (((const uint8_t*)(x))[1] << 8) | \
+ ((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL32
+# define AV_WL32(p, d) do { \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[2] = (d)>>16; \
+ ((uint8_t*)(p))[3] = (d)>>24; \
+ } while(0)
+#endif
+
+#ifndef AV_RB64
+# define AV_RB64(x) \
+ (((uint64_t)((const uint8_t*)(x))[0] << 56) | \
+ ((uint64_t)((const uint8_t*)(x))[1] << 48) | \
+ ((uint64_t)((const uint8_t*)(x))[2] << 40) | \
+ ((uint64_t)((const uint8_t*)(x))[3] << 32) | \
+ ((uint64_t)((const uint8_t*)(x))[4] << 24) | \
+ ((uint64_t)((const uint8_t*)(x))[5] << 16) | \
+ ((uint64_t)((const uint8_t*)(x))[6] << 8) | \
+ (uint64_t)((const uint8_t*)(x))[7])
+#endif
+#ifndef AV_WB64
+# define AV_WB64(p, d) do { \
+ ((uint8_t*)(p))[7] = (d); \
+ ((uint8_t*)(p))[6] = (d)>>8; \
+ ((uint8_t*)(p))[5] = (d)>>16; \
+ ((uint8_t*)(p))[4] = (d)>>24; \
+ ((uint8_t*)(p))[3] = (d)>>32; \
+ ((uint8_t*)(p))[2] = (d)>>40; \
+ ((uint8_t*)(p))[1] = (d)>>48; \
+ ((uint8_t*)(p))[0] = (d)>>56; \
+ } while(0)
+#endif
+
+#ifndef AV_RL64
+# define AV_RL64(x) \
+ (((uint64_t)((const uint8_t*)(x))[7] << 56) | \
+ ((uint64_t)((const uint8_t*)(x))[6] << 48) | \
+ ((uint64_t)((const uint8_t*)(x))[5] << 40) | \
+ ((uint64_t)((const uint8_t*)(x))[4] << 32) | \
+ ((uint64_t)((const uint8_t*)(x))[3] << 24) | \
+ ((uint64_t)((const uint8_t*)(x))[2] << 16) | \
+ ((uint64_t)((const uint8_t*)(x))[1] << 8) | \
+ (uint64_t)((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL64
+# define AV_WL64(p, d) do { \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[2] = (d)>>16; \
+ ((uint8_t*)(p))[3] = (d)>>24; \
+ ((uint8_t*)(p))[4] = (d)>>32; \
+ ((uint8_t*)(p))[5] = (d)>>40; \
+ ((uint8_t*)(p))[6] = (d)>>48; \
+ ((uint8_t*)(p))[7] = (d)>>56; \
+ } while(0)
+#endif
+
+#if AV_HAVE_BIGENDIAN
+# define AV_RN(s, p) AV_RB##s(p)
+# define AV_WN(s, p, v) AV_WB##s(p, v)
+#else
+# define AV_RN(s, p) AV_RL##s(p)
+# define AV_WN(s, p, v) AV_WL##s(p, v)
+#endif
+
+#endif /* AV_HAVE_FAST_UNALIGNED */
+
+#ifndef AV_RN16
+# define AV_RN16(p) AV_RN(16, p)
+#endif
+
+#ifndef AV_RN32
+# define AV_RN32(p) AV_RN(32, p)
+#endif
+
+#ifndef AV_RN64
+# define AV_RN64(p) AV_RN(64, p)
+#endif
+
+#ifndef AV_WN16
+# define AV_WN16(p, v) AV_WN(16, p, v)
+#endif
+
+#ifndef AV_WN32
+# define AV_WN32(p, v) AV_WN(32, p, v)
+#endif
+
+#ifndef AV_WN64
+# define AV_WN64(p, v) AV_WN(64, p, v)
+#endif
+
+#if AV_HAVE_BIGENDIAN
+# define AV_RB(s, p) AV_RN##s(p)
+# define AV_WB(s, p, v) AV_WN##s(p, v)
+# define AV_RL(s, p) av_bswap##s(AV_RN##s(p))
+# define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v))
+#else
+# define AV_RB(s, p) av_bswap##s(AV_RN##s(p))
+# define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v))
+# define AV_RL(s, p) AV_RN##s(p)
+# define AV_WL(s, p, v) AV_WN##s(p, v)
+#endif
+
+#define AV_RB8(x) (((const uint8_t*)(x))[0])
+#define AV_WB8(p, d) do { ((uint8_t*)(p))[0] = (d); } while(0)
+
+#define AV_RL8(x) AV_RB8(x)
+#define AV_WL8(p, d) AV_WB8(p, d)
+
+#ifndef AV_RB16
+# define AV_RB16(p) AV_RB(16, p)
+#endif
+#ifndef AV_WB16
+# define AV_WB16(p, v) AV_WB(16, p, v)
+#endif
+
+#ifndef AV_RL16
+# define AV_RL16(p) AV_RL(16, p)
+#endif
+#ifndef AV_WL16
+# define AV_WL16(p, v) AV_WL(16, p, v)
+#endif
+
+#ifndef AV_RB32
+# define AV_RB32(p) AV_RB(32, p)
+#endif
+#ifndef AV_WB32
+# define AV_WB32(p, v) AV_WB(32, p, v)
+#endif
+
+#ifndef AV_RL32
+# define AV_RL32(p) AV_RL(32, p)
+#endif
+#ifndef AV_WL32
+# define AV_WL32(p, v) AV_WL(32, p, v)
+#endif
+
+#ifndef AV_RB64
+# define AV_RB64(p) AV_RB(64, p)
+#endif
+#ifndef AV_WB64
+# define AV_WB64(p, v) AV_WB(64, p, v)
+#endif
+
+#ifndef AV_RL64
+# define AV_RL64(p) AV_RL(64, p)
+#endif
+#ifndef AV_WL64
+# define AV_WL64(p, v) AV_WL(64, p, v)
+#endif
+
+#ifndef AV_RB24
+# define AV_RB24(x) \
+ ((((const uint8_t*)(x))[0] << 16) | \
+ (((const uint8_t*)(x))[1] << 8) | \
+ ((const uint8_t*)(x))[2])
+#endif
+#ifndef AV_WB24
+# define AV_WB24(p, d) do { \
+ ((uint8_t*)(p))[2] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[0] = (d)>>16; \
+ } while(0)
+#endif
+
+#ifndef AV_RL24
+# define AV_RL24(x) \
+ ((((const uint8_t*)(x))[2] << 16) | \
+ (((const uint8_t*)(x))[1] << 8) | \
+ ((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL24
+# define AV_WL24(p, d) do { \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[2] = (d)>>16; \
+ } while(0)
+#endif
+
+/*
+ * The AV_[RW]NA macros access naturally aligned data
+ * in a type-safe way.
+ */
+
+#define AV_RNA(s, p) (((const av_alias##s*)(p))->u##s)
+#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v))
+
+#ifndef AV_RN16A
+# define AV_RN16A(p) AV_RNA(16, p)
+#endif
+
+#ifndef AV_RN32A
+# define AV_RN32A(p) AV_RNA(32, p)
+#endif
+
+#ifndef AV_RN64A
+# define AV_RN64A(p) AV_RNA(64, p)
+#endif
+
+#ifndef AV_WN16A
+# define AV_WN16A(p, v) AV_WNA(16, p, v)
+#endif
+
+#ifndef AV_WN32A
+# define AV_WN32A(p, v) AV_WNA(32, p, v)
+#endif
+
+#ifndef AV_WN64A
+# define AV_WN64A(p, v) AV_WNA(64, p, v)
+#endif
+
+/*
+ * The AV_COPYxxU macros are suitable for copying data to/from unaligned
+ * memory locations.
+ */
+
+#define AV_COPYU(n, d, s) AV_WN##n(d, AV_RN##n(s));
+
+#ifndef AV_COPY16U
+# define AV_COPY16U(d, s) AV_COPYU(16, d, s)
+#endif
+
+#ifndef AV_COPY32U
+# define AV_COPY32U(d, s) AV_COPYU(32, d, s)
+#endif
+
+#ifndef AV_COPY64U
+# define AV_COPY64U(d, s) AV_COPYU(64, d, s)
+#endif
+
+#ifndef AV_COPY128U
+# define AV_COPY128U(d, s) \
+ do { \
+ AV_COPY64U(d, s); \
+ AV_COPY64U((char *)(d) + 8, (const char *)(s) + 8); \
+ } while(0)
+#endif
+
+/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be
+ * naturally aligned. They may be implemented using MMX,
+ * so emms_c() must be called before using any float code
+ * afterwards.
+ */
+
+#define AV_COPY(n, d, s) \
+ (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n)
+
+#ifndef AV_COPY16
+# define AV_COPY16(d, s) AV_COPY(16, d, s)
+#endif
+
+#ifndef AV_COPY32
+# define AV_COPY32(d, s) AV_COPY(32, d, s)
+#endif
+
+#ifndef AV_COPY64
+# define AV_COPY64(d, s) AV_COPY(64, d, s)
+#endif
+
+#ifndef AV_COPY128
+# define AV_COPY128(d, s) \
+ do { \
+ AV_COPY64(d, s); \
+ AV_COPY64((char*)(d)+8, (char*)(s)+8); \
+ } while(0)
+#endif
+
+#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b))
+
+#ifndef AV_SWAP64
+# define AV_SWAP64(a, b) AV_SWAP(64, a, b)
+#endif
+
+#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0)
+
+#ifndef AV_ZERO16
+# define AV_ZERO16(d) AV_ZERO(16, d)
+#endif
+
+#ifndef AV_ZERO32
+# define AV_ZERO32(d) AV_ZERO(32, d)
+#endif
+
+#ifndef AV_ZERO64
+# define AV_ZERO64(d) AV_ZERO(64, d)
+#endif
+
+#ifndef AV_ZERO128
+# define AV_ZERO128(d) \
+ do { \
+ AV_ZERO64(d); \
+ AV_ZERO64((char*)(d)+8); \
+ } while(0)
+#endif
+
+#endif /* AVUTIL_INTREADWRITE_H */
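The macros above give endian-explicit, alignment-tolerant access to byte buffers. As a minimal illustrative sketch (not part of the imported tree), assuming the bundled libavutil/ directory is on the include path and a C99 toolchain:

#include <stdint.h>
#include <stdio.h>
#include "libavutil/intreadwrite.h"

int main(void)
{
    uint8_t buf[8] = { 0 };

    AV_WB32(buf, 0x11223344u);   /* store 0x11223344 as big-endian bytes */
    AV_WL16(buf + 4, 0xBEEF);    /* store 0xBEEF as little-endian bytes */

    /* Reads are endian-explicit, so the results are identical on any host. */
    printf("BE32: 0x%08x\n", (unsigned)AV_RB32(buf));      /* 0x11223344 */
    printf("LE16: 0x%04x\n", (unsigned)AV_RL16(buf + 4));  /* 0xbeef */
    printf("byte: 0x%02x\n", (unsigned)AV_RB8(buf));       /* 0x11 */
    return 0;
}
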
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/lfg.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/lfg.h
new file mode 100644
index 000000000..5e526c1da
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/lfg.h
@@ -0,0 +1,62 @@
+/*
+ * Lagged Fibonacci PRNG
+ * Copyright (c) 2008 Michael Niedermayer
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_LFG_H
+#define AVUTIL_LFG_H
+
+typedef struct AVLFG {
+ unsigned int state[64];
+ int index;
+} AVLFG;
+
+void av_lfg_init(AVLFG *c, unsigned int seed);
+
+/**
+ * Get the next random unsigned 32-bit number using an ALFG.
+ *
+ * Please also consider a simple LCG like state = state*1664525+1013904223;
+ * it may be good enough and faster for your specific use case.
+ */
+static inline unsigned int av_lfg_get(AVLFG *c){
+ c->state[c->index & 63] = c->state[(c->index-24) & 63] + c->state[(c->index-55) & 63];
+ return c->state[c->index++ & 63];
+}
+
+/**
+ * Get the next random unsigned 32-bit number using a MLFG.
+ *
+ * Please also consider av_lfg_get() above, it is faster.
+ */
+static inline unsigned int av_mlfg_get(AVLFG *c){
+ unsigned int a= c->state[(c->index-55) & 63];
+ unsigned int b= c->state[(c->index-24) & 63];
+ return c->state[c->index++ & 63] = 2*a*b+a+b;
+}
+
+/**
+ * Get the next two numbers generated by a Box-Muller Gaussian
+ * generator using the random numbers issued by lfg.
+ *
+ * @param out array where the two generated numbers are placed
+ */
+void av_bmg_get(AVLFG *lfg, double out[2]);
+
+#endif /* AVUTIL_LFG_H */
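A brief sketch of using the lagged Fibonacci generator declared above; purely illustrative, and it assumes the program links against libavutil for av_lfg_init() and av_bmg_get():

#include <stdio.h>
#include "libavutil/lfg.h"

int main(void)
{
    AVLFG prng;
    double gauss[2];

    av_lfg_init(&prng, 0xdeadbeef);   /* a fixed seed gives a repeatable sequence */

    for (int i = 0; i < 4; i++)
        printf("uniform: %u\n", av_lfg_get(&prng));

    av_bmg_get(&prng, gauss);          /* two normally distributed values via Box-Muller */
    printf("gauss: %f %f\n", gauss[0], gauss[1]);
    return 0;
}
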
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/log.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/log.h
new file mode 100644
index 000000000..7b173302f
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/log.h
@@ -0,0 +1,173 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_LOG_H
+#define AVUTIL_LOG_H
+
+#include <stdarg.h>
+#include "avutil.h"
+#include "attributes.h"
+
+/**
+ * Describe the class of an AVClass context structure. That is an
+ * arbitrary struct of which the first field is a pointer to an
+ * AVClass struct (e.g. AVCodecContext, AVFormatContext etc.).
+ */
+typedef struct AVClass {
+ /**
+ * The name of the class; usually it is the same name as the
+ * context structure type to which the AVClass is associated.
+ */
+ const char* class_name;
+
+ /**
+ * A pointer to a function which returns the name of a context
+ * instance ctx associated with the class.
+ */
+ const char* (*item_name)(void* ctx);
+
+ /**
+ * a pointer to the first option specified in the class if any or NULL
+ *
+ * @see av_set_default_options()
+ */
+ const struct AVOption *option;
+
+ /**
+ * LIBAVUTIL_VERSION with which this structure was created.
+ * This is used to allow fields to be added without requiring major
+ * version bumps everywhere.
+ */
+
+ int version;
+
+ /**
+ * Offset in the structure where log_level_offset is stored.
+ * 0 means there is no such variable
+ */
+ int log_level_offset_offset;
+
+ /**
+ * Offset in the structure where a pointer to the parent context for
+ * logging is stored. For example a decoder could pass its AVCodecContext
+ * to eval as such a parent context, which an av_log() implementation
+ * could then leverage to display the parent context.
+     * The offset may be 0 if there is no such field.
+ */
+ int parent_log_context_offset;
+
+ /**
+ * Return next AVOptions-enabled child or NULL
+ */
+ void* (*child_next)(void *obj, void *prev);
+
+ /**
+ * Return an AVClass corresponding to the next potential
+ * AVOptions-enabled child.
+ *
+ * The difference between child_next and this is that
+ * child_next iterates over _already existing_ objects, while
+ * child_class_next iterates over _all possible_ children.
+ */
+ const struct AVClass* (*child_class_next)(const struct AVClass *prev);
+} AVClass;
+
+/* av_log API */
+
+#define AV_LOG_QUIET -8
+
+/**
+ * Something went really wrong and we will crash now.
+ */
+#define AV_LOG_PANIC 0
+
+/**
+ * Something went wrong and recovery is not possible.
+ * For example, no header was found for a format which depends
+ * on headers or an illegal combination of parameters is used.
+ */
+#define AV_LOG_FATAL 8
+
+/**
+ * Something went wrong and cannot losslessly be recovered.
+ * However, not all future data is affected.
+ */
+#define AV_LOG_ERROR 16
+
+/**
+ * Something somehow does not look correct. This may or may not
+ * lead to problems. An example would be the use of '-vstrict -2'.
+ */
+#define AV_LOG_WARNING 24
+
+#define AV_LOG_INFO 32
+#define AV_LOG_VERBOSE 40
+
+/**
+ * Stuff which is only useful for libav* developers.
+ */
+#define AV_LOG_DEBUG 48
+
+/**
+ * Send the specified message to the log if the level is less than or equal
+ * to the current av_log_level. By default, all logging messages are sent to
+ * stderr. This behavior can be altered by setting a different av_vlog callback
+ * function.
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct.
+ * @param level The importance level of the message, lower values signifying
+ * higher importance.
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ * @see av_vlog
+ */
+void av_log(void *avcl, int level, const char *fmt, ...) av_printf_format(3, 4);
+
+void av_vlog(void *avcl, int level, const char *fmt, va_list);
+int av_log_get_level(void);
+void av_log_set_level(int);
+void av_log_set_callback(void (*)(void*, int, const char*, va_list));
+void av_log_default_callback(void* ptr, int level, const char* fmt, va_list vl);
+const char* av_default_item_name(void* ctx);
+
+/**
+ * av_dlog macros
+ * Useful to print debug messages that shouldn't get compiled in normally.
+ */
+
+#ifdef DEBUG
+# define av_dlog(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__)
+#else
+# define av_dlog(pctx, ...)
+#endif
+
+/**
+ * Skip repeated messages. This requires the user app to use av_log() instead of
+ * (f)printf, as the two would otherwise interfere and, with some bad luck, lead to
+ * "Last message repeated x times" messages below (f)printf messages.
+ * Also, to receive the final "last repeated" line if any, the user app must
+ * call av_log(NULL, AV_LOG_QUIET, ""); at the end.
+ */
+#define AV_LOG_SKIP_REPEATED 1
+void av_log_set_flags(int arg);
+
+#endif /* AVUTIL_LOG_H */
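Since av_log() keys its message prefix off the AVClass pointer in the first field of the context, a small illustrative sketch looks like the following; the MyContext type is hypothetical and the program is assumed to link against libavutil:

#include "libavutil/log.h"

typedef struct MyContext {
    const AVClass *class;   /* must be the first field so av_log() can find the AVClass */
} MyContext;

static const AVClass my_class = {
    .class_name = "MyContext",
    .item_name  = av_default_item_name,
    .version    = LIBAVUTIL_VERSION_INT,
};

int main(void)
{
    MyContext ctx = { &my_class };

    av_log_set_level(AV_LOG_VERBOSE);                      /* allow more than the default AV_LOG_INFO */
    av_log(&ctx, AV_LOG_INFO, "decoder ready (%d)\n", 1);  /* prefixed with the class name */
    av_log(NULL, AV_LOG_WARNING, "context-less message\n");
    return 0;
}
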
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/lzo.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/lzo.h
new file mode 100644
index 000000000..9d7e8f1dc
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/lzo.h
@@ -0,0 +1,66 @@
+/*
+ * LZO 1x decompression
+ * copyright (c) 2006 Reimar Doeffinger
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_LZO_H
+#define AVUTIL_LZO_H
+
+/**
+ * @defgroup lavu_lzo LZO
+ * @ingroup lavu_crypto
+ *
+ * @{
+ */
+
+#include <stdint.h>
+
+/** @name Error flags returned by av_lzo1x_decode
+ * @{ */
+/// end of the input buffer reached before decoding finished
+#define AV_LZO_INPUT_DEPLETED 1
+/// decoded data did not fit into output buffer
+#define AV_LZO_OUTPUT_FULL 2
+/// a reference to previously decoded data was wrong
+#define AV_LZO_INVALID_BACKPTR 4
+/// a non-specific error in the compressed bitstream
+#define AV_LZO_ERROR 8
+/** @} */
+
+#define AV_LZO_INPUT_PADDING 8
+#define AV_LZO_OUTPUT_PADDING 12
+
+/**
+ * @brief Decodes LZO 1x compressed data.
+ * @param out output buffer
+ * @param outlen size of the output buffer; the number of bytes left is returned here
+ * @param in input buffer
+ * @param inlen size of the input buffer; the number of bytes left is returned here
+ * @return 0 on success, otherwise a combination of the error flags above
+ *
+ * Make sure all buffers are appropriately padded: in must provide
+ * AV_LZO_INPUT_PADDING and out must provide AV_LZO_OUTPUT_PADDING additional bytes.
+ */
+int av_lzo1x_decode(void *out, int *outlen, const void *in, int *inlen);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_LZO_H */
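A hedged sketch of calling av_lzo1x_decode() while honoring the padding requirements; the helper name and error convention are invented for illustration, and the caller's dst is assumed to already have AV_LZO_OUTPUT_PADDING writable bytes beyond dst_size:

#include <stdint.h>
#include <string.h>
#include "libavutil/lzo.h"
#include "libavutil/mem.h"

/* Returns the number of bytes decoded, or a negative value on error. */
static int decode_lzo_block(uint8_t *dst, int dst_size,
                            const uint8_t *src, int src_size)
{
    /* The input must stay readable for AV_LZO_INPUT_PADDING extra bytes,
       so decode from a zero-padded copy instead of the caller's buffer. */
    uint8_t *padded = av_mallocz(src_size + AV_LZO_INPUT_PADDING);
    int out_left = dst_size;   /* av_lzo1x_decode() reports the remaining space here */
    int in_left  = src_size;
    int err;

    if (!padded)
        return -1;
    memcpy(padded, src, src_size);

    err = av_lzo1x_decode(dst, &out_left, padded, &in_left);
    av_free(padded);

    return err ? -err : dst_size - out_left;
}
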
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/mathematics.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/mathematics.h
new file mode 100644
index 000000000..043dd0faf
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/mathematics.h
@@ -0,0 +1,111 @@
+/*
+ * copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_MATHEMATICS_H
+#define AVUTIL_MATHEMATICS_H
+
+#include <stdint.h>
+#include <math.h>
+#include "attributes.h"
+#include "rational.h"
+#include "intfloat.h"
+
+#ifndef M_LOG2_10
+#define M_LOG2_10 3.32192809488736234787 /* log_2 10 */
+#endif
+#ifndef M_PHI
+#define M_PHI 1.61803398874989484820 /* phi / golden ratio */
+#endif
+#ifndef NAN
+#define NAN av_int2float(0x7fc00000)
+#endif
+#ifndef INFINITY
+#define INFINITY av_int2float(0x7f800000)
+#endif
+
+/**
+ * @addtogroup lavu_math
+ * @{
+ */
+
+
+enum AVRounding {
+ AV_ROUND_ZERO = 0, ///< Round toward zero.
+ AV_ROUND_INF = 1, ///< Round away from zero.
+ AV_ROUND_DOWN = 2, ///< Round toward -infinity.
+ AV_ROUND_UP = 3, ///< Round toward +infinity.
+ AV_ROUND_NEAR_INF = 5, ///< Round to nearest and halfway cases away from zero.
+};
+
+/**
+ * Return the greatest common divisor of a and b.
+ * If both a and b are 0 or either or both are <0 then behavior is
+ * undefined.
+ */
+int64_t av_const av_gcd(int64_t a, int64_t b);
+
+/**
+ * Rescale a 64-bit integer with rounding to nearest.
+ * A simple a*b/c isn't possible as it can overflow.
+ */
+int64_t av_rescale(int64_t a, int64_t b, int64_t c) av_const;
+
+/**
+ * Rescale a 64-bit integer with specified rounding.
+ * A simple a*b/c isn't possible as it can overflow.
+ */
+int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding) av_const;
+
+/**
+ * Rescale a 64-bit integer by 2 rational numbers.
+ */
+int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const;
+
+/**
+ * Rescale a 64-bit integer by 2 rational numbers with specified rounding.
+ */
+int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq,
+ enum AVRounding) av_const;
+
+/**
+ * Compare 2 timestamps, each in its own time base.
+ * The result of the function is undefined if one of the timestamps
+ * is outside the int64_t range when represented in the other's time base.
+ * @return -1 if ts_a is before ts_b, 1 if ts_a is after ts_b, or 0 if they represent the same position
+ */
+int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b);
+
+/**
+ * Compare 2 integers modulo mod.
+ * That is, we compare integers a and b for which only the least
+ * significant log2(mod) bits are known.
+ *
+ * @param mod must be a power of 2
+ * @return a negative value if a is smaller than b
+ * a positive value if a is greater than b
+ * 0 if a equals b
+ */
+int64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_MATHEMATICS_H */
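The rescaling helpers above are what timestamp conversion in the decoders is built on. A minimal sketch, assumed to be linked against libavutil, converting a 90 kHz PTS to microseconds:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include "libavutil/mathematics.h"

int main(void)
{
    AVRational src_tb = { 1, 90000 };    /* 90 kHz MPEG time base */
    AVRational dst_tb = { 1, 1000000 };  /* microseconds */

    int64_t pts_90k = 9000;
    int64_t pts_us  = av_rescale_q(pts_90k, src_tb, dst_tb);  /* 9000 / 90000 s = 100000 us */

    printf("%" PRId64 " -> %" PRId64 " us\n", pts_90k, pts_us);

    /* The _rnd variants make the rounding explicit. */
    printf("1*2/3 rounded down: %" PRId64 "\n",
           av_rescale_rnd(1, 2, 3, AV_ROUND_DOWN));           /* 0 */
    return 0;
}
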
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/md5.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/md5.h
new file mode 100644
index 000000000..29e4e7c2b
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/md5.h
@@ -0,0 +1,51 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_MD5_H
+#define AVUTIL_MD5_H
+
+#include <stdint.h>
+
+#include "attributes.h"
+#include "version.h"
+
+/**
+ * @defgroup lavu_md5 MD5
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+#if FF_API_CONTEXT_SIZE
+extern attribute_deprecated const int av_md5_size;
+#endif
+
+struct AVMD5;
+
+struct AVMD5 *av_md5_alloc(void);
+void av_md5_init(struct AVMD5 *ctx);
+void av_md5_update(struct AVMD5 *ctx, const uint8_t *src, const int len);
+void av_md5_final(struct AVMD5 *ctx, uint8_t *dst);
+void av_md5_sum(uint8_t *dst, const uint8_t *src, const int len);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_MD5_H */
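A small sketch of both the one-shot and the streaming MD5 interfaces declared above; it assumes linking against libavutil and pulls in mem.h for av_free():

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "libavutil/md5.h"
#include "libavutil/mem.h"

int main(void)
{
    static const char msg[] = "hello world";
    uint8_t digest[16];   /* an MD5 digest is always 16 bytes */

    /* One-shot hashing of a complete buffer. */
    av_md5_sum(digest, (const uint8_t *)msg, strlen(msg));
    for (int i = 0; i < 16; i++)
        printf("%02x", digest[i]);
    printf("\n");

    /* Streaming interface, equivalent but usable for chunked input. */
    struct AVMD5 *ctx = av_md5_alloc();
    if (ctx) {
        av_md5_init(ctx);
        av_md5_update(ctx, (const uint8_t *)msg, strlen(msg));
        av_md5_final(ctx, digest);
        av_free(ctx);
    }
    return 0;
}
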
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/mem.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/mem.h
new file mode 100644
index 000000000..8f4722447
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/mem.h
@@ -0,0 +1,183 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * memory handling functions
+ */
+
+#ifndef AVUTIL_MEM_H
+#define AVUTIL_MEM_H
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "attributes.h"
+#include "avutil.h"
+
+/**
+ * @addtogroup lavu_mem
+ * @{
+ */
+
+
+#if defined(__ICC) && __ICC < 1200 || defined(__SUNPRO_C)
+ #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_CONST(n,t,v) const t __attribute__ ((aligned (n))) v
+#elif defined(__TI_COMPILER_VERSION__)
+ #define DECLARE_ALIGNED(n,t,v) \
+ AV_PRAGMA(DATA_ALIGN(v,n)) \
+ t __attribute__((aligned(n))) v
+ #define DECLARE_ASM_CONST(n,t,v) \
+ AV_PRAGMA(DATA_ALIGN(v,n)) \
+ static const t __attribute__((aligned(n))) v
+#elif defined(__GNUC__)
+ #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (n))) v
+#elif defined(_MSC_VER)
+ #define DECLARE_ALIGNED(n,t,v) __declspec(align(n)) t v
+ #define DECLARE_ASM_CONST(n,t,v) __declspec(align(n)) static const t v
+#else
+ #define DECLARE_ALIGNED(n,t,v) t v
+ #define DECLARE_ASM_CONST(n,t,v) static const t v
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+ #define av_malloc_attrib __attribute__((__malloc__))
+#else
+ #define av_malloc_attrib
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4,3)
+ #define av_alloc_size(...) __attribute__((alloc_size(__VA_ARGS__)))
+#else
+ #define av_alloc_size(...)
+#endif
+
+/**
+ * Allocate a block of size bytes with alignment suitable for all
+ * memory accesses (including vectors if available on the CPU).
+ * @param size Size in bytes for the memory block to be allocated.
+ * @return Pointer to the allocated block, NULL if the block cannot
+ * be allocated.
+ * @see av_mallocz()
+ */
+void *av_malloc(size_t size) av_malloc_attrib av_alloc_size(1);
+
+/**
+ * Helper function to allocate a block of size * nmemb bytes
+ * using av_malloc().
+ * @param nmemb Number of elements
+ * @param size Size of the single element
+ * @return Pointer to the allocated block, NULL if the block cannot
+ * be allocated.
+ * @see av_malloc()
+ */
+av_alloc_size(1, 2) static inline void *av_malloc_array(size_t nmemb, size_t size)
+{
+ if (size <= 0 || nmemb >= INT_MAX / size)
+ return NULL;
+ return av_malloc(nmemb * size);
+}
+
+/**
+ * Allocate or reallocate a block of memory.
+ * If ptr is NULL and size > 0, allocate a new block. If
+ * size is zero, free the memory block pointed to by ptr.
+ * @param ptr Pointer to a memory block already allocated with
+ * av_malloc(z)() or av_realloc() or NULL.
+ * @param size Size in bytes for the memory block to be allocated or
+ * reallocated.
+ * @return Pointer to a newly reallocated block or NULL if the block
+ * cannot be reallocated or the function is used to free the memory block.
+ * @see av_fast_realloc()
+ */
+void *av_realloc(void *ptr, size_t size) av_alloc_size(2);
+
+/**
+ * Free a memory block which has been allocated with av_malloc(z)() or
+ * av_realloc().
+ * @param ptr Pointer to the memory block which should be freed.
+ * @note ptr = NULL is explicitly allowed.
+ * @note It is recommended that you use av_freep() instead.
+ * @see av_freep()
+ */
+void av_free(void *ptr);
+
+/**
+ * Allocate a block of size bytes with alignment suitable for all
+ * memory accesses (including vectors if available on the CPU) and
+ * zero all the bytes of the block.
+ * @param size Size in bytes for the memory block to be allocated.
+ * @return Pointer to the allocated block, NULL if it cannot be allocated.
+ * @see av_malloc()
+ */
+void *av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1);
+
+/**
+ * Helper function to allocate a block of size * nmemb bytes
+ * using av_mallocz().
+ * @param nmemb Number of elements
+ * @param size Size of the single element
+ * @return Pointer to the allocated block, NULL if the block cannot
+ * be allocated.
+ * @see av_mallocz()
+ * @see av_malloc_array()
+ */
+av_alloc_size(1, 2) static inline void *av_mallocz_array(size_t nmemb, size_t size)
+{
+ if (size <= 0 || nmemb >= INT_MAX / size)
+ return NULL;
+ return av_mallocz(nmemb * size);
+}
+
+/**
+ * Duplicate the string s.
+ * @param s string to be duplicated
+ * @return Pointer to a newly allocated string containing a
+ * copy of s or NULL if the string cannot be allocated.
+ */
+char *av_strdup(const char *s) av_malloc_attrib;
+
+/**
+ * Free a memory block which has been allocated with av_malloc(z)() or
+ * av_realloc() and set the pointer pointing to it to NULL.
+ * @param ptr Pointer to the pointer to the memory block which should
+ * be freed.
+ * @see av_free()
+ */
+void av_freep(void *ptr);
+
+/**
+ * @brief deliberately overlapping memcpy implementation
+ * @param dst destination buffer
+ * @param back how many bytes back we start (the initial size of the overlapping window)
+ * @param cnt number of bytes to copy, must be >= 0
+ *
+ * cnt > back is valid; this will copy the bytes we just copied,
+ * thus creating a repeating pattern with a period length of back.
+ */
+void av_memcpy_backptr(uint8_t *dst, int back, int cnt);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_MEM_H */
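An illustrative sketch of the allocation helpers and of the overlapping-copy behavior described for av_memcpy_backptr(); it assumes linking against libavutil:

#include <stdint.h>
#include <stdio.h>
#include "libavutil/mem.h"

int main(void)
{
    /* Overflow-checked array allocation; av_freep() also clears the pointer. */
    int *samples = av_malloc_array(1024, sizeof(*samples));
    if (!samples)
        return 1;
    av_freep(&samples);                  /* samples == NULL afterwards */

    /* Copying from "back" bytes behind the write position repeats the data,
       which is how RLE-style decoders expand runs. */
    uint8_t buf[8] = { 'a', 'b' };
    av_memcpy_backptr(buf + 2, 2, 6);    /* buf becomes "abababab" */
    printf("%.8s\n", buf);
    return 0;
}
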
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/old_pix_fmts.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/old_pix_fmts.h
new file mode 100644
index 000000000..31765aed5
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/old_pix_fmts.h
@@ -0,0 +1,128 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_OLD_PIX_FMTS_H
+#define AVUTIL_OLD_PIX_FMTS_H
+
+/*
+ * This header exists to prevent new pixel formats from being accidentally added
+ * to the deprecated list.
+ * Do not include it directly. It will be removed at the next major bump.
+ *
+ * Do not add new items to this list. Use the AVPixelFormat enum instead.
+ */
+ PIX_FMT_NONE = AV_PIX_FMT_NONE,
+ PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
+ PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
+ PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB...
+ PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR...
+ PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+ PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
+ PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
+ PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
+ PIX_FMT_GRAY8, ///< Y , 8bpp
+ PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
+ PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
+ PIX_FMT_PAL8, ///< 8 bit with PIX_FMT_RGB32 palette
+ PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range
+ PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range
+ PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range
+ PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing
+ PIX_FMT_XVMC_MPEG2_IDCT,
+ PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
+ PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
+ PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
+ PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
+ PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
+ PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
+ PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
+ PIX_FMT_NV21, ///< as above, but U and V bytes are swapped
+
+ PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
+ PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
+ PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
+ PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
+
+ PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian
+ PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian
+ PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
+ PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range
+ PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
+ PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
+ PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian
+
+ PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
+ PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
+ PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0
+ PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0
+
+ PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
+ PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
+ PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1
+ PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1
+
+ PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
+ PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
+ PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+
+ PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer
+
+ PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0
+ PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0
+ PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1
+ PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1
+ PIX_FMT_Y400A, ///< 8bit gray, 8bit alpha
+ PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
+ PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian
+ PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ PIX_FMT_VDA_VLD, ///< hardware decoding through VDA
+ PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp
+ PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big endian
+ PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little endian
+ PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big endian
+ PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little endian
+ PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big endian
+ PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little endian
+ PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
+
+#endif /* AVUTIL_OLD_PIX_FMTS_H */
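This fragment deliberately contains bare enumerators so that it can be textually included inside an enum definition; roughly as sketched below (the exact include site and deprecation guard live in pixfmt.h and are only indicated here):

/* Hypothetical consumer, sketching how pixfmt.h pulls the fragment in. The
   AV_PIX_FMT_* values referenced by the first entry must already be defined. */
enum PixelFormat {
#include "old_pix_fmts.h"
};
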
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/opt.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/opt.h
new file mode 100644
index 000000000..2d3cc731e
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/opt.h
@@ -0,0 +1,516 @@
+/*
+ * AVOptions
+ * copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_OPT_H
+#define AVUTIL_OPT_H
+
+/**
+ * @file
+ * AVOptions
+ */
+
+#include "rational.h"
+#include "avutil.h"
+#include "dict.h"
+#include "log.h"
+
+/**
+ * @defgroup avoptions AVOptions
+ * @ingroup lavu_data
+ * @{
+ * AVOptions provide a generic system to declare options on arbitrary structs
+ * ("objects"). An option can have a help text, a type and a range of possible
+ * values. Options may then be enumerated, read and written to.
+ *
+ * @section avoptions_implement Implementing AVOptions
+ * This section describes how to add AVOptions capabilities to a struct.
+ *
+ * All AVOptions-related information is stored in an AVClass. Therefore
+ * the first member of the struct must be a pointer to an AVClass describing it.
+ * The option field of the AVClass must be set to a NULL-terminated static array
+ * of AVOptions. Each AVOption must have a non-empty name, a type, a default
+ * value and for number-type AVOptions also a range of allowed values. It must
+ * also declare an offset in bytes from the start of the struct, where the field
+ * associated with this AVOption is located. Other fields in the AVOption struct
+ * should also be set when applicable, but are not required.
+ *
+ * The following example illustrates an AVOptions-enabled struct:
+ * @code
+ * typedef struct test_struct {
+ * AVClass *class;
+ * int int_opt;
+ * char *str_opt;
+ * uint8_t *bin_opt;
+ * int bin_len;
+ * } test_struct;
+ *
+ * static const AVOption options[] = {
+ * { "test_int", "This is a test option of int type.", offsetof(test_struct, int_opt),
+ * AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX },
+ * { "test_str", "This is a test option of string type.", offsetof(test_struct, str_opt),
+ * AV_OPT_TYPE_STRING },
+ * { "test_bin", "This is a test option of binary type.", offsetof(test_struct, bin_opt),
+ * AV_OPT_TYPE_BINARY },
+ * { NULL },
+ * };
+ *
+ * static const AVClass test_class = {
+ * .class_name = "test class",
+ * .item_name = av_default_item_name,
+ * .option = options,
+ * .version = LIBAVUTIL_VERSION_INT,
+ * };
+ * @endcode
+ *
+ * Next, when allocating your struct, you must ensure that the AVClass pointer
+ * is set to the correct value. Then, av_opt_set_defaults() must be called to
+ * initialize defaults. After that the struct is ready to be used with the
+ * AVOptions API.
+ *
+ * When cleaning up, you may use the av_opt_free() function to automatically
+ * free all the allocated string and binary options.
+ *
+ * Continuing with the above example:
+ *
+ * @code
+ * test_struct *alloc_test_struct(void)
+ * {
+ * test_struct *ret = av_malloc(sizeof(*ret));
+ * ret->class = &test_class;
+ * av_opt_set_defaults(ret);
+ * return ret;
+ * }
+ * void free_test_struct(test_struct **foo)
+ * {
+ * av_opt_free(*foo);
+ * av_freep(foo);
+ * }
+ * @endcode
+ *
+ * @subsection avoptions_implement_nesting Nesting
+ * It may happen that an AVOptions-enabled struct contains another
+ * AVOptions-enabled struct as a member (e.g. AVCodecContext in
+ * libavcodec exports generic options, while its priv_data field exports
+ * codec-specific options). In such a case, it is possible to set up the
+ * parent struct to export a child's options. To do that, simply
+ * implement AVClass.child_next() and AVClass.child_class_next() in the
+ * parent struct's AVClass.
+ * Assuming that the test_struct from above now also contains a
+ * child_struct field:
+ *
+ * @code
+ * typedef struct child_struct {
+ * AVClass *class;
+ * int flags_opt;
+ * } child_struct;
+ * static const AVOption child_opts[] = {
+ * { "test_flags", "This is a test option of flags type.",
+ * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX },
+ * { NULL },
+ * };
+ * static const AVClass child_class = {
+ * .class_name = "child class",
+ * .item_name = av_default_item_name,
+ * .option = child_opts,
+ * .version = LIBAVUTIL_VERSION_INT,
+ * };
+ *
+ * void *child_next(void *obj, void *prev)
+ * {
+ * test_struct *t = obj;
+ * if (!prev && t->child_struct)
+ * return t->child_struct;
+ *       return NULL;
+ *   }
+ *   const AVClass *child_class_next(const AVClass *prev)
+ * {
+ * return prev ? NULL : &child_class;
+ * }
+ * @endcode
+ * Putting child_next() and child_class_next() as defined above into
+ * test_class will now make child_struct's options accessible through
+ * test_struct (again, proper setup as described above needs to be done on
+ * child_struct right after it is created).
+ *
+ * From the above example it might not be clear why both child_next()
+ * and child_class_next() are needed. The distinction is that child_next()
+ * iterates over actually existing objects, while child_class_next()
+ * iterates over all possible child classes. E.g. if an AVCodecContext
+ * was initialized to use a codec which has private options, then its
+ * child_next() will return AVCodecContext.priv_data and finish
+ * iterating. OTOH child_class_next() on AVCodecContext.av_class will
+ * iterate over all available codecs with private options.
+ *
+ * @subsection avoptions_implement_named_constants Named constants
+ * It is possible to create named constants for options. Simply set the unit
+ * field of the option the constants should apply to to a string and
+ * create the constants themselves as options of type AV_OPT_TYPE_CONST
+ * with their unit field set to the same string.
+ * Their default_val field should contain the value of the named
+ * constant.
+ * For example, to add some named constants for the test_flags option
+ * above, put the following into the child_opts array:
+ * @code
+ * { "test_flags", "This is a test option of flags type.",
+ * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX, "test_unit" },
+ * { "flag1", "This is a flag with value 16", 0, AV_OPT_TYPE_CONST, { .i64 = 16 }, 0, 0, "test_unit" },
+ * @endcode
+ *
+ * @section avoptions_use Using AVOptions
+ * This section deals with accessing options in an AVOptions-enabled struct.
+ * Such structs in Libav are e.g. AVCodecContext in libavcodec or
+ * AVFormatContext in libavformat.
+ *
+ * @subsection avoptions_use_examine Examining AVOptions
+ * The basic functions for examining options are av_opt_next(), which iterates
+ * over all options defined for one object, and av_opt_find(), which searches
+ * for an option with the given name.
+ *
+ * The situation is more complicated with nesting. An AVOptions-enabled struct
+ * may have AVOptions-enabled children. Passing the AV_OPT_SEARCH_CHILDREN flag
+ * to av_opt_find() will make the function search children recursively.
+ *
+ * For enumerating there are basically two cases. The first is when you want to
+ * get all options that may potentially exist on the struct and its children
+ * (e.g. when constructing documentation). In that case you should call
+ * av_opt_child_class_next() recursively on the parent struct's AVClass. The
+ * second case is when you have an already initialized struct with all its
+ * children and you want to get all options that can be actually written or read
+ * from it. In that case you should call av_opt_child_next() recursively (and
+ * av_opt_next() on each result).
+ *
+ * @subsection avoptions_use_get_set Reading and writing AVOptions
+ * When setting options, you often have a string read directly from the
+ * user. In such a case, simply passing it to av_opt_set() is enough. For
+ * non-string type options, av_opt_set() will parse the string according to the
+ * option type.
+ *
+ * Similarly av_opt_get() will read any option type and convert it to a string
+ * which will be returned. Do not forget that the string is allocated, so you
+ * have to free it with av_free().
+ *
+ * In some cases it may be more convenient to put all options into an
+ * AVDictionary and call av_opt_set_dict() on it. A specific case of this
+ * is the format/codec open functions in lavf/lavc, which take a dictionary
+ * filled with options as a parameter. This allows setting some options
+ * that cannot be set otherwise, since e.g. the input file format is not known
+ * before the file is actually opened.
+ */
+
+enum AVOptionType{
+ AV_OPT_TYPE_FLAGS,
+ AV_OPT_TYPE_INT,
+ AV_OPT_TYPE_INT64,
+ AV_OPT_TYPE_DOUBLE,
+ AV_OPT_TYPE_FLOAT,
+ AV_OPT_TYPE_STRING,
+ AV_OPT_TYPE_RATIONAL,
+ AV_OPT_TYPE_BINARY, ///< offset must point to a pointer immediately followed by an int for the length
+ AV_OPT_TYPE_CONST = 128,
+};
+
+/**
+ * AVOption
+ */
+typedef struct AVOption {
+ const char *name;
+
+ /**
+ * short English help text
+ * @todo What about other languages?
+ */
+ const char *help;
+
+ /**
+ * The offset relative to the context structure where the option
+ * value is stored. It should be 0 for named constants.
+ */
+ int offset;
+ enum AVOptionType type;
+
+ /**
+ * the default value for scalar options
+ */
+ union {
+ int64_t i64;
+ double dbl;
+ const char *str;
+ /* TODO those are unused now */
+ AVRational q;
+ } default_val;
+ double min; ///< minimum valid value for the option
+ double max; ///< maximum valid value for the option
+
+ int flags;
+#define AV_OPT_FLAG_ENCODING_PARAM 1 ///< a generic parameter which can be set by the user for muxing or encoding
+#define AV_OPT_FLAG_DECODING_PARAM 2 ///< a generic parameter which can be set by the user for demuxing or decoding
+#define AV_OPT_FLAG_METADATA 4 ///< some data extracted or inserted into the file like title, comment, ...
+#define AV_OPT_FLAG_AUDIO_PARAM 8
+#define AV_OPT_FLAG_VIDEO_PARAM 16
+#define AV_OPT_FLAG_SUBTITLE_PARAM 32
+//FIXME think about enc-audio, ... style flags
+
+ /**
+ * The logical unit to which the option belongs. Non-constant
+ * options and corresponding named constants share the same
+ * unit. May be NULL.
+ */
+ const char *unit;
+} AVOption;
+
+/**
+ * Show the obj options.
+ *
+ * @param req_flags requested flags for the options to show. Show only the
+ * options for which it is opt->flags & req_flags.
+ * @param rej_flags rejected flags for the options to show. Show only the
+ * options for which it is !(opt->flags & rej_flags).
+ * @param av_log_obj log context to use for showing the options
+ */
+int av_opt_show2(void *obj, void *av_log_obj, int req_flags, int rej_flags);
+
+/**
+ * Set the values of all AVOption fields to their default values.
+ *
+ * @param s an AVOption-enabled struct (its first member must be a pointer to AVClass)
+ */
+void av_opt_set_defaults(void *s);
+
+/**
+ * Parse the key/value pairs list in opts. For each key/value pair
+ * found, stores the value in the field in ctx that is named like the
+ * key. ctx must be an AVClass context, storing is done using
+ * AVOptions.
+ *
+ * @param key_val_sep a 0-terminated list of characters used to
+ * separate key from value
+ * @param pairs_sep a 0-terminated list of characters used to separate
+ * two pairs from each other
+ * @return the number of successfully set key/value pairs, or a negative
+ * value corresponding to an AVERROR code in case of error:
+ * AVERROR(EINVAL) if opts cannot be parsed,
+ * the error code issued by av_set_string3() if a key/value pair
+ * cannot be set
+ */
+int av_set_options_string(void *ctx, const char *opts,
+ const char *key_val_sep, const char *pairs_sep);
+
+/**
+ * Free all string and binary options in obj.
+ */
+void av_opt_free(void *obj);
+
+/**
+ * Check whether a particular flag is set in a flags field.
+ *
+ * @param field_name the name of the flag field option
+ * @param flag_name the name of the flag to check
+ * @return non-zero if the flag is set, zero if the flag isn't set,
+ * isn't of the right type, or the flags field doesn't exist.
+ */
+int av_opt_flag_is_set(void *obj, const char *field_name, const char *flag_name);
+
+/**
+ * Set all the options from a given dictionary on an object.
+ *
+ * @param obj a struct whose first element is a pointer to AVClass
+ * @param options options to process. This dictionary will be freed and replaced
+ * by a new one containing all options not found in obj.
+ * Of course this new dictionary needs to be freed by caller
+ * with av_dict_free().
+ *
+ * @return 0 on success, a negative AVERROR if some option was found in obj,
+ * but could not be set.
+ *
+ * @see av_dict_copy()
+ */
+int av_opt_set_dict(void *obj, struct AVDictionary **options);
+
+/**
+ * @defgroup opt_eval_funcs Evaluating option strings
+ * @{
+ * This group of functions can be used to evaluate option strings
+ * and get numbers out of them. They do the same thing as av_opt_set(),
+ * except the result is written into the caller-supplied pointer.
+ *
+ * @param obj a struct whose first element is a pointer to AVClass.
+ * @param o an option for which the string is to be evaluated.
+ * @param val string to be evaluated.
+ * @param *_out value of the string will be written here.
+ *
+ * @return 0 on success, a negative number on failure.
+ */
+int av_opt_eval_flags (void *obj, const AVOption *o, const char *val, int *flags_out);
+int av_opt_eval_int (void *obj, const AVOption *o, const char *val, int *int_out);
+int av_opt_eval_int64 (void *obj, const AVOption *o, const char *val, int64_t *int64_out);
+int av_opt_eval_float (void *obj, const AVOption *o, const char *val, float *float_out);
+int av_opt_eval_double(void *obj, const AVOption *o, const char *val, double *double_out);
+int av_opt_eval_q (void *obj, const AVOption *o, const char *val, AVRational *q_out);
+/**
+ * @}
+ */
+
+#define AV_OPT_SEARCH_CHILDREN 0x0001 /**< Search in possible children of the
+ given object first. */
+/**
+ * The obj passed to av_opt_find() is fake -- only a double pointer to AVClass
+ * instead of a required pointer to a struct containing AVClass. This is
+ * useful for searching for options without needing to allocate the corresponding
+ * object.
+ */
+#define AV_OPT_SEARCH_FAKE_OBJ 0x0002
+
+/**
+ * Look for an option in an object. Consider only options which
+ * have all the specified flags set.
+ *
+ * @param[in] obj A pointer to a struct whose first element is a
+ * pointer to an AVClass.
+ * Alternatively a double pointer to an AVClass, if
+ * AV_OPT_SEARCH_FAKE_OBJ search flag is set.
+ * @param[in] name The name of the option to look for.
+ * @param[in] unit When searching for named constants, name of the unit
+ * it belongs to.
+ * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG).
+ * @param search_flags A combination of AV_OPT_SEARCH_*.
+ *
+ * @return A pointer to the option found, or NULL if no option
+ * was found.
+ *
+ * @note Options found with AV_OPT_SEARCH_CHILDREN flag may not be settable
+ * directly with av_set_string3(). Use special calls which take an options
+ * AVDictionary (e.g. avformat_open_input()) to set options found with this
+ * flag.
+ */
+const AVOption *av_opt_find(void *obj, const char *name, const char *unit,
+ int opt_flags, int search_flags);
+
+/**
+ * Look for an option in an object. Consider only options which
+ * have all the specified flags set.
+ *
+ * @param[in] obj A pointer to a struct whose first element is a
+ * pointer to an AVClass.
+ * Alternatively a double pointer to an AVClass, if
+ * AV_OPT_SEARCH_FAKE_OBJ search flag is set.
+ * @param[in] name The name of the option to look for.
+ * @param[in] unit When searching for named constants, name of the unit
+ * it belongs to.
+ * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG).
+ * @param search_flags A combination of AV_OPT_SEARCH_*.
+ * @param[out] target_obj if non-NULL, an object to which the option belongs will be
+ * written here. It may be different from obj if AV_OPT_SEARCH_CHILDREN is present
+ * in search_flags. This parameter is ignored if search_flags contains
+ * AV_OPT_SEARCH_FAKE_OBJ.
+ *
+ * @return A pointer to the option found, or NULL if no option
+ * was found.
+ */
+const AVOption *av_opt_find2(void *obj, const char *name, const char *unit,
+ int opt_flags, int search_flags, void **target_obj);
+
+/**
+ * Iterate over all AVOptions belonging to obj.
+ *
+ * @param obj an AVOptions-enabled struct or a double pointer to an
+ * AVClass describing it.
+ * @param prev result of the previous call to av_opt_next() on this object
+ * or NULL
+ * @return next AVOption or NULL
+ */
+const AVOption *av_opt_next(void *obj, const AVOption *prev);
+
+/**
+ * Iterate over AVOptions-enabled children of obj.
+ *
+ * @param prev result of a previous call to this function or NULL
+ * @return next AVOptions-enabled child or NULL
+ */
+void *av_opt_child_next(void *obj, void *prev);
+
+/**
+ * Iterate over potential AVOptions-enabled children of parent.
+ *
+ * @param prev result of a previous call to this function or NULL
+ * @return AVClass corresponding to next potential child or NULL
+ */
+const AVClass *av_opt_child_class_next(const AVClass *parent, const AVClass *prev);
+
+/**
+ * @defgroup opt_set_funcs Option setting functions
+ * @{
+ * These functions set the field of obj with the given name to value.
+ *
+ * @param[in] obj A struct whose first element is a pointer to an AVClass.
+ * @param[in] name the name of the field to set
+ * @param[in] val The value to set. In case of av_opt_set() if the field is not
+ * of a string type, then the given string is parsed.
+ * SI postfixes and some named scalars are supported.
+ * If the field is of a numeric type, it has to be a numeric or named
+ * scalar. Behavior with more than one scalar and +- infix operators
+ * is undefined.
+ * If the field is of a flags type, it has to be a sequence of numeric
+ * scalars or named flags separated by '+' or '-'. Prefixing a flag
+ * with '+' causes it to be set without affecting the other flags;
+ * similarly, '-' unsets a flag.
+ * @param search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN
+ * is passed here, then the option may be set on a child of obj.
+ *
+ * @return 0 if the value has been set, or an AVERROR code in case of
+ * error:
+ * AVERROR_OPTION_NOT_FOUND if no matching option exists
+ * AVERROR(ERANGE) if the value is out of range
+ * AVERROR(EINVAL) if the value is not valid
+ */
+int av_opt_set (void *obj, const char *name, const char *val, int search_flags);
+int av_opt_set_int (void *obj, const char *name, int64_t val, int search_flags);
+int av_opt_set_double(void *obj, const char *name, double val, int search_flags);
+int av_opt_set_q (void *obj, const char *name, AVRational val, int search_flags);
+int av_opt_set_bin (void *obj, const char *name, const uint8_t *val, int size, int search_flags);
+/**
+ * @}
+ */
+
+/**
+ * @defgroup opt_get_funcs Option getting functions
+ * @{
+ * These functions get the value of the option with the given name from an object.
+ *
+ * @param[in] obj a struct whose first element is a pointer to an AVClass.
+ * @param[in] name name of the option to get.
+ * @param[in] search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN
+ * is passed here, then the option may be found in a child of obj.
+ * @param[out] out_val value of the option will be written here
+ * @return 0 on success, a negative error code otherwise
+ */
+/**
+ * @note for av_opt_get() the returned string is av_malloc()ed and must be av_free()ed by the caller
+ */
+int av_opt_get (void *obj, const char *name, int search_flags, uint8_t **out_val);
+int av_opt_get_int (void *obj, const char *name, int search_flags, int64_t *out_val);
+int av_opt_get_double(void *obj, const char *name, int search_flags, double *out_val);
+int av_opt_get_q (void *obj, const char *name, int search_flags, AVRational *out_val);
+/**
+ * @}
+ * @}
+ */
+
+#endif /* AVUTIL_OPT_H */
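
Illustrative sketch (not part of the vendored header): how the AVOptions setters
and getters declared above are typically used. It assumes obj points at some
AVOptions-enabled struct created elsewhere (for example an AVCodecContext) and
that an option named "b" exists on it; av_free() comes from libavutil/mem.h,
which is not shown here.

    #include <stdio.h>
    #include <libavutil/mem.h>
    #include <libavutil/opt.h>

    /* Set an option from a string (SI postfixes are parsed) and read it back. */
    int tune_object(void *obj)
    {
        uint8_t *val = NULL;
        int ret = av_opt_set(obj, "b", "800k", AV_OPT_SEARCH_CHILDREN);
        if (ret < 0)
            return ret; /* e.g. AVERROR_OPTION_NOT_FOUND or AVERROR(ERANGE) */

        ret = av_opt_get(obj, "b", 0, &val); /* string is av_malloc()ed */
        if (ret >= 0) {
            printf("b = %s\n", val);
            av_free(val);
        }
        return ret;
    }
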
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/parseutils.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/parseutils.h
new file mode 100644
index 000000000..0844abb2f
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/parseutils.h
@@ -0,0 +1,124 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PARSEUTILS_H
+#define AVUTIL_PARSEUTILS_H
+
+#include <time.h>
+
+#include "rational.h"
+
+/**
+ * @file
+ * misc parsing utilities
+ */
+
+/**
+ * Parse str and put in width_ptr and height_ptr the detected values.
+ *
+ * @param[in,out] width_ptr pointer to the variable which will contain the detected
+ * width value
+ * @param[in,out] height_ptr pointer to the variable which will contain the detected
+ * height value
+ * @param[in] str the string to parse: it has to be a string in the format
+ * width x height or a valid video size abbreviation.
+ * @return >= 0 on success, a negative error code otherwise
+ */
+int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str);
+
+/**
+ * Parse str and store the detected values in *rate.
+ *
+ * @param[in,out] rate pointer to the AVRational which will contain the detected
+ * frame rate
+ * @param[in] str the string to parse: it has to be a string in the format
+ * rate_num / rate_den, a float number or a valid video rate abbreviation
+ * @return >= 0 on success, a negative error code otherwise
+ */
+int av_parse_video_rate(AVRational *rate, const char *str);
+
+/**
+ * Put the RGBA values that correspond to color_string in rgba_color.
+ *
+ * @param color_string a string specifying a color. It can be the name of
+ * a color (case insensitive match) or a [0x|#]RRGGBB[AA] sequence,
+ * possibly followed by "@" and a string representing the alpha
+ * component.
+ * The alpha component may be a string composed by "0x" followed by an
+ * hexadecimal number or a decimal number between 0.0 and 1.0, which
+ * represents the opacity value (0x00/0.0 means completely transparent,
+ * 0xff/1.0 completely opaque).
+ * If the alpha component is not specified then 0xff is assumed.
+ * The string "random" will result in a random color.
+ * @param slen length of the initial part of color_string containing the
+ * color. It can be set to -1 if color_string is a null terminated string
+ * containing nothing else than the color.
+ * @return >= 0 in case of success, a negative value in case of
+ * failure (for example if color_string cannot be parsed).
+ */
+int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen,
+ void *log_ctx);
+
+/**
+ * Parse timestr and return in *timeval the corresponding number of
+ * microseconds.
+ *
+ * @param timeval the number of microseconds corresponding to the string
+ * in timestr is written here. If the string represents a duration, it
+ * is the number of microseconds contained in the time interval. If
+ * the string is a date, it is the number of microseconds since the 1st of
+ * January, 1970 up to the time of the parsed date. If timestr cannot
+ * be successfully parsed, *timeval is set to INT64_MIN.
+ *
+ * @param timestr a string representing a date or a duration.
+ * - If a date the syntax is:
+ * @code
+ * [{YYYY-MM-DD|YYYYMMDD}[T|t| ]]{{HH[:MM[:SS[.m...]]]}|{HH[MM[SS[.m...]]]}}[Z]
+ * now
+ * @endcode
+ * If the value is "now" it takes the current time.
+ * Time is local time unless Z is appended, in which case it is
+ * interpreted as UTC.
+ * If the year-month-day part is not specified it takes the current
+ * year-month-day.
+ * - If a duration the syntax is:
+ * @code
+ * [-]HH[:MM[:SS[.m...]]]
+ * [-]S+[.m...]
+ * @endcode
+ * @param duration flag which tells how to interpret timestr, if not
+ * zero timestr is interpreted as a duration, otherwise as a date
+ * @return 0 in case of success, a negative value corresponding to an
+ * AVERROR code otherwise
+ */
+int av_parse_time(int64_t *timeval, const char *timestr, int duration);
+
+/**
+ * Attempt to find a specific tag in a URL.
+ *
+ * Expected syntax: '?tag1=val1&tag2=val2...'. Only minimal URL decoding is performed.
+ * Return 1 if the tag was found.
+ */
+int av_find_info_tag(char *arg, int arg_size, const char *tag1, const char *info);
+
+/**
+ * Convert the decomposed UTC time in tm to a time_t value.
+ */
+time_t av_timegm(struct tm *tm);
+
+#endif /* AVUTIL_PARSEUTILS_H */
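
For illustration only (not part of the vendored header): a short sketch
exercising the parsing helpers declared above; the literal strings are
arbitrary examples.

    #include <stdio.h>
    #include <inttypes.h>
    #include <libavutil/parseutils.h>

    int main(void)
    {
        int w, h;
        AVRational rate;
        int64_t us;

        if (av_parse_video_size(&w, &h, "1280x720") >= 0)
            printf("size: %dx%d\n", w, h);

        if (av_parse_video_rate(&rate, "30000/1001") >= 0)
            printf("rate: %d/%d\n", rate.num, rate.den);

        /* duration flag set: "01:02:03.5" is 1 h 2 min 3.5 s, in microseconds */
        if (av_parse_time(&us, "01:02:03.5", 1) == 0)
            printf("duration: %" PRId64 " us\n", us);
        return 0;
    }
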
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/pixdesc.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/pixdesc.h
new file mode 100644
index 000000000..47e6bb838
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/pixdesc.h
@@ -0,0 +1,223 @@
+/*
+ * pixel format descriptor
+ * Copyright (c) 2009 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PIXDESC_H
+#define AVUTIL_PIXDESC_H
+
+#include <inttypes.h>
+#include "pixfmt.h"
+
+typedef struct AVComponentDescriptor{
+ uint16_t plane :2; ///< which of the 4 planes contains the component
+
+ /**
+ * Number of elements between 2 horizontally consecutive pixels minus 1.
+ * Elements are bits for bitstream formats, bytes otherwise.
+ */
+ uint16_t step_minus1 :3;
+
+ /**
+ * Number of elements before the component of the first pixel plus 1.
+ * Elements are bits for bitstream formats, bytes otherwise.
+ */
+ uint16_t offset_plus1 :3;
+ uint16_t shift :3; ///< number of least significant bits that must be shifted away to get the value
+ uint16_t depth_minus1 :4; ///< number of bits in the component minus 1
+}AVComponentDescriptor;
+
+/**
+ * Descriptor that unambiguously describes how the bits of a pixel are
+ * stored in the up to 4 data planes of an image. It also stores the
+ * subsampling factors and number of components.
+ *
+ * @note This is separate from the colorspace (RGB, YCbCr, YPbPr, JPEG-style YUV
+ *       and all the YUV variants); AVPixFmtDescriptor just stores how values
+ *       are stored, not what these values represent.
+ */
+typedef struct AVPixFmtDescriptor{
+ const char *name;
+    uint8_t nb_components;      ///< The number of components each pixel has (1-4)
+
+ /**
+ * Amount to shift the luma width right to find the chroma width.
+     * For YV12 this is 1, for example.
+     * chroma_width = -((-luma_width) >> log2_chroma_w)
+     * The formula is written this way so that the result rounds up.
+ * This value only refers to the chroma components.
+ */
+ uint8_t log2_chroma_w; ///< chroma_width = -((-luma_width )>>log2_chroma_w)
+
+ /**
+ * Amount to shift the luma height right to find the chroma height.
+     * For YV12 this is 1, for example.
+     * chroma_height = -((-luma_height) >> log2_chroma_h)
+     * The formula is written this way so that the result rounds up.
+ * This value only refers to the chroma components.
+ */
+ uint8_t log2_chroma_h;
+ uint8_t flags;
+
+ /**
+ * Parameters that describe how pixels are packed. If the format
+ * has chroma components, they must be stored in comp[1] and
+ * comp[2].
+ */
+ AVComponentDescriptor comp[4];
+}AVPixFmtDescriptor;
+
+#define PIX_FMT_BE 1 ///< Pixel format is big-endian.
+#define PIX_FMT_PAL 2 ///< Pixel format has a palette in data[1], values are indexes in this palette.
+#define PIX_FMT_BITSTREAM 4 ///< All values of a component are bit-wise packed end to end.
+#define PIX_FMT_HWACCEL 8 ///< Pixel format is an HW accelerated format.
+#define PIX_FMT_PLANAR 16 ///< At least one pixel component is not in the first data plane
+#define PIX_FMT_RGB 32 ///< The pixel format contains RGB-like data (as opposed to YUV/grayscale)
+/**
+ * The pixel format is "pseudo-paletted". This means that Libav treats it as
+ * paletted internally, but the palette is generated by the decoder and is not
+ * stored in the file.
+ */
+#define PIX_FMT_PSEUDOPAL 64
+
+#define PIX_FMT_ALPHA 128 ///< The pixel format has an alpha channel
+
+
+#if FF_API_PIX_FMT_DESC
+/**
+ * The array of all the pixel format descriptors.
+ */
+extern const AVPixFmtDescriptor av_pix_fmt_descriptors[];
+#endif
+
+/**
+ * Read a line from an image, and write the values of the
+ * pixel format component c to dst.
+ *
+ * @param data the array containing the pointers to the planes of the image
+ * @param linesize the array containing the linesizes of the image
+ * @param desc the pixel format descriptor for the image
+ * @param x the horizontal coordinate of the first pixel to read
+ * @param y the vertical coordinate of the first pixel to read
+ * @param w the width of the line to read, that is the number of
+ * values to write to dst
+ * @param read_pal_component if not zero and the format is a paletted
+ * format, write the values corresponding to the palette
+ * component c in data[1] to dst, rather than the palette indexes in
+ * data[0]. The behavior is undefined if the format is not paletted.
+ */
+void av_read_image_line(uint16_t *dst, const uint8_t *data[4], const int linesize[4],
+ const AVPixFmtDescriptor *desc, int x, int y, int c, int w, int read_pal_component);
+
+/**
+ * Write the values from src to the pixel format component c of an
+ * image line.
+ *
+ * @param src array containing the values to write
+ * @param data the array containing the pointers to the planes of the
+ * image to write into. It is supposed to be zeroed.
+ * @param linesize the array containing the linesizes of the image
+ * @param desc the pixel format descriptor for the image
+ * @param x the horizontal coordinate of the first pixel to write
+ * @param y the vertical coordinate of the first pixel to write
+ * @param w the width of the line to write, that is the number of
+ * values to write to the image line
+ */
+void av_write_image_line(const uint16_t *src, uint8_t *data[4], const int linesize[4],
+ const AVPixFmtDescriptor *desc, int x, int y, int c, int w);
+
+/**
+ * Return the pixel format corresponding to name.
+ *
+ * If there is no pixel format with the given name, then look for a
+ * pixel format whose name corresponds to the native-endian
+ * variant of name.
+ * For example, on a little-endian system this first looks for "gray16",
+ * then for "gray16le".
+ *
+ * Finally if no pixel format has been found, returns PIX_FMT_NONE.
+ */
+enum AVPixelFormat av_get_pix_fmt(const char *name);
+
+/**
+ * Return the short name for a pixel format, NULL in case pix_fmt is
+ * unknown.
+ *
+ * @see av_get_pix_fmt(), av_get_pix_fmt_string()
+ */
+const char *av_get_pix_fmt_name(enum AVPixelFormat pix_fmt);
+
+/**
+ * Print in buf the string corresponding to the pixel format with
+ * number pix_fmt, or a header if pix_fmt is negative.
+ *
+ * @param buf the buffer where to write the string
+ * @param buf_size the size of buf
+ * @param pix_fmt the number of the pixel format to print the
+ * corresponding info string, or a negative value to print the
+ * corresponding header.
+ */
+char *av_get_pix_fmt_string (char *buf, int buf_size, enum AVPixelFormat pix_fmt);
+
+/**
+ * Return the number of bits per pixel used by the pixel format
+ * described by pixdesc.
+ *
+ * The returned number of bits refers to the number of bits actually
+ * used for storing the pixel information; that is, padding bits are
+ * not counted.
+ */
+int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc);
+
+/**
+ * @return a pixel format descriptor for provided pixel format or NULL if
+ * this pixel format is unknown.
+ */
+const AVPixFmtDescriptor *av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt);
+
+/**
+ * Iterate over all pixel format descriptors known to libavutil.
+ *
+ * @param prev previous descriptor. NULL to get the first descriptor.
+ *
+ * @return next descriptor or NULL after the last descriptor
+ */
+const AVPixFmtDescriptor *av_pix_fmt_desc_next(const AVPixFmtDescriptor *prev);
+
+/**
+ * @return an AVPixelFormat id described by desc, or AV_PIX_FMT_NONE if desc
+ * is not a valid pointer to a pixel format descriptor.
+ */
+enum AVPixelFormat av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc);
+
+/**
+ * Utility function to access log2_chroma_w log2_chroma_h from
+ * the pixel format AVPixFmtDescriptor.
+ *
+ * @param[in] pix_fmt the pixel format
+ * @param[out] h_shift store log2_chroma_w (horizontal shift)
+ * @param[out] v_shift store log2_chroma_h (vertical shift)
+ *
+ * @return 0 on success, AVERROR(ENOSYS) on invalid or unknown pixel format
+ */
+int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt,
+ int *h_shift, int *v_shift);
+
+
+#endif /* AVUTIL_PIXDESC_H */
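
Illustrative sketch (not part of the vendored header): querying a pixel format
descriptor with the functions declared above, using YUV 4:2:0 as the example.

    #include <stdio.h>
    #include <libavutil/pixdesc.h>

    int main(void)
    {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(AV_PIX_FMT_YUV420P);
        int h_shift, v_shift;

        if (desc)
            printf("%s: %d components, %d bits/pixel, chroma shifts w=%d h=%d\n",
                   desc->name, desc->nb_components, av_get_bits_per_pixel(desc),
                   desc->log2_chroma_w, desc->log2_chroma_h);

        /* For 4:2:0 both shifts are 1: chroma planes are half size in each axis. */
        if (av_pix_fmt_get_chroma_sub_sample(AV_PIX_FMT_YUV420P, &h_shift, &v_shift) == 0)
            printf("subsampling shifts: h=%d v=%d\n", h_shift, v_shift);
        return 0;
    }
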
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/pixfmt.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/pixfmt.h
new file mode 100644
index 000000000..1072f0089
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/pixfmt.h
@@ -0,0 +1,268 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PIXFMT_H
+#define AVUTIL_PIXFMT_H
+
+/**
+ * @file
+ * pixel format definitions
+ *
+ */
+
+#include "libavutil/avconfig.h"
+#include "libavutil/version.h"
+
+/**
+ * Pixel format.
+ *
+ * @note
+ * PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA
+ * color is put together as:
+ * (A << 24) | (R << 16) | (G << 8) | B
+ * This is stored as BGRA on little-endian CPU architectures and ARGB on
+ * big-endian CPUs.
+ *
+ * @par
+ * When the pixel format is palettized RGB (PIX_FMT_PAL8), the palettized
+ * image data is stored in AVFrame.data[0]. The palette is transported in
+ * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is
+ * formatted the same as in PIX_FMT_RGB32 described above (i.e., it is
+ * also endian-specific). Note also that the individual RGB palette
+ * components stored in AVFrame.data[1] should be in the range 0..255.
+ * This is important as many custom PAL8 video codecs that were designed
+ * to run on the IBM VGA graphics adapter use 6-bit palette components.
+ *
+ * @par
+ * For all the 8bit per pixel formats, an RGB32 palette is in data[1] like
+ * for pal8. This palette is filled in automatically by the function
+ * allocating the picture.
+ *
+ * @note
+ * Make sure that all newly added big-endian formats have pix_fmt & 1 == 1
+ * and that all newly added little-endian formats have pix_fmt & 1 == 0.
+ * This allows simpler detection of big- vs little-endian formats.
+ */
+enum AVPixelFormat {
+ AV_PIX_FMT_NONE = -1,
+ AV_PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
+ AV_PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
+ AV_PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB...
+ AV_PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR...
+ AV_PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+ AV_PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
+ AV_PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
+ AV_PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
+ AV_PIX_FMT_GRAY8, ///< Y , 8bpp
+ AV_PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
+ AV_PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
+ AV_PIX_FMT_PAL8, ///< 8 bit with PIX_FMT_RGB32 palette
+ AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range
+ AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range
+ AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range
+ AV_PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing
+ AV_PIX_FMT_XVMC_MPEG2_IDCT,
+ AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
+ AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
+ AV_PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
+ AV_PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
+ AV_PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
+ AV_PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
+ AV_PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
+ AV_PIX_FMT_NV21, ///< as above, but U and V bytes are swapped
+
+ AV_PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
+ AV_PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
+ AV_PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
+ AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
+
+ AV_PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian
+ AV_PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian
+ AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
+ AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range
+ AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
+ AV_PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ AV_PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ AV_PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ AV_PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ AV_PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ AV_PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
+ AV_PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian
+
+ AV_PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
+ AV_PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
+ AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0
+ AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0
+
+ AV_PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
+ AV_PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
+ AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1
+ AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1
+
+ AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
+ AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
+ AV_PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+
+ AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer
+
+ AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0
+ AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0
+ AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1
+ AV_PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1
+ AV_PIX_FMT_Y400A, ///< 8bit gray, 8bit alpha
+ AV_PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
+ AV_PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian
+ AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_VDA_VLD, ///< hardware decoding through VDA
+ AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp
+ AV_PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big-endian
+ AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian
+ AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian
+ AV_PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little-endian
+ AV_PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big-endian
+ AV_PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little-endian
+ AV_PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
+ AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
+ AV_PIX_FMT_YUVA420P9BE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA420P9LE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA422P9BE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA422P9LE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA444P9BE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA444P9LE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
+ AV_PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
+
+#if FF_API_PIX_FMT
+#include "old_pix_fmts.h"
+#endif
+};
+
+#if AV_HAVE_BIGENDIAN
+# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be
+#else
+# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le
+#endif
+
+#define AV_PIX_FMT_RGB32 AV_PIX_FMT_NE(ARGB, BGRA)
+#define AV_PIX_FMT_RGB32_1 AV_PIX_FMT_NE(RGBA, ABGR)
+#define AV_PIX_FMT_BGR32 AV_PIX_FMT_NE(ABGR, RGBA)
+#define AV_PIX_FMT_BGR32_1 AV_PIX_FMT_NE(BGRA, ARGB)
+
+#define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE)
+#define AV_PIX_FMT_RGB48 AV_PIX_FMT_NE(RGB48BE, RGB48LE)
+#define AV_PIX_FMT_RGB565 AV_PIX_FMT_NE(RGB565BE, RGB565LE)
+#define AV_PIX_FMT_RGB555 AV_PIX_FMT_NE(RGB555BE, RGB555LE)
+#define AV_PIX_FMT_RGB444 AV_PIX_FMT_NE(RGB444BE, RGB444LE)
+#define AV_PIX_FMT_BGR48 AV_PIX_FMT_NE(BGR48BE, BGR48LE)
+#define AV_PIX_FMT_BGR565 AV_PIX_FMT_NE(BGR565BE, BGR565LE)
+#define AV_PIX_FMT_BGR555 AV_PIX_FMT_NE(BGR555BE, BGR555LE)
+#define AV_PIX_FMT_BGR444 AV_PIX_FMT_NE(BGR444BE, BGR444LE)
+
+#define AV_PIX_FMT_YUV420P9 AV_PIX_FMT_NE(YUV420P9BE , YUV420P9LE)
+#define AV_PIX_FMT_YUV422P9 AV_PIX_FMT_NE(YUV422P9BE , YUV422P9LE)
+#define AV_PIX_FMT_YUV444P9 AV_PIX_FMT_NE(YUV444P9BE , YUV444P9LE)
+#define AV_PIX_FMT_YUV420P10 AV_PIX_FMT_NE(YUV420P10BE, YUV420P10LE)
+#define AV_PIX_FMT_YUV422P10 AV_PIX_FMT_NE(YUV422P10BE, YUV422P10LE)
+#define AV_PIX_FMT_YUV444P10 AV_PIX_FMT_NE(YUV444P10BE, YUV444P10LE)
+#define AV_PIX_FMT_YUV420P16 AV_PIX_FMT_NE(YUV420P16BE, YUV420P16LE)
+#define AV_PIX_FMT_YUV422P16 AV_PIX_FMT_NE(YUV422P16BE, YUV422P16LE)
+#define AV_PIX_FMT_YUV444P16 AV_PIX_FMT_NE(YUV444P16BE, YUV444P16LE)
+
+#define AV_PIX_FMT_GBRP9 AV_PIX_FMT_NE(GBRP9BE , GBRP9LE)
+#define AV_PIX_FMT_GBRP10 AV_PIX_FMT_NE(GBRP10BE, GBRP10LE)
+#define AV_PIX_FMT_GBRP16 AV_PIX_FMT_NE(GBRP16BE, GBRP16LE)
+
+#define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE)
+#define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE)
+#define AV_PIX_FMT_YUVA444P9 AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE)
+#define AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE)
+#define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE)
+#define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE)
+#define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE)
+#define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE)
+#define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE)
+
+#if FF_API_PIX_FMT
+#define PixelFormat AVPixelFormat
+
+#define PIX_FMT_NE(be, le) AV_PIX_FMT_NE(be, le)
+
+#define PIX_FMT_RGB32 AV_PIX_FMT_RGB32
+#define PIX_FMT_RGB32_1 AV_PIX_FMT_RGB32_1
+#define PIX_FMT_BGR32 AV_PIX_FMT_BGR32
+#define PIX_FMT_BGR32_1 AV_PIX_FMT_BGR32_1
+
+#define PIX_FMT_GRAY16 AV_PIX_FMT_GRAY16
+#define PIX_FMT_RGB48 AV_PIX_FMT_RGB48
+#define PIX_FMT_RGB565 AV_PIX_FMT_RGB565
+#define PIX_FMT_RGB555 AV_PIX_FMT_RGB555
+#define PIX_FMT_RGB444 AV_PIX_FMT_RGB444
+#define PIX_FMT_BGR48 AV_PIX_FMT_BGR48
+#define PIX_FMT_BGR565 AV_PIX_FMT_BGR565
+#define PIX_FMT_BGR555 AV_PIX_FMT_BGR555
+#define PIX_FMT_BGR444 AV_PIX_FMT_BGR444
+
+#define PIX_FMT_YUV420P9 AV_PIX_FMT_YUV420P9
+#define PIX_FMT_YUV422P9 AV_PIX_FMT_YUV422P9
+#define PIX_FMT_YUV444P9 AV_PIX_FMT_YUV444P9
+#define PIX_FMT_YUV420P10 AV_PIX_FMT_YUV420P10
+#define PIX_FMT_YUV422P10 AV_PIX_FMT_YUV422P10
+#define PIX_FMT_YUV444P10 AV_PIX_FMT_YUV444P10
+#define PIX_FMT_YUV420P16 AV_PIX_FMT_YUV420P16
+#define PIX_FMT_YUV422P16 AV_PIX_FMT_YUV422P16
+#define PIX_FMT_YUV444P16 AV_PIX_FMT_YUV444P16
+
+#define PIX_FMT_GBRP9 AV_PIX_FMT_GBRP9
+#define PIX_FMT_GBRP10 AV_PIX_FMT_GBRP10
+#define PIX_FMT_GBRP16 AV_PIX_FMT_GBRP16
+#endif
+
+#endif /* AVUTIL_PIXFMT_H */
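
Aside (not part of the vendored header): the AV_PIX_FMT_NE() aliases above pick
the native-endian variant at compile time. A small sketch, relying on
av_get_pix_fmt_name() and av_get_pix_fmt() from pixdesc.h:

    #include <stdio.h>
    #include <libavutil/pixdesc.h>
    #include <libavutil/pixfmt.h>

    int main(void)
    {
        /* Resolves to ARGB on big-endian and BGRA on little-endian machines. */
        printf("AV_PIX_FMT_RGB32 -> %s\n", av_get_pix_fmt_name(AV_PIX_FMT_RGB32));

        /* Name lookup falls back to the native-endian variant, e.g. gray16le. */
        printf("\"gray16\" -> %s\n",
               av_get_pix_fmt_name(av_get_pix_fmt("gray16")));
        return 0;
    }
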
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/random_seed.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/random_seed.h
new file mode 100644
index 000000000..b1fad13d0
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/random_seed.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2009 Baptiste Coudurier <baptiste.coudurier@gmail.com>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_RANDOM_SEED_H
+#define AVUTIL_RANDOM_SEED_H
+
+#include <stdint.h>
+/**
+ * @addtogroup lavu_crypto
+ * @{
+ */
+
+/**
+ * Get random data.
+ *
+ * This function can be called repeatedly to generate more random bits
+ * as needed. It is generally quite slow, and usually used to seed a
+ * PRNG. As it uses /dev/urandom and /dev/random, the quality of the
+ * returned random data depends on the platform.
+ */
+uint32_t av_get_random_seed(void);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_RANDOM_SEED_H */
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/rational.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/rational.h
new file mode 100644
index 000000000..5d7dab7fd
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/rational.h
@@ -0,0 +1,155 @@
+/*
+ * rational numbers
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * rational numbers
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVUTIL_RATIONAL_H
+#define AVUTIL_RATIONAL_H
+
+#include <stdint.h>
+#include <limits.h>
+#include "attributes.h"
+
+/**
+ * @addtogroup lavu_math
+ * @{
+ */
+
+/**
+ * rational number numerator/denominator
+ */
+typedef struct AVRational{
+ int num; ///< numerator
+ int den; ///< denominator
+} AVRational;
+
+/**
+ * Compare two rationals.
+ * @param a first rational
+ * @param b second rational
+ * @return 0 if a==b, 1 if a>b, -1 if a<b, and INT_MIN if one of the
+ * values is of the form 0/0
+ */
+static inline int av_cmp_q(AVRational a, AVRational b){
+ const int64_t tmp= a.num * (int64_t)b.den - b.num * (int64_t)a.den;
+
+ if(tmp) return ((tmp ^ a.den ^ b.den)>>63)|1;
+ else if(b.den && a.den) return 0;
+ else if(a.num && b.num) return (a.num>>31) - (b.num>>31);
+ else return INT_MIN;
+}
+
+/**
+ * Convert rational to double.
+ * @param a rational to convert
+ * @return (double) a
+ */
+static inline double av_q2d(AVRational a){
+ return a.num / (double) a.den;
+}
+
+/**
+ * Reduce a fraction.
+ * This is useful for framerate calculations.
+ * @param dst_num destination numerator
+ * @param dst_den destination denominator
+ * @param num source numerator
+ * @param den source denominator
+ * @param max the maximum allowed for dst_num & dst_den
+ * @return 1 if exact, 0 otherwise
+ */
+int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max);
+
+/**
+ * Multiply two rationals.
+ * @param b first rational
+ * @param c second rational
+ * @return b*c
+ */
+AVRational av_mul_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Divide one rational by another.
+ * @param b first rational
+ * @param c second rational
+ * @return b/c
+ */
+AVRational av_div_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Add two rationals.
+ * @param b first rational
+ * @param c second rational
+ * @return b+c
+ */
+AVRational av_add_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Subtract one rational from another.
+ * @param b first rational
+ * @param c second rational
+ * @return b-c
+ */
+AVRational av_sub_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Invert a rational.
+ * @param q value
+ * @return 1 / q
+ */
+static av_always_inline AVRational av_inv_q(AVRational q)
+{
+ AVRational r = { q.den, q.num };
+ return r;
+}
+
+/**
+ * Convert a double precision floating point number to a rational.
+ * inf is expressed as {1,0} or {-1,0} depending on the sign.
+ *
+ * @param d double to convert
+ * @param max the maximum allowed numerator and denominator
+ * @return (AVRational) d
+ */
+AVRational av_d2q(double d, int max) av_const;
+
+/**
+ * @return 1 if q1 is nearer to q than q2, -1 if q2 is nearer
+ * than q1, 0 if they have the same distance.
+ */
+int av_nearer_q(AVRational q, AVRational q1, AVRational q2);
+
+/**
+ * Find the nearest value in q_list to q.
+ * @param q_list an array of rationals terminated by {0, 0}
+ * @return the index of the nearest value found in the array
+ */
+int av_find_nearest_q_idx(AVRational q, const AVRational* q_list);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_RATIONAL_H */
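
Illustrative sketch (not part of the vendored header): the rational helpers
above, using the NTSC frame rate and a 90 kHz timebase as arbitrary example
values.

    #include <stdio.h>
    #include <libavutil/rational.h>

    int main(void)
    {
        AVRational ntsc = { 30000, 1001 };
        AVRational pal  = { 25, 1 };
        AVRational tb   = { 90000, 1 };

        printf("ntsc ~= %f fps\n", av_q2d(ntsc));             /* ~29.97 */
        printf("cmp(ntsc, pal) = %d\n", av_cmp_q(ntsc, pal)); /* 1: ntsc > pal */

        /* Duration of one NTSC frame expressed in 90 kHz ticks: 3003/1. */
        AVRational dur = av_mul_q(av_inv_q(ntsc), tb);
        printf("frame duration = %d/%d ticks\n", dur.num, dur.den);
        return 0;
    }
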
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/samplefmt.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/samplefmt.h
new file mode 100644
index 000000000..33cbdedf5
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/samplefmt.h
@@ -0,0 +1,220 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_SAMPLEFMT_H
+#define AVUTIL_SAMPLEFMT_H
+
+#include <stdint.h>
+
+#include "avutil.h"
+#include "attributes.h"
+
+/**
+ * Audio Sample Formats
+ *
+ * @par
+ * The data described by the sample format is always in native-endian order.
+ * Sample values can be expressed by native C types, hence the lack of a signed
+ * 24-bit sample format even though it is a common raw audio data format.
+ *
+ * @par
+ * The floating-point formats are based on full volume being in the range
+ * [-1.0, 1.0]. Any values outside this range are beyond full volume level.
+ *
+ * @par
+ * The data layout as used in av_samples_fill_arrays() and elsewhere in Libav
+ * (such as AVFrame in libavcodec) is as follows:
+ *
+ * For planar sample formats, each audio channel is in a separate data plane,
+ * and linesize is the buffer size, in bytes, for a single plane. All data
+ * planes must be the same size. For packed sample formats, only the first data
+ * plane is used, and samples for each channel are interleaved. In this case,
+ * linesize is the buffer size, in bytes, of that single plane.
+ */
+enum AVSampleFormat {
+ AV_SAMPLE_FMT_NONE = -1,
+ AV_SAMPLE_FMT_U8, ///< unsigned 8 bits
+ AV_SAMPLE_FMT_S16, ///< signed 16 bits
+ AV_SAMPLE_FMT_S32, ///< signed 32 bits
+ AV_SAMPLE_FMT_FLT, ///< float
+ AV_SAMPLE_FMT_DBL, ///< double
+
+ AV_SAMPLE_FMT_U8P, ///< unsigned 8 bits, planar
+ AV_SAMPLE_FMT_S16P, ///< signed 16 bits, planar
+ AV_SAMPLE_FMT_S32P, ///< signed 32 bits, planar
+ AV_SAMPLE_FMT_FLTP, ///< float, planar
+ AV_SAMPLE_FMT_DBLP, ///< double, planar
+
+ AV_SAMPLE_FMT_NB ///< Number of sample formats. DO NOT USE if linking dynamically
+};
+
+/**
+ * Return the name of sample_fmt, or NULL if sample_fmt is not
+ * recognized.
+ */
+const char *av_get_sample_fmt_name(enum AVSampleFormat sample_fmt);
+
+/**
+ * Return a sample format corresponding to name, or AV_SAMPLE_FMT_NONE
+ * on error.
+ */
+enum AVSampleFormat av_get_sample_fmt(const char *name);
+
+/**
+ * Get the packed alternative form of the given sample format.
+ *
+ * If the passed sample_fmt is already in packed format, the format returned is
+ * the same as the input.
+ *
+ * @return the packed alternative form of the given sample format or
+ AV_SAMPLE_FMT_NONE on error.
+ */
+enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt);
+
+/**
+ * Get the planar alternative form of the given sample format.
+ *
+ * If the passed sample_fmt is already in planar format, the format returned is
+ * the same as the input.
+ *
+ * @return the planar alternative form of the given sample format or
+ AV_SAMPLE_FMT_NONE on error.
+ */
+enum AVSampleFormat av_get_planar_sample_fmt(enum AVSampleFormat sample_fmt);
+
+/**
+ * Generate a string corresponding to the sample format with
+ * sample_fmt, or a header if sample_fmt is negative.
+ *
+ * @param buf the buffer where to write the string
+ * @param buf_size the size of buf
+ * @param sample_fmt the number of the sample format to print the
+ * corresponding info string, or a negative value to print the
+ * corresponding header.
+ * @return the pointer to the filled buffer or NULL if sample_fmt is
+ * unknown or in case of other errors
+ */
+char *av_get_sample_fmt_string(char *buf, int buf_size, enum AVSampleFormat sample_fmt);
+
+/**
+ * Return number of bytes per sample.
+ *
+ * @param sample_fmt the sample format
+ * @return number of bytes per sample or zero if unknown for the given
+ * sample format
+ */
+int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt);
+
+/**
+ * Check if the sample format is planar.
+ *
+ * @param sample_fmt the sample format to inspect
+ * @return 1 if the sample format is planar, 0 if it is interleaved
+ */
+int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt);
+
+/**
+ * Get the required buffer size for the given audio parameters.
+ *
+ * @param[out] linesize calculated linesize, may be NULL
+ * @param nb_channels the number of channels
+ * @param nb_samples the number of samples in a single channel
+ * @param sample_fmt the sample format
+ * @param align buffer size alignment (0 = default, 1 = no alignment)
+ * @return required buffer size, or negative error code on failure
+ */
+int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,
+ enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * Fill channel data pointers and linesize for samples with sample
+ * format sample_fmt.
+ *
+ * The pointers array is filled with the pointers to the samples data:
+ * for planar formats, each channel's pointer is set to the start of that
+ * channel's data within the buffer; for packed formats, only a pointer to
+ * the start of the entire buffer is set.
+ *
+ * The linesize array is filled with the aligned size of each channel's data
+ * buffer for planar layout, or the aligned size of the buffer for all channels
+ * for packed layout.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param[out] audio_data array to be filled with the pointer for each channel
+ * @param[out] linesize calculated linesize, may be NULL
+ * @param buf the pointer to a buffer containing the samples
+ * @param nb_channels the number of channels
+ * @param nb_samples the number of samples in a single channel
+ * @param sample_fmt the sample format
+ * @param align buffer size alignment (0 = default, 1 = no alignment)
+ * @return 0 on success or a negative error code on failure
+ */
+int av_samples_fill_arrays(uint8_t **audio_data, int *linesize,
+ const uint8_t *buf,
+ int nb_channels, int nb_samples,
+ enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * Allocate a samples buffer for nb_samples samples, and fill data pointers and
+ * linesize accordingly.
+ * The allocated samples buffer can be freed by using av_freep(&audio_data[0])
+ * Allocated data will be initialized to silence.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param[out] audio_data array to be filled with the pointer for each channel
+ * @param[out] linesize aligned size for audio buffer(s), may be NULL
+ * @param nb_channels number of audio channels
+ * @param nb_samples number of samples per channel
+ * @param align buffer size alignment (0 = default, 1 = no alignment)
+ * @return 0 on success or a negative error code on failure
+ * @see av_samples_fill_arrays()
+ */
+int av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels,
+ int nb_samples, enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * Copy samples from src to dst.
+ *
+ * @param dst destination array of pointers to data planes
+ * @param src source array of pointers to data planes
+ * @param dst_offset offset in samples at which the data will be written to dst
+ * @param src_offset offset in samples at which the data will be read from src
+ * @param nb_samples number of samples to be copied
+ * @param nb_channels number of audio channels
+ * @param sample_fmt audio sample format
+ */
+int av_samples_copy(uint8_t **dst, uint8_t * const *src, int dst_offset,
+ int src_offset, int nb_samples, int nb_channels,
+ enum AVSampleFormat sample_fmt);
+
+/**
+ * Fill an audio buffer with silence.
+ *
+ * @param audio_data array of pointers to data planes
+ * @param offset offset in samples at which to start filling
+ * @param nb_samples number of samples to fill
+ * @param nb_channels number of audio channels
+ * @param sample_fmt audio sample format
+ */
+int av_samples_set_silence(uint8_t **audio_data, int offset, int nb_samples,
+ int nb_channels, enum AVSampleFormat sample_fmt);
+
+#endif /* AVUTIL_SAMPLEFMT_H */
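
For illustration only (not part of the vendored header): allocating and
silencing a planar float buffer with the helpers above; av_freep() comes from
libavutil/mem.h, which is not shown here.

    #include <stdio.h>
    #include <libavutil/mem.h>
    #include <libavutil/samplefmt.h>

    int main(void)
    {
        uint8_t *data[2] = { NULL };  /* one plane pointer per channel (planar) */
        int linesize;

        /* 2 channels x 1024 samples of planar float; linesize is per plane. */
        if (av_samples_alloc(data, &linesize, 2, 1024, AV_SAMPLE_FMT_FLTP, 0) < 0)
            return 1;

        printf("planar=%d, %d bytes/sample, linesize=%d\n",
               av_sample_fmt_is_planar(AV_SAMPLE_FMT_FLTP),
               av_get_bytes_per_sample(AV_SAMPLE_FMT_FLTP), linesize);

        av_samples_set_silence(data, 0, 1024, 2, AV_SAMPLE_FMT_FLTP);
        av_freep(&data[0]);  /* frees the whole buffer, as documented above */
        return 0;
    }
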
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/sha.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/sha.h
new file mode 100644
index 000000000..4c9a0c909
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/sha.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_SHA_H
+#define AVUTIL_SHA_H
+
+#include <stdint.h>
+
+#include "attributes.h"
+#include "version.h"
+
+/**
+ * @defgroup lavu_sha SHA
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+#if FF_API_CONTEXT_SIZE
+extern attribute_deprecated const int av_sha_size;
+#endif
+
+struct AVSHA;
+
+/**
+ * Allocate an AVSHA context.
+ */
+struct AVSHA *av_sha_alloc(void);
+
+/**
+ * Initialize SHA-1 or SHA-2 hashing.
+ *
+ * @param context pointer to the function context (of size av_sha_size)
+ * @param bits    number of bits in the digest (160 for SHA-1; 224 or 256 for SHA-2)
+ * @return zero if initialization succeeded, -1 otherwise
+ */
+int av_sha_init(struct AVSHA* context, int bits);
+
+/**
+ * Update hash value.
+ *
+ * @param context hash function context
+ * @param data input data to update hash with
+ * @param len input data length
+ */
+void av_sha_update(struct AVSHA* context, const uint8_t* data, unsigned int len);
+
+/**
+ * Finish hashing and output digest value.
+ *
+ * @param context hash function context
+ * @param digest buffer where output digest value is stored
+ */
+void av_sha_final(struct AVSHA* context, uint8_t *digest);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_SHA_H */
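
Illustrative sketch (not part of the vendored header): one-shot SHA-256 hashing
with the functions above; av_free() comes from libavutil/mem.h (not shown).

    #include <stdio.h>
    #include <libavutil/mem.h>
    #include <libavutil/sha.h>

    int main(void)
    {
        struct AVSHA *sha = av_sha_alloc();
        uint8_t digest[32];  /* 256 bits */
        int i;

        if (!sha || av_sha_init(sha, 256) < 0)
            return 1;
        av_sha_update(sha, (const uint8_t *)"hello", 5);
        av_sha_final(sha, digest);

        for (i = 0; i < (int)sizeof(digest); i++)
            printf("%02x", digest[i]);
        printf("\n");
        av_free(sha);
        return 0;
    }
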
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/time.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/time.h
new file mode 100644
index 000000000..b01a97d77
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/time.h
@@ -0,0 +1,39 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_TIME_H
+#define AVUTIL_TIME_H
+
+#include <stdint.h>
+
+/**
+ * Get the current time in microseconds.
+ */
+int64_t av_gettime(void);
+
+/**
+ * Sleep for a period of time. Although the duration is expressed in
+ * microseconds, the actual delay may be rounded to the precision of the
+ * system timer.
+ *
+ * @param usec Number of microseconds to sleep.
+ * @return zero on success or (negative) error code.
+ */
+int av_usleep(unsigned usec);
+
+#endif /* AVUTIL_TIME_H */
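
A trivial usage sketch (not part of the vendored header) of the two functions
above; the measured interval will only be approximately 10 ms.

    #include <stdio.h>
    #include <inttypes.h>
    #include <libavutil/time.h>

    int main(void)
    {
        int64_t start = av_gettime();   /* microseconds */
        av_usleep(10000);               /* ~10 ms, rounded to timer precision */
        printf("slept for %" PRId64 " us\n", av_gettime() - start);
        return 0;
    }
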
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/version.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/version.h
new file mode 100644
index 000000000..1dbb11ca2
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/version.h
@@ -0,0 +1,87 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_VERSION_H
+#define AVUTIL_VERSION_H
+
+#include "avutil.h"
+
+/**
+ * @file
+ * @ingroup lavu
+ * Libavutil version macros
+ */
+
+/**
+ * @defgroup lavu_ver Version and Build diagnostics
+ *
+ * Macros and functions useful to check at compile time and at runtime
+ * which version of libavutil is in use.
+ *
+ * @{
+ */
+
+#define LIBAVUTIL_VERSION_MAJOR 52
+#define LIBAVUTIL_VERSION_MINOR 3
+#define LIBAVUTIL_VERSION_MICRO 0
+
+#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
+ LIBAVUTIL_VERSION_MINOR, \
+ LIBAVUTIL_VERSION_MICRO)
+#define LIBAVUTIL_VERSION AV_VERSION(LIBAVUTIL_VERSION_MAJOR, \
+ LIBAVUTIL_VERSION_MINOR, \
+ LIBAVUTIL_VERSION_MICRO)
+#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT
+
+#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION)
+
+/**
+ * @}
+ *
+ * @defgroup depr_guards Deprecation guards
+ * FF_API_* defines may be placed below to indicate public API that will be
+ * dropped at a future version bump. The defines themselves are not part of
+ * the public API and may change, break or disappear at any time.
+ *
+ * @{
+ */
+
+#ifndef FF_API_PIX_FMT
+#define FF_API_PIX_FMT (LIBAVUTIL_VERSION_MAJOR < 53)
+#endif
+#ifndef FF_API_CONTEXT_SIZE
+#define FF_API_CONTEXT_SIZE (LIBAVUTIL_VERSION_MAJOR < 53)
+#endif
+#ifndef FF_API_PIX_FMT_DESC
+#define FF_API_PIX_FMT_DESC (LIBAVUTIL_VERSION_MAJOR < 53)
+#endif
+#ifndef FF_API_AV_REVERSE
+#define FF_API_AV_REVERSE (LIBAVUTIL_VERSION_MAJOR < 53)
+#endif
+#ifndef FF_API_AUDIOCONVERT
+#define FF_API_AUDIOCONVERT (LIBAVUTIL_VERSION_MAJOR < 53)
+#endif
+#ifndef FF_API_CPU_FLAG_MMX2
+#define FF_API_CPU_FLAG_MMX2 (LIBAVUTIL_VERSION_MAJOR < 53)
+#endif
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_VERSION_H */
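As a hedged illustration of how these macros pair with the runtime query, the sketch below compares the header version against avutil_version() (declared in avutil.h, which this header includes); the mismatch policy at the end is an assumption made for the example, not something mandated by the header:

#include <stdio.h>
#include "libavutil/avutil.h"   /* avutil_version(), AV_VERSION_INT() */

int main(void)
{
    unsigned built   = LIBAVUTIL_VERSION_INT;  /* headers we compiled against */
    unsigned runtime = avutil_version();       /* library actually loaded */

    printf("libavutil built %u.%u.%u, running %u.%u.%u\n",
           built >> 16, (built >> 8) & 0xff, built & 0xff,
           runtime >> 16, (runtime >> 8) & 0xff, runtime & 0xff);

    /* Example policy: treat a differing major version as an error. */
    return (built >> 16) == (runtime >> 16) ? 0 : 1;
}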
diff --git a/dom/media/platforms/ffmpeg/libav54/include/libavutil/xtea.h b/dom/media/platforms/ffmpeg/libav54/include/libavutil/xtea.h
new file mode 100644
index 000000000..7d2b07bbc
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/include/libavutil/xtea.h
@@ -0,0 +1,61 @@
+/*
+ * A 32-bit implementation of the XTEA algorithm
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_XTEA_H
+#define AVUTIL_XTEA_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_xtea XTEA
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+typedef struct AVXTEA {
+ uint32_t key[16];
+} AVXTEA;
+
+/**
+ * Initialize an AVXTEA context.
+ *
+ * @param ctx an AVXTEA context
+ * @param key a key of 16 bytes used for encryption/decryption
+ */
+void av_xtea_init(struct AVXTEA *ctx, const uint8_t key[16]);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context.
+ *
+ * @param ctx an AVXTEA context
+ * @param dst destination array, can be equal to src
+ * @param src source array, can be equal to dst
+ * @param count number of 8 byte blocks
+ * @param iv initialization vector for CBC mode, if NULL then ECB will be used
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_xtea_crypt(struct AVXTEA *ctx, uint8_t *dst, const uint8_t *src,
+ int count, uint8_t *iv, int decrypt);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_XTEA_H */
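A self-contained round-trip sketch of the API above, using ECB mode (iv == NULL); the key and plaintext are arbitrary example values:

#include <string.h>
#include "libavutil/xtea.h"

/* Encrypt one 8-byte block, decrypt it again, and check the round trip. */
static int xtea_roundtrip(void)
{
    static const uint8_t key[16] = "0123456789abcdef";    /* example key      */
    uint8_t src[8] = "ABCDEFGH", enc[8], dec[8];          /* one 8-byte block */
    struct AVXTEA ctx;

    av_xtea_init(&ctx, key);
    av_xtea_crypt(&ctx, enc, src, 1, NULL, 0);  /* count = 1 block, encrypt */
    av_xtea_crypt(&ctx, dec, enc, 1, NULL, 1);  /* count = 1 block, decrypt */
    return memcmp(src, dec, sizeof(src)) == 0;
}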
diff --git a/dom/media/platforms/ffmpeg/libav54/moz.build b/dom/media/platforms/ffmpeg/libav54/moz.build
new file mode 100644
index 000000000..2bdc1dc34
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav54/moz.build
@@ -0,0 +1,23 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+UNIFIED_SOURCES += [
+ '../FFmpegAudioDecoder.cpp',
+ '../FFmpegDataDecoder.cpp',
+ '../FFmpegDecoderModule.cpp',
+ '../FFmpegVideoDecoder.cpp',
+]
+LOCAL_INCLUDES += [
+ '..',
+ 'include',
+]
+
+FINAL_LIBRARY = 'xul'
+
+if CONFIG['CLANG_CXX']:
+ CXXFLAGS += [
+ '-Wno-unknown-attributes',
+ ]
diff --git a/dom/media/platforms/ffmpeg/libav55/include/COPYING.LGPLv2.1 b/dom/media/platforms/ffmpeg/libav55/include/COPYING.LGPLv2.1
new file mode 100644
index 000000000..00b4fedfe
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/COPYING.LGPLv2.1
@@ -0,0 +1,504 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavcodec/avcodec.h b/dom/media/platforms/ffmpeg/libav55/include/libavcodec/avcodec.h
new file mode 100644
index 000000000..244f47ba1
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavcodec/avcodec.h
@@ -0,0 +1,4356 @@
+/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AVCODEC_H
+#define AVCODEC_AVCODEC_H
+
+/**
+ * @file
+ * @ingroup libavc
+ * Libavcodec external API header
+ */
+
+#include <errno.h>
+#include "libavutil/samplefmt.h"
+#include "libavutil/attributes.h"
+#include "libavutil/avutil.h"
+#include "libavutil/buffer.h"
+#include "libavutil/cpu.h"
+#include "libavutil/dict.h"
+#include "libavutil/frame.h"
+#include "libavutil/log.h"
+#include "libavutil/pixfmt.h"
+#include "libavutil/rational.h"
+
+#include "version.h"
+
+#if FF_API_FAST_MALLOC
+// to provide fast_*alloc
+#include "libavutil/mem.h"
+#endif
+
+/**
+ * @defgroup libavc Encoding/Decoding Library
+ * @{
+ *
+ * @defgroup lavc_decoding Decoding
+ * @{
+ * @}
+ *
+ * @defgroup lavc_encoding Encoding
+ * @{
+ * @}
+ *
+ * @defgroup lavc_codec Codecs
+ * @{
+ * @defgroup lavc_codec_native Native Codecs
+ * @{
+ * @}
+ * @defgroup lavc_codec_wrappers External library wrappers
+ * @{
+ * @}
+ * @defgroup lavc_codec_hwaccel Hardware Accelerators bridge
+ * @{
+ * @}
+ * @}
+ * @defgroup lavc_internal Internal
+ * @{
+ * @}
+ * @}
+ *
+ */
+
+/**
+ * @defgroup lavc_core Core functions/structures.
+ * @ingroup libavc
+ *
+ * Basic definitions, functions for querying libavcodec capabilities,
+ * allocating core structures, etc.
+ * @{
+ */
+
+
+/**
+ * Identify the syntax and semantics of the bitstream.
+ * The principle is roughly:
+ * Two decoders with the same ID can decode the same streams.
+ * Two encoders with the same ID can encode compatible streams.
+ * There may be slight deviations from the principle due to implementation
+ * details.
+ *
+ * If you add a codec ID to this list, add it so that
+ * 1. no value of an existing codec ID changes (that would break ABI),
+ * 2. it is as close as possible to similar codecs.
+ *
+ * After adding new codec IDs, do not forget to add an entry to the codec
+ * descriptor list and bump libavcodec minor version.
+ */
+enum AVCodecID {
+ AV_CODEC_ID_NONE,
+
+ /* video codecs */
+ AV_CODEC_ID_MPEG1VIDEO,
+ AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding
+#if FF_API_XVMC
+ AV_CODEC_ID_MPEG2VIDEO_XVMC,
+#endif /* FF_API_XVMC */
+ AV_CODEC_ID_H261,
+ AV_CODEC_ID_H263,
+ AV_CODEC_ID_RV10,
+ AV_CODEC_ID_RV20,
+ AV_CODEC_ID_MJPEG,
+ AV_CODEC_ID_MJPEGB,
+ AV_CODEC_ID_LJPEG,
+ AV_CODEC_ID_SP5X,
+ AV_CODEC_ID_JPEGLS,
+ AV_CODEC_ID_MPEG4,
+ AV_CODEC_ID_RAWVIDEO,
+ AV_CODEC_ID_MSMPEG4V1,
+ AV_CODEC_ID_MSMPEG4V2,
+ AV_CODEC_ID_MSMPEG4V3,
+ AV_CODEC_ID_WMV1,
+ AV_CODEC_ID_WMV2,
+ AV_CODEC_ID_H263P,
+ AV_CODEC_ID_H263I,
+ AV_CODEC_ID_FLV1,
+ AV_CODEC_ID_SVQ1,
+ AV_CODEC_ID_SVQ3,
+ AV_CODEC_ID_DVVIDEO,
+ AV_CODEC_ID_HUFFYUV,
+ AV_CODEC_ID_CYUV,
+ AV_CODEC_ID_H264,
+ AV_CODEC_ID_INDEO3,
+ AV_CODEC_ID_VP3,
+ AV_CODEC_ID_THEORA,
+ AV_CODEC_ID_ASV1,
+ AV_CODEC_ID_ASV2,
+ AV_CODEC_ID_FFV1,
+ AV_CODEC_ID_4XM,
+ AV_CODEC_ID_VCR1,
+ AV_CODEC_ID_CLJR,
+ AV_CODEC_ID_MDEC,
+ AV_CODEC_ID_ROQ,
+ AV_CODEC_ID_INTERPLAY_VIDEO,
+ AV_CODEC_ID_XAN_WC3,
+ AV_CODEC_ID_XAN_WC4,
+ AV_CODEC_ID_RPZA,
+ AV_CODEC_ID_CINEPAK,
+ AV_CODEC_ID_WS_VQA,
+ AV_CODEC_ID_MSRLE,
+ AV_CODEC_ID_MSVIDEO1,
+ AV_CODEC_ID_IDCIN,
+ AV_CODEC_ID_8BPS,
+ AV_CODEC_ID_SMC,
+ AV_CODEC_ID_FLIC,
+ AV_CODEC_ID_TRUEMOTION1,
+ AV_CODEC_ID_VMDVIDEO,
+ AV_CODEC_ID_MSZH,
+ AV_CODEC_ID_ZLIB,
+ AV_CODEC_ID_QTRLE,
+ AV_CODEC_ID_TSCC,
+ AV_CODEC_ID_ULTI,
+ AV_CODEC_ID_QDRAW,
+ AV_CODEC_ID_VIXL,
+ AV_CODEC_ID_QPEG,
+ AV_CODEC_ID_PNG,
+ AV_CODEC_ID_PPM,
+ AV_CODEC_ID_PBM,
+ AV_CODEC_ID_PGM,
+ AV_CODEC_ID_PGMYUV,
+ AV_CODEC_ID_PAM,
+ AV_CODEC_ID_FFVHUFF,
+ AV_CODEC_ID_RV30,
+ AV_CODEC_ID_RV40,
+ AV_CODEC_ID_VC1,
+ AV_CODEC_ID_WMV3,
+ AV_CODEC_ID_LOCO,
+ AV_CODEC_ID_WNV1,
+ AV_CODEC_ID_AASC,
+ AV_CODEC_ID_INDEO2,
+ AV_CODEC_ID_FRAPS,
+ AV_CODEC_ID_TRUEMOTION2,
+ AV_CODEC_ID_BMP,
+ AV_CODEC_ID_CSCD,
+ AV_CODEC_ID_MMVIDEO,
+ AV_CODEC_ID_ZMBV,
+ AV_CODEC_ID_AVS,
+ AV_CODEC_ID_SMACKVIDEO,
+ AV_CODEC_ID_NUV,
+ AV_CODEC_ID_KMVC,
+ AV_CODEC_ID_FLASHSV,
+ AV_CODEC_ID_CAVS,
+ AV_CODEC_ID_JPEG2000,
+ AV_CODEC_ID_VMNC,
+ AV_CODEC_ID_VP5,
+ AV_CODEC_ID_VP6,
+ AV_CODEC_ID_VP6F,
+ AV_CODEC_ID_TARGA,
+ AV_CODEC_ID_DSICINVIDEO,
+ AV_CODEC_ID_TIERTEXSEQVIDEO,
+ AV_CODEC_ID_TIFF,
+ AV_CODEC_ID_GIF,
+ AV_CODEC_ID_DXA,
+ AV_CODEC_ID_DNXHD,
+ AV_CODEC_ID_THP,
+ AV_CODEC_ID_SGI,
+ AV_CODEC_ID_C93,
+ AV_CODEC_ID_BETHSOFTVID,
+ AV_CODEC_ID_PTX,
+ AV_CODEC_ID_TXD,
+ AV_CODEC_ID_VP6A,
+ AV_CODEC_ID_AMV,
+ AV_CODEC_ID_VB,
+ AV_CODEC_ID_PCX,
+ AV_CODEC_ID_SUNRAST,
+ AV_CODEC_ID_INDEO4,
+ AV_CODEC_ID_INDEO5,
+ AV_CODEC_ID_MIMIC,
+ AV_CODEC_ID_RL2,
+ AV_CODEC_ID_ESCAPE124,
+ AV_CODEC_ID_DIRAC,
+ AV_CODEC_ID_BFI,
+ AV_CODEC_ID_CMV,
+ AV_CODEC_ID_MOTIONPIXELS,
+ AV_CODEC_ID_TGV,
+ AV_CODEC_ID_TGQ,
+ AV_CODEC_ID_TQI,
+ AV_CODEC_ID_AURA,
+ AV_CODEC_ID_AURA2,
+ AV_CODEC_ID_V210X,
+ AV_CODEC_ID_TMV,
+ AV_CODEC_ID_V210,
+ AV_CODEC_ID_DPX,
+ AV_CODEC_ID_MAD,
+ AV_CODEC_ID_FRWU,
+ AV_CODEC_ID_FLASHSV2,
+ AV_CODEC_ID_CDGRAPHICS,
+ AV_CODEC_ID_R210,
+ AV_CODEC_ID_ANM,
+ AV_CODEC_ID_BINKVIDEO,
+ AV_CODEC_ID_IFF_ILBM,
+ AV_CODEC_ID_IFF_BYTERUN1,
+ AV_CODEC_ID_KGV1,
+ AV_CODEC_ID_YOP,
+ AV_CODEC_ID_VP8,
+ AV_CODEC_ID_PICTOR,
+ AV_CODEC_ID_ANSI,
+ AV_CODEC_ID_A64_MULTI,
+ AV_CODEC_ID_A64_MULTI5,
+ AV_CODEC_ID_R10K,
+ AV_CODEC_ID_MXPEG,
+ AV_CODEC_ID_LAGARITH,
+ AV_CODEC_ID_PRORES,
+ AV_CODEC_ID_JV,
+ AV_CODEC_ID_DFA,
+ AV_CODEC_ID_WMV3IMAGE,
+ AV_CODEC_ID_VC1IMAGE,
+ AV_CODEC_ID_UTVIDEO,
+ AV_CODEC_ID_BMV_VIDEO,
+ AV_CODEC_ID_VBLE,
+ AV_CODEC_ID_DXTORY,
+ AV_CODEC_ID_V410,
+ AV_CODEC_ID_XWD,
+ AV_CODEC_ID_CDXL,
+ AV_CODEC_ID_XBM,
+ AV_CODEC_ID_ZEROCODEC,
+ AV_CODEC_ID_MSS1,
+ AV_CODEC_ID_MSA1,
+ AV_CODEC_ID_TSCC2,
+ AV_CODEC_ID_MTS2,
+ AV_CODEC_ID_CLLC,
+ AV_CODEC_ID_MSS2,
+ AV_CODEC_ID_VP9,
+ AV_CODEC_ID_AIC,
+ AV_CODEC_ID_ESCAPE130,
+ AV_CODEC_ID_G2M,
+ AV_CODEC_ID_WEBP,
+ AV_CODEC_ID_HNM4_VIDEO,
+ AV_CODEC_ID_HEVC,
+ AV_CODEC_ID_FIC,
+
+ /* various PCM "codecs" */
+ AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
+ AV_CODEC_ID_PCM_S16LE = 0x10000,
+ AV_CODEC_ID_PCM_S16BE,
+ AV_CODEC_ID_PCM_U16LE,
+ AV_CODEC_ID_PCM_U16BE,
+ AV_CODEC_ID_PCM_S8,
+ AV_CODEC_ID_PCM_U8,
+ AV_CODEC_ID_PCM_MULAW,
+ AV_CODEC_ID_PCM_ALAW,
+ AV_CODEC_ID_PCM_S32LE,
+ AV_CODEC_ID_PCM_S32BE,
+ AV_CODEC_ID_PCM_U32LE,
+ AV_CODEC_ID_PCM_U32BE,
+ AV_CODEC_ID_PCM_S24LE,
+ AV_CODEC_ID_PCM_S24BE,
+ AV_CODEC_ID_PCM_U24LE,
+ AV_CODEC_ID_PCM_U24BE,
+ AV_CODEC_ID_PCM_S24DAUD,
+ AV_CODEC_ID_PCM_ZORK,
+ AV_CODEC_ID_PCM_S16LE_PLANAR,
+ AV_CODEC_ID_PCM_DVD,
+ AV_CODEC_ID_PCM_F32BE,
+ AV_CODEC_ID_PCM_F32LE,
+ AV_CODEC_ID_PCM_F64BE,
+ AV_CODEC_ID_PCM_F64LE,
+ AV_CODEC_ID_PCM_BLURAY,
+ AV_CODEC_ID_PCM_LXF,
+ AV_CODEC_ID_S302M,
+ AV_CODEC_ID_PCM_S8_PLANAR,
+ AV_CODEC_ID_PCM_S24LE_PLANAR,
+ AV_CODEC_ID_PCM_S32LE_PLANAR,
+
+ /* various ADPCM codecs */
+ AV_CODEC_ID_ADPCM_IMA_QT = 0x11000,
+ AV_CODEC_ID_ADPCM_IMA_WAV,
+ AV_CODEC_ID_ADPCM_IMA_DK3,
+ AV_CODEC_ID_ADPCM_IMA_DK4,
+ AV_CODEC_ID_ADPCM_IMA_WS,
+ AV_CODEC_ID_ADPCM_IMA_SMJPEG,
+ AV_CODEC_ID_ADPCM_MS,
+ AV_CODEC_ID_ADPCM_4XM,
+ AV_CODEC_ID_ADPCM_XA,
+ AV_CODEC_ID_ADPCM_ADX,
+ AV_CODEC_ID_ADPCM_EA,
+ AV_CODEC_ID_ADPCM_G726,
+ AV_CODEC_ID_ADPCM_CT,
+ AV_CODEC_ID_ADPCM_SWF,
+ AV_CODEC_ID_ADPCM_YAMAHA,
+ AV_CODEC_ID_ADPCM_SBPRO_4,
+ AV_CODEC_ID_ADPCM_SBPRO_3,
+ AV_CODEC_ID_ADPCM_SBPRO_2,
+ AV_CODEC_ID_ADPCM_THP,
+ AV_CODEC_ID_ADPCM_IMA_AMV,
+ AV_CODEC_ID_ADPCM_EA_R1,
+ AV_CODEC_ID_ADPCM_EA_R3,
+ AV_CODEC_ID_ADPCM_EA_R2,
+ AV_CODEC_ID_ADPCM_IMA_EA_SEAD,
+ AV_CODEC_ID_ADPCM_IMA_EA_EACS,
+ AV_CODEC_ID_ADPCM_EA_XAS,
+ AV_CODEC_ID_ADPCM_EA_MAXIS_XA,
+ AV_CODEC_ID_ADPCM_IMA_ISS,
+ AV_CODEC_ID_ADPCM_G722,
+ AV_CODEC_ID_ADPCM_IMA_APC,
+
+ /* AMR */
+ AV_CODEC_ID_AMR_NB = 0x12000,
+ AV_CODEC_ID_AMR_WB,
+
+ /* RealAudio codecs*/
+ AV_CODEC_ID_RA_144 = 0x13000,
+ AV_CODEC_ID_RA_288,
+
+ /* various DPCM codecs */
+ AV_CODEC_ID_ROQ_DPCM = 0x14000,
+ AV_CODEC_ID_INTERPLAY_DPCM,
+ AV_CODEC_ID_XAN_DPCM,
+ AV_CODEC_ID_SOL_DPCM,
+
+ /* audio codecs */
+ AV_CODEC_ID_MP2 = 0x15000,
+ AV_CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3
+ AV_CODEC_ID_AAC,
+ AV_CODEC_ID_AC3,
+ AV_CODEC_ID_DTS,
+ AV_CODEC_ID_VORBIS,
+ AV_CODEC_ID_DVAUDIO,
+ AV_CODEC_ID_WMAV1,
+ AV_CODEC_ID_WMAV2,
+ AV_CODEC_ID_MACE3,
+ AV_CODEC_ID_MACE6,
+ AV_CODEC_ID_VMDAUDIO,
+ AV_CODEC_ID_FLAC,
+ AV_CODEC_ID_MP3ADU,
+ AV_CODEC_ID_MP3ON4,
+ AV_CODEC_ID_SHORTEN,
+ AV_CODEC_ID_ALAC,
+ AV_CODEC_ID_WESTWOOD_SND1,
+ AV_CODEC_ID_GSM, ///< as in Berlin toast format
+ AV_CODEC_ID_QDM2,
+ AV_CODEC_ID_COOK,
+ AV_CODEC_ID_TRUESPEECH,
+ AV_CODEC_ID_TTA,
+ AV_CODEC_ID_SMACKAUDIO,
+ AV_CODEC_ID_QCELP,
+ AV_CODEC_ID_WAVPACK,
+ AV_CODEC_ID_DSICINAUDIO,
+ AV_CODEC_ID_IMC,
+ AV_CODEC_ID_MUSEPACK7,
+ AV_CODEC_ID_MLP,
+ AV_CODEC_ID_GSM_MS, /* as found in WAV */
+ AV_CODEC_ID_ATRAC3,
+#if FF_API_VOXWARE
+ AV_CODEC_ID_VOXWARE,
+#endif
+ AV_CODEC_ID_APE,
+ AV_CODEC_ID_NELLYMOSER,
+ AV_CODEC_ID_MUSEPACK8,
+ AV_CODEC_ID_SPEEX,
+ AV_CODEC_ID_WMAVOICE,
+ AV_CODEC_ID_WMAPRO,
+ AV_CODEC_ID_WMALOSSLESS,
+ AV_CODEC_ID_ATRAC3P,
+ AV_CODEC_ID_EAC3,
+ AV_CODEC_ID_SIPR,
+ AV_CODEC_ID_MP1,
+ AV_CODEC_ID_TWINVQ,
+ AV_CODEC_ID_TRUEHD,
+ AV_CODEC_ID_MP4ALS,
+ AV_CODEC_ID_ATRAC1,
+ AV_CODEC_ID_BINKAUDIO_RDFT,
+ AV_CODEC_ID_BINKAUDIO_DCT,
+ AV_CODEC_ID_AAC_LATM,
+ AV_CODEC_ID_QDMC,
+ AV_CODEC_ID_CELT,
+ AV_CODEC_ID_G723_1,
+ AV_CODEC_ID_G729,
+ AV_CODEC_ID_8SVX_EXP,
+ AV_CODEC_ID_8SVX_FIB,
+ AV_CODEC_ID_BMV_AUDIO,
+ AV_CODEC_ID_RALF,
+ AV_CODEC_ID_IAC,
+ AV_CODEC_ID_ILBC,
+ AV_CODEC_ID_OPUS,
+ AV_CODEC_ID_COMFORT_NOISE,
+ AV_CODEC_ID_TAK,
+ AV_CODEC_ID_METASOUND,
+
+ /* subtitle codecs */
+ AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
+ AV_CODEC_ID_DVD_SUBTITLE = 0x17000,
+ AV_CODEC_ID_DVB_SUBTITLE,
+ AV_CODEC_ID_TEXT, ///< raw UTF-8 text
+ AV_CODEC_ID_XSUB,
+ AV_CODEC_ID_SSA,
+ AV_CODEC_ID_MOV_TEXT,
+ AV_CODEC_ID_HDMV_PGS_SUBTITLE,
+ AV_CODEC_ID_DVB_TELETEXT,
+ AV_CODEC_ID_SRT,
+
+ /* other specific kind of codecs (generally used for attachments) */
+ AV_CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs.
+ AV_CODEC_ID_TTF = 0x18000,
+
+ AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it
+
+ AV_CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS
+ * stream (only used by libavformat) */
+ AV_CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems
+ * stream (only used by libavformat) */
+ AV_CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information.
+};
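/* Editorial sketch: codec IDs from the enum above are typically turned into a
 * concrete decoder via avcodec_find_decoder(); the helper below is illustrative
 * and assumes avcodec_register_all() has already been called by the host code. */
#if 0 /* example only, not part of the original header */
#include <stdio.h>
#include "libavcodec/avcodec.h"

static int have_decoder(enum AVCodecID id)
{
    AVCodec *codec = avcodec_find_decoder(id);   /* NULL if not compiled in */
    if (!codec) {
        fprintf(stderr, "no decoder for codec id %d\n", (int)id);
        return 0;
    }
    printf("found decoder '%s'\n", codec->name);
    return 1;
}
/* e.g. have_decoder(AV_CODEC_ID_H264) or have_decoder(AV_CODEC_ID_VP9) */
#endif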
+
+/**
+ * This struct describes the properties of a single codec described by an
+ * AVCodecID.
+ * @see avcodec_get_descriptor()
+ */
+typedef struct AVCodecDescriptor {
+ enum AVCodecID id;
+ enum AVMediaType type;
+ /**
+ * Name of the codec described by this descriptor. It is non-empty and
+ * unique for each codec descriptor. It should contain alphanumeric
+ * characters and '_' only.
+ */
+ const char *name;
+ /**
+ * A more descriptive name for this codec. May be NULL.
+ */
+ const char *long_name;
+ /**
+ * Codec properties, a combination of AV_CODEC_PROP_* flags.
+ */
+ int props;
+} AVCodecDescriptor;
+
+/**
+ * Codec uses only intra compression.
+ * Video codecs only.
+ */
+#define AV_CODEC_PROP_INTRA_ONLY (1 << 0)
+/**
+ * Codec supports lossy compression. Audio and video codecs only.
+ * @note a codec may support both lossy and lossless
+ * compression modes
+ */
+#define AV_CODEC_PROP_LOSSY (1 << 1)
+/**
+ * Codec supports lossless compression. Audio and video codecs only.
+ */
+#define AV_CODEC_PROP_LOSSLESS (1 << 2)
+
+/**
+ * @ingroup lavc_decoding
+ * Required number of additionally allocated bytes at the end of the input bitstream for decoding.
+ * This is mainly needed because some optimized bitstream readers read
+ * 32 or 64 bit at once and could read over the end.<br>
+ * Note: If the first 23 bits of the additional bytes are not 0, then damaged
+ * MPEG bitstreams could cause overread and segfault.
+ */
+#define FF_INPUT_BUFFER_PADDING_SIZE 8
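/* Editorial sketch: a caller honouring the padding requirement above would
 * allocate its bitstream buffers with the extra zeroed bytes, for example: */
#if 0 /* example only, not part of the original header */
#include <stdlib.h>
#include <string.h>

static uint8_t *dup_with_padding(const uint8_t *data, size_t size)
{
    uint8_t *buf = malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!buf)
        return NULL;
    memcpy(buf, data, size);
    memset(buf + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);  /* zero the tail */
    return buf;
}
#endif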
+
+/**
+ * @ingroup lavc_encoding
+ * minimum encoding buffer size
+ * Used to avoid some checks during header writing.
+ */
+#define FF_MIN_BUFFER_SIZE 16384
+
+
+/**
+ * @ingroup lavc_encoding
+ * motion estimation type.
+ */
+enum Motion_Est_ID {
+ ME_ZERO = 1, ///< no search, that is use 0,0 vector whenever one is needed
+ ME_FULL,
+ ME_LOG,
+ ME_PHODS,
+ ME_EPZS, ///< enhanced predictive zonal search
+ ME_X1, ///< reserved for experiments
+ ME_HEX, ///< hexagon based search
+ ME_UMH, ///< uneven multi-hexagon search
+ ME_TESA, ///< transformed exhaustive search algorithm
+};
+
+/**
+ * @ingroup lavc_decoding
+ */
+enum AVDiscard{
+ /* We leave some space between them for extensions (drop some
+ * keyframes for intra-only or drop just some bidir frames). */
+ AVDISCARD_NONE =-16, ///< discard nothing
+ AVDISCARD_DEFAULT = 0, ///< discard useless packets like 0 size packets in avi
+ AVDISCARD_NONREF = 8, ///< discard all non reference
+ AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames
+ AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes
+ AVDISCARD_ALL = 48, ///< discard all
+};
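/* Editorial sketch: AVDiscard values are applied through fields such as
 * AVCodecContext.skip_frame and skip_loop_filter (declared further down in
 * this header); a seek-optimised mode might look like this: */
#if 0 /* example only, not part of the original header */
static void set_seek_mode(AVCodecContext *avctx, int seeking)
{
    avctx->skip_frame       = seeking ? AVDISCARD_NONKEY : AVDISCARD_DEFAULT;
    avctx->skip_loop_filter = seeking ? AVDISCARD_ALL    : AVDISCARD_DEFAULT;
}
#endif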
+
+enum AVColorPrimaries{
+ AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B
+ AVCOL_PRI_UNSPECIFIED = 2,
+ AVCOL_PRI_BT470M = 4,
+ AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
+ AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
+ AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above
+ AVCOL_PRI_FILM = 8,
+ AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020
+ AVCOL_PRI_NB , ///< Not part of ABI
+};
+
+enum AVColorTransferCharacteristic{
+ AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361
+ AVCOL_TRC_UNSPECIFIED = 2,
+ AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
+ AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG
+ AVCOL_TRC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC
+ AVCOL_TRC_SMPTE240M = 7,
+ AVCOL_TRC_LINEAR = 8, ///< "Linear transfer characteristics"
+ AVCOL_TRC_LOG = 9, ///< "Logarithmic transfer characteristic (100:1 range)"
+ AVCOL_TRC_LOG_SQRT = 10, ///< "Logarithmic transfer characteristic (100 * Sqrt( 10 ) : 1 range)"
+ AVCOL_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4
+ AVCOL_TRC_BT1361_ECG = 12, ///< ITU-R BT1361 Extended Colour Gamut
+ AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC)
+ AVCOL_TRC_BT2020_10 = 14, ///< ITU-R BT2020 for 10 bit system
+ AVCOL_TRC_BT2020_12 = 15, ///< ITU-R BT2020 for 12 bit system
+ AVCOL_TRC_NB , ///< Not part of ABI
+};
+
+enum AVColorSpace{
+ AVCOL_SPC_RGB = 0,
+ AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
+ AVCOL_SPC_UNSPECIFIED = 2,
+ AVCOL_SPC_FCC = 4,
+ AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
+ AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
+ AVCOL_SPC_SMPTE240M = 7,
+ AVCOL_SPC_YCOCG = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16
+ AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system
+ AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system
+ AVCOL_SPC_NB , ///< Not part of ABI
+};
+
+enum AVColorRange{
+ AVCOL_RANGE_UNSPECIFIED = 0,
+ AVCOL_RANGE_MPEG = 1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges
+ AVCOL_RANGE_JPEG = 2, ///< the normal 2^n-1 "JPEG" YUV ranges
+ AVCOL_RANGE_NB , ///< Not part of ABI
+};
+
+/**
+ * X X 3 4 X X are luma samples,
+ * 1 2 1-6 are possible chroma positions
+ * X X 5 6 X 0 is undefined/unknown position
+ */
+enum AVChromaLocation{
+ AVCHROMA_LOC_UNSPECIFIED = 0,
+ AVCHROMA_LOC_LEFT = 1, ///< mpeg2/4, h264 default
+ AVCHROMA_LOC_CENTER = 2, ///< mpeg1, jpeg, h263
+ AVCHROMA_LOC_TOPLEFT = 3, ///< DV
+ AVCHROMA_LOC_TOP = 4,
+ AVCHROMA_LOC_BOTTOMLEFT = 5,
+ AVCHROMA_LOC_BOTTOM = 6,
+ AVCHROMA_LOC_NB , ///< Not part of ABI
+};
+
+enum AVAudioServiceType {
+ AV_AUDIO_SERVICE_TYPE_MAIN = 0,
+ AV_AUDIO_SERVICE_TYPE_EFFECTS = 1,
+ AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2,
+ AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED = 3,
+ AV_AUDIO_SERVICE_TYPE_DIALOGUE = 4,
+ AV_AUDIO_SERVICE_TYPE_COMMENTARY = 5,
+ AV_AUDIO_SERVICE_TYPE_EMERGENCY = 6,
+ AV_AUDIO_SERVICE_TYPE_VOICE_OVER = 7,
+ AV_AUDIO_SERVICE_TYPE_KARAOKE = 8,
+ AV_AUDIO_SERVICE_TYPE_NB , ///< Not part of ABI
+};
+
+/**
+ * @ingroup lavc_encoding
+ */
+typedef struct RcOverride{
+ int start_frame;
+ int end_frame;
+ int qscale; // If this is 0 then quality_factor will be used instead.
+ float quality_factor;
+} RcOverride;
+
+#if FF_API_MAX_BFRAMES
+/**
+ * @deprecated there is no libavcodec-wide limit on the number of B-frames
+ */
+#define FF_MAX_B_FRAMES 16
+#endif
+
+/* encoding support
+ These flags can be passed in AVCodecContext.flags before initialization.
+ Note: Not everything is supported yet.
+*/
+
+/**
+ * Allow decoders to produce frames with data planes that are not aligned
+ * to CPU requirements (e.g. due to cropping).
+ */
+#define CODEC_FLAG_UNALIGNED 0x0001
+#define CODEC_FLAG_QSCALE 0x0002 ///< Use fixed qscale.
+#define CODEC_FLAG_4MV 0x0004 ///< 4 MV per MB allowed / advanced prediction for H.263.
+#define CODEC_FLAG_OUTPUT_CORRUPT 0x0008 ///< Output even those frames that might be corrupted
+#define CODEC_FLAG_QPEL 0x0010 ///< Use qpel MC.
+#define CODEC_FLAG_GMC 0x0020 ///< Use GMC.
+#define CODEC_FLAG_MV0 0x0040 ///< Always try a MB with MV=<0,0>.
+/**
+ * The parent program guarantees that the input for streams containing
+ * B-frames is not written to for at least s->max_b_frames+1 frames; if
+ * this is not set, the input will be copied.
+ */
+#define CODEC_FLAG_INPUT_PRESERVED 0x0100
+#define CODEC_FLAG_PASS1 0x0200 ///< Use internal 2pass ratecontrol in first pass mode.
+#define CODEC_FLAG_PASS2 0x0400 ///< Use internal 2pass ratecontrol in second pass mode.
+#define CODEC_FLAG_GRAY 0x2000 ///< Only decode/encode grayscale.
+#if FF_API_EMU_EDGE
+/**
+ * @deprecated edges are not used/required anymore. I.e. this flag is now always
+ * set.
+ */
+#define CODEC_FLAG_EMU_EDGE 0x4000
+#endif
+#define CODEC_FLAG_PSNR 0x8000 ///< error[?] variables will be set during encoding.
+#define CODEC_FLAG_TRUNCATED 0x00010000 /** Input bitstream might be truncated at a random
+ location instead of only at frame boundaries. */
+#define CODEC_FLAG_NORMALIZE_AQP 0x00020000 ///< Normalize adaptive quantization.
+#define CODEC_FLAG_INTERLACED_DCT 0x00040000 ///< Use interlaced DCT.
+#define CODEC_FLAG_LOW_DELAY 0x00080000 ///< Force low delay.
+#define CODEC_FLAG_GLOBAL_HEADER 0x00400000 ///< Place global headers in extradata instead of every keyframe.
+#define CODEC_FLAG_BITEXACT 0x00800000 ///< Use only bitexact stuff (except (I)DCT).
+/* Fx : Flag for h263+ extra options */
+#define CODEC_FLAG_AC_PRED 0x01000000 ///< H.263 advanced intra coding / MPEG-4 AC prediction
+#define CODEC_FLAG_LOOP_FILTER 0x00000800 ///< loop filter
+#define CODEC_FLAG_INTERLACED_ME 0x20000000 ///< interlaced motion estimation
+#define CODEC_FLAG_CLOSED_GOP 0x80000000
+#define CODEC_FLAG2_FAST 0x00000001 ///< Allow non spec compliant speedup tricks.
+#define CODEC_FLAG2_NO_OUTPUT 0x00000004 ///< Skip bitstream encoding.
+#define CODEC_FLAG2_LOCAL_HEADER 0x00000008 ///< Place global headers at every keyframe instead of in extradata.
+#define CODEC_FLAG2_IGNORE_CROP 0x00010000 ///< Discard cropping information from SPS.
+
+#define CODEC_FLAG2_CHUNKS 0x00008000 ///< Input bitstream might be truncated at packet boundaries instead of only at frame boundaries.
+
+/* Unsupported options :
+ * Syntax Arithmetic coding (SAC)
+ * Reference Picture Selection
+ * Independent Segment Decoding */
+/* /Fx */
+/* codec capabilities */
+
+#define CODEC_CAP_DRAW_HORIZ_BAND 0x0001 ///< Decoder can use draw_horiz_band callback.
+/**
+ * Codec uses get_buffer() for allocating buffers and supports custom allocators.
+ * If not set, it might not use get_buffer() at all or use operations that
+ * assume the buffer was allocated by avcodec_default_get_buffer.
+ */
+#define CODEC_CAP_DR1 0x0002
+#define CODEC_CAP_TRUNCATED 0x0008
+#if FF_API_XVMC
+/* Codec can export data for HW decoding (XvMC). */
+#define CODEC_CAP_HWACCEL 0x0010
+#endif /* FF_API_XVMC */
+/**
+ * Encoder or decoder requires flushing with NULL input at the end in order to
+ * give the complete and correct output.
+ *
+ * NOTE: If this flag is not set, the codec is guaranteed to never be fed
+ * with NULL data. The user can still send NULL data to the public encode
+ * or decode function, but libavcodec will not pass it along to the codec
+ * unless this flag is set.
+ *
+ * Decoders:
+ * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to get the delayed data until the decoder no longer
+ * returns frames.
+ *
+ * Encoders:
+ * The encoder needs to be fed with NULL data at the end of encoding until the
+ * encoder no longer returns data.
+ *
+ * NOTE: For encoders implementing the AVCodec.encode2() function, setting this
+ * flag also means that the encoder must set the pts and duration for
+ * each output packet. If this flag is not set, the pts and duration will
+ * be determined by libavcodec from the input frame.
+ */
+#define CODEC_CAP_DELAY 0x0020
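/* Editorial sketch: draining a decoder that advertises CODEC_CAP_DELAY by
 * feeding it an empty packet until no more frames are produced; error
 * handling is reduced to the bare minimum for illustration. */
#if 0 /* example only, not part of the original header */
static void drain_video_decoder(AVCodecContext *avctx, AVFrame *frame)
{
    AVPacket pkt;
    int got_frame = 1;

    av_init_packet(&pkt);
    pkt.data = NULL;   /* NULL data ...                          */
    pkt.size = 0;      /* ... and zero size signal end of stream */

    while (got_frame) {
        if (avcodec_decode_video2(avctx, frame, &got_frame, &pkt) < 0)
            break;
        if (got_frame) {
            /* a delayed frame is now valid; hand it to the caller here */
        }
    }
}
#endif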
+/**
+ * Codec can be fed a final frame with a smaller size.
+ * This can be used to prevent truncation of the last audio samples.
+ */
+#define CODEC_CAP_SMALL_LAST_FRAME 0x0040
+#if FF_API_CAP_VDPAU
+/**
+ * Codec can export data for HW decoding (VDPAU).
+ */
+#define CODEC_CAP_HWACCEL_VDPAU 0x0080
+#endif
+/**
+ * Codec can output multiple frames per AVPacket.
+ * Normally demuxers return one frame at a time; demuxers which do not do
+ * this are connected to a parser to split what they return into proper
+ * frames. This flag is reserved for the very rare category of codecs whose
+ * bitstream cannot be split into frames without time-consuming operations
+ * like full decoding. Demuxers carrying such bitstreams thus may return
+ * multiple frames in a packet. This has many disadvantages, such as
+ * prohibiting stream copy in many cases, so it should only be considered
+ * as a last resort.
+ */
+#define CODEC_CAP_SUBFRAMES 0x0100
+/**
+ * Codec is experimental and is thus avoided in favor of non experimental
+ * encoders
+ */
+#define CODEC_CAP_EXPERIMENTAL 0x0200
+/**
+ * Codec should fill in channel configuration and samplerate instead of container
+ */
+#define CODEC_CAP_CHANNEL_CONF 0x0400
+#if FF_API_NEG_LINESIZES
+/**
+ * @deprecated no codecs use this capability
+ */
+#define CODEC_CAP_NEG_LINESIZES 0x0800
+#endif
+/**
+ * Codec supports frame-level multithreading.
+ */
+#define CODEC_CAP_FRAME_THREADS 0x1000
+/**
+ * Codec supports slice-based (or partition-based) multithreading.
+ */
+#define CODEC_CAP_SLICE_THREADS 0x2000
+/**
+ * Codec supports changed parameters at any point.
+ */
+#define CODEC_CAP_PARAM_CHANGE 0x4000
+/**
+ * Codec supports avctx->thread_count == 0 (auto).
+ */
+#define CODEC_CAP_AUTO_THREADS 0x8000
+/**
+ * Audio encoder supports receiving a different number of samples in each call.
+ */
+#define CODEC_CAP_VARIABLE_FRAME_SIZE 0x10000
+
+#if FF_API_MB_TYPE
+//The following defines may change, don't expect compatibility if you use them.
+#define MB_TYPE_INTRA4x4 0x0001
+#define MB_TYPE_INTRA16x16 0x0002 //FIXME H.264-specific
+#define MB_TYPE_INTRA_PCM 0x0004 //FIXME H.264-specific
+#define MB_TYPE_16x16 0x0008
+#define MB_TYPE_16x8 0x0010
+#define MB_TYPE_8x16 0x0020
+#define MB_TYPE_8x8 0x0040
+#define MB_TYPE_INTERLACED 0x0080
+#define MB_TYPE_DIRECT2 0x0100 //FIXME
+#define MB_TYPE_ACPRED 0x0200
+#define MB_TYPE_GMC 0x0400
+#define MB_TYPE_SKIP 0x0800
+#define MB_TYPE_P0L0 0x1000
+#define MB_TYPE_P1L0 0x2000
+#define MB_TYPE_P0L1 0x4000
+#define MB_TYPE_P1L1 0x8000
+#define MB_TYPE_L0 (MB_TYPE_P0L0 | MB_TYPE_P1L0)
+#define MB_TYPE_L1 (MB_TYPE_P0L1 | MB_TYPE_P1L1)
+#define MB_TYPE_L0L1 (MB_TYPE_L0 | MB_TYPE_L1)
+#define MB_TYPE_QUANT 0x00010000
+#define MB_TYPE_CBP 0x00020000
+//Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 0mv, ...)
+#endif
+
+/**
+ * Pan Scan area.
+ * This specifies the area which should be displayed.
+ * Note there may be multiple such areas for one frame.
+ */
+typedef struct AVPanScan{
+ /**
+ * id
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int id;
+
+ /**
+ * width and height in 1/16 pel
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int width;
+ int height;
+
+ /**
+ * position of the top left corner in 1/16 pel for up to 3 fields/frames
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int16_t position[3][2];
+}AVPanScan;
+
+#if FF_API_QSCALE_TYPE
+#define FF_QSCALE_TYPE_MPEG1 0
+#define FF_QSCALE_TYPE_MPEG2 1
+#define FF_QSCALE_TYPE_H264 2
+#define FF_QSCALE_TYPE_VP56 3
+#endif
+
+#if FF_API_GET_BUFFER
+#define FF_BUFFER_TYPE_INTERNAL 1
+#define FF_BUFFER_TYPE_USER 2 ///< direct rendering buffers (image is (de)allocated by user)
+#define FF_BUFFER_TYPE_SHARED 4 ///< Buffer from somewhere else; don't deallocate image (data/base), all other tables are not shared.
+#define FF_BUFFER_TYPE_COPY 8 ///< Just a (modified) copy of some other buffer, don't deallocate anything.
+
+#define FF_BUFFER_HINTS_VALID 0x01 // Buffer hints value is meaningful (if 0 ignore).
+#define FF_BUFFER_HINTS_READABLE 0x02 // Codec will read from buffer.
+#define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content.
+#define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update).
+#endif
+
+/**
+ * The decoder will keep a reference to the frame and may reuse it later.
+ */
+#define AV_GET_BUFFER_FLAG_REF (1 << 0)
+
+/**
+ * @defgroup lavc_packet AVPacket
+ *
+ * Types and functions for working with AVPacket.
+ * @{
+ */
+enum AVPacketSideDataType {
+ AV_PKT_DATA_PALETTE,
+ AV_PKT_DATA_NEW_EXTRADATA,
+
+ /**
+ * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
+ * @code
+ * u32le param_flags
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)
+ * s32le channel_count
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
+ * u64le channel_layout
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)
+ * s32le sample_rate
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)
+ * s32le width
+ * s32le height
+ * @endcode
+ */
+ AV_PKT_DATA_PARAM_CHANGE,
+
+ /**
+ * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of
+ * structures with info about macroblocks relevant to splitting the
+ * packet into smaller packets on macroblock edges (e.g. as for RFC 2190).
+ * That is, it does not necessarily contain info about all macroblocks,
+ * as long as the distance between macroblocks in the info is smaller
+ * than the target payload size.
+ * Each MB info structure is 12 bytes, and is laid out as follows:
+ * @code
+ * u32le bit offset from the start of the packet
+ * u8 current quantizer at the start of the macroblock
+ * u8 GOB number
+ * u16le macroblock address within the GOB
+ * u8 horizontal MV predictor
+ * u8 vertical MV predictor
+ * u8 horizontal MV predictor for block number 3
+ * u8 vertical MV predictor for block number 3
+ * @endcode
+ */
+ AV_PKT_DATA_H263_MB_INFO,
+};
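+
+/**
+ * Editorial usage sketch (not upstream documentation): reading an
+ * AV_PKT_DATA_PARAM_CHANGE blob by hand, following the layout documented
+ * above. The little-endian values are assembled manually so the example
+ * depends only on declarations from this header.
+ * @code
+ * static uint32_t rd_le32(const uint8_t *p)
+ * {
+ *     return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
+ *            ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
+ * }
+ *
+ * // Returns 1 and stores the new channel count if present, 0 if the flag is
+ * // not set, and -1 on truncated side data.
+ * static int param_change_channel_count(const uint8_t *data, int size,
+ *                                       int *out_channels)
+ * {
+ *     uint32_t flags;
+ *     if (size < 4)
+ *         return -1;
+ *     flags = rd_le32(data);
+ *     if (!(flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT))
+ *         return 0;
+ *     if (size < 8)
+ *         return -1;
+ *     *out_channels = (int)rd_le32(data + 4);
+ *     // channel_layout (u64le), sample_rate (s32le) and width/height
+ *     // (s32le each) follow in this order when their flags are set.
+ *     return 1;
+ * }
+ * @endcode
+ */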
+
+/**
+ * This structure stores compressed data. It is typically exported by demuxers
+ * and then passed as input to decoders, or received as output from encoders and
+ * then passed to muxers.
+ *
+ * For video, it should typically contain one compressed frame. For audio it may
+ * contain several compressed frames.
+ *
+ * AVPacket is one of the few structs in Libav whose size is a part of the
+ * public ABI. Thus it may be allocated on the stack, and no new fields can be
+ * added to it without a major bump of libavcodec and libavformat.
+ *
+ * The semantics of data ownership depends on the buf or destruct (deprecated)
+ * fields. If either is set, the packet data is dynamically allocated and is
+ * valid indefinitely until av_free_packet() is called (which in turn calls
+ * av_buffer_unref()/the destruct callback to free the data). If neither is set,
+ * the packet data is typically backed by some static buffer somewhere and is
+ * only valid for a limited time (e.g. until the next read call when demuxing).
+ *
+ * The side data is always allocated with av_malloc() and is freed in
+ * av_free_packet().
+ */
+typedef struct AVPacket {
+ /**
+ * A reference to the reference-counted buffer where the packet data is
+ * stored.
+ * May be NULL, then the packet data is not reference-counted.
+ */
+ AVBufferRef *buf;
+ /**
+ * Presentation timestamp in AVStream->time_base units; the time at which
+ * the decompressed packet will be presented to the user.
+ * Can be AV_NOPTS_VALUE if it is not stored in the file.
+ * pts MUST be larger than or equal to dts as presentation cannot happen before
+ * decompression, unless one wants to view hex dumps. Some formats misuse
+ * the terms dts and pts/cts to mean something different. Such timestamps
+ * must be converted to true pts/dts before they are stored in AVPacket.
+ */
+ int64_t pts;
+ /**
+ * Decompression timestamp in AVStream->time_base units; the time at which
+ * the packet is decompressed.
+ * Can be AV_NOPTS_VALUE if it is not stored in the file.
+ */
+ int64_t dts;
+ uint8_t *data;
+ int size;
+ int stream_index;
+ /**
+ * A combination of AV_PKT_FLAG values
+ */
+ int flags;
+ /**
+ * Additional packet data that can be provided by the container.
+ * Packet can contain several types of side information.
+ */
+ struct {
+ uint8_t *data;
+ int size;
+ enum AVPacketSideDataType type;
+ } *side_data;
+ int side_data_elems;
+
+ /**
+ * Duration of this packet in AVStream->time_base units, 0 if unknown.
+ * Equals next_pts - this_pts in presentation order.
+ */
+ int duration;
+#if FF_API_DESTRUCT_PACKET
+ attribute_deprecated
+ void (*destruct)(struct AVPacket *);
+ attribute_deprecated
+ void *priv;
+#endif
+ int64_t pos; ///< byte position in stream, -1 if unknown
+
+ /**
+ * Time difference in AVStream->time_base units from the pts of this
+ * packet to the point at which the output from the decoder has converged
+ * independent from the availability of previous frames. That is, the
+ * frames are virtually identical no matter if decoding started from
+ * the very first frame or from this keyframe.
+ * Is AV_NOPTS_VALUE if unknown.
+ * This field is not the display duration of the current packet.
+ * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY
+ * set.
+ *
+ * The purpose of this field is to allow seeking in streams that have no
+ * keyframes in the conventional sense. It corresponds to the
+ * recovery point SEI in H.264 and match_time_delta in NUT. It is also
+ * essential for some types of subtitle streams to ensure that all
+ * subtitles are correctly displayed after seeking.
+ */
+ int64_t convergence_duration;
+} AVPacket;
+#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe
+#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted
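+
+/**
+ * Editorial usage sketch (not upstream documentation): locating side data of
+ * a given type by walking the side_data[] array above, and testing
+ * AV_PKT_FLAG_KEY. libavcodec also provides av_packet_get_side_data() for
+ * this lookup; the loop is spelled out here only to illustrate the fields.
+ * @code
+ * static const uint8_t *find_side_data(const AVPacket *pkt,
+ *                                      enum AVPacketSideDataType type,
+ *                                      int *size)
+ * {
+ *     int i;
+ *     for (i = 0; i < pkt->side_data_elems; i++) {
+ *         if (pkt->side_data[i].type == type) {
+ *             *size = pkt->side_data[i].size;
+ *             return pkt->side_data[i].data;
+ *         }
+ *     }
+ *     *size = 0;
+ *     return NULL;
+ * }
+ *
+ * // Typical use while demuxing:
+ * //   if (pkt.flags & AV_PKT_FLAG_KEY) { ... safe point to start decoding ... }
+ * //   int pal_size;
+ * //   const uint8_t *pal = find_side_data(&pkt, AV_PKT_DATA_PALETTE, &pal_size);
+ * @endcode
+ */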
+
+enum AVSideDataParamChangeFlags {
+ AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001,
+ AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002,
+ AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004,
+ AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008,
+};
+/**
+ * @}
+ */
+
+struct AVCodecInternal;
+
+enum AVFieldOrder {
+ AV_FIELD_UNKNOWN,
+ AV_FIELD_PROGRESSIVE,
+ AV_FIELD_TT, //< Top coded first, top displayed first
+ AV_FIELD_BB, //< Bottom coded first, bottom displayed first
+ AV_FIELD_TB, //< Top coded first, bottom displayed first
+ AV_FIELD_BT, //< Bottom coded first, top displayed first
+};
+
+/**
+ * main external API structure.
+ * New fields can be added to the end with minor version bumps.
+ * Removal, reordering and changes to existing fields require a major
+ * version bump.
+ * sizeof(AVCodecContext) must not be used outside libav*.
+ */
+typedef struct AVCodecContext {
+ /**
+ * information on struct for av_log
+ * - set by avcodec_alloc_context3
+ */
+ const AVClass *av_class;
+ int log_level_offset;
+
+ enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */
+ const struct AVCodec *codec;
+ char codec_name[32];
+ enum AVCodecID codec_id; /* see AV_CODEC_ID_xxx */
+
+ /**
+ * fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
+ * This is used to work around some encoder bugs.
+ * A demuxer should set this to what is stored in the field used to identify the codec.
+ * If there are multiple such fields in a container then the demuxer should choose the one
+ * which maximizes the information about the used codec.
+ * If the codec tag field in a container is larger than 32 bits then the demuxer should
+ * remap the longer ID to 32 bits with a table or other structure. Alternatively a new
+ * extra_codec_tag + size could be added but for this a clear advantage must be demonstrated
+ * first.
+ * - encoding: Set by user, if not then the default based on codec_id will be used.
+ * - decoding: Set by user, will be converted to uppercase by libavcodec during init.
+ */
+ unsigned int codec_tag;
+
+ /**
+ * fourcc from the AVI stream header (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
+ * This is used to work around some encoder bugs.
+ * - encoding: unused
+ * - decoding: Set by user, will be converted to uppercase by libavcodec during init.
+ */
+ unsigned int stream_codec_tag;
+
+ void *priv_data;
+
+ /**
+ * Private context used for internal data.
+ *
+ * Unlike priv_data, this is not codec-specific. It is used in general
+ * libavcodec functions.
+ */
+ struct AVCodecInternal *internal;
+
+ /**
+ * Private data of the user, can be used to carry app specific stuff.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ void *opaque;
+
+ /**
+ * the average bitrate
+ * - encoding: Set by user; unused for constant quantizer encoding.
+ * - decoding: Set by libavcodec. 0 or some bitrate if this info is available in the stream.
+ */
+ int bit_rate;
+
+ /**
+ * number of bits the bitstream is allowed to diverge from the reference.
+ * The reference can be CBR (for CBR pass1) or VBR (for pass2).
+ * - encoding: Set by user; unused for constant quantizer encoding.
+ * - decoding: unused
+ */
+ int bit_rate_tolerance;
+
+ /**
+ * Global quality for codecs which cannot change it per frame.
+ * This should be proportional to MPEG-1/2/4 qscale.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int global_quality;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int compression_level;
+#define FF_COMPRESSION_DEFAULT -1
+
+ /**
+ * CODEC_FLAG_*.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int flags;
+
+ /**
+ * CODEC_FLAG2_*
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int flags2;
+
+ /**
+ * Some codecs need / can use extradata like Huffman tables.
+ * mjpeg: Huffman tables
+ * rv10: additional flags
+ * mpeg4: global headers (they can be in the bitstream or here)
+ * The allocated memory should be FF_INPUT_BUFFER_PADDING_SIZE bytes larger
+ * than extradata_size to avoid problems if it is read with the bitstream reader.
+ * The bytewise contents of extradata must not depend on the architecture or CPU endianness.
+ * - encoding: Set/allocated/freed by libavcodec.
+ * - decoding: Set/allocated/freed by user.
+ */
+ uint8_t *extradata;
+ int extradata_size;
+
+ /**
+ * This is the fundamental unit of time (in seconds) in terms
+ * of which frame timestamps are represented. For fixed-fps content,
+ * timebase should be 1/framerate and timestamp increments should be
+ * identically 1.
+ * - encoding: MUST be set by user.
+ * - decoding: Set by libavcodec.
+ */
+ AVRational time_base;
+
+ /**
+ * For some codecs, the time base is closer to the field rate than the frame rate.
+ * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration
+ * if no telecine is used ...
+ *
+ * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2.
+ */
+ int ticks_per_frame;
+
+ /**
+ * Codec delay.
+ *
+ * Video:
+ * Number of frames the decoded output will be delayed relative to the
+ * encoded input.
+ *
+ * Audio:
+ * For encoding, this is the number of "priming" samples added to the
+ * beginning of the stream. The decoded output will be delayed by this
+ * many samples relative to the input to the encoder. Note that this
+ * field is purely informational and does not directly affect the pts
+ * output by the encoder, which should always be based on the actual
+ * presentation time, including any delay.
+ * For decoding, this is the number of samples the decoder needs to
+ * output before the decoder's output is valid. When seeking, you should
+ * start decoding this many samples prior to your desired seek point.
+ *
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int delay;
+
+
+ /* video only */
+ /**
+ * picture width / height.
+ * - encoding: MUST be set by user.
+ * - decoding: May be set by the user before opening the decoder if known e.g.
+ * from the container. Some decoders will require the dimensions
+ * to be set by the caller. During decoding, the decoder may
+ * overwrite those values as required.
+ */
+ int width, height;
+
+ /**
+ * Bitstream width / height, may be different from width/height e.g. when
+ * the decoded frame is cropped before being output.
+ * - encoding: unused
+ * - decoding: May be set by the user before opening the decoder if known
+ * e.g. from the container. During decoding, the decoder may
+ * overwrite those values as required.
+ */
+ int coded_width, coded_height;
+
+#if FF_API_ASPECT_EXTENDED
+#define FF_ASPECT_EXTENDED 15
+#endif
+
+ /**
+ * the number of pictures in a group of pictures, or 0 for intra_only
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int gop_size;
+
+ /**
+ * Pixel format, see AV_PIX_FMT_xxx.
+ * May be set by the demuxer if known from headers.
+ * May be overridden by the decoder if it knows better.
+ * - encoding: Set by user.
+ * - decoding: Set by user if known, overridden by libavcodec if known
+ */
+ enum AVPixelFormat pix_fmt;
+
+ /**
+ * Motion estimation algorithm used for video coding.
+ * 1 (zero), 2 (full), 3 (log), 4 (phods), 5 (epzs), 6 (x1), 7 (hex),
+ * 8 (umh), 10 (tesa) [7, 8, 10 are x264 specific]
+ * - encoding: MUST be set by user.
+ * - decoding: unused
+ */
+ int me_method;
+
+ /**
+ * If non-NULL, 'draw_horiz_band' is called by the libavcodec
+ * decoder to draw a horizontal band. It improves cache usage. Not
+ * all codecs can do that. You must check the codec capabilities
+ * beforehand.
+ * When multithreading is used, it may be called from multiple threads
+ * at the same time; threads might draw different parts of the same AVFrame,
+ * or multiple AVFrames, and there is no guarantee that slices will be drawn
+ * in order.
+ * The function is also used by hardware acceleration APIs.
+ * It is called at least once during frame decoding to pass
+ * the data needed for hardware render.
+ * In that mode instead of pixel data, AVFrame points to
+ * a structure specific to the acceleration API. The application
+ * reads the structure and can change some fields to indicate progress
+ * or mark state.
+ * - encoding: unused
+ * - decoding: Set by user.
+ * @param height the height of the slice
+ * @param y the y position of the slice
+ * @param type 1->top field, 2->bottom field, 3->frame
+ * @param offset offset into the AVFrame.data from which the slice should be read
+ */
+ void (*draw_horiz_band)(struct AVCodecContext *s,
+ const AVFrame *src, int offset[AV_NUM_DATA_POINTERS],
+ int y, int type, int height);
+
+ /**
+ * callback to negotiate the pixelFormat
+ * @param fmt the list of formats supported by the codec; it is terminated
+ * by -1, since 0 is a valid format. The formats are ordered by quality,
+ * the first always being the native one.
+ * @return the chosen format
+ * - encoding: unused
+ * - decoding: Set by user, if not set the native format will be chosen.
+ */
+ enum AVPixelFormat (*get_format)(struct AVCodecContext *s, const enum AVPixelFormat * fmt);
+
+ /**
+ * maximum number of B-frames between non-B-frames
+ * Note: The output will be delayed by max_b_frames+1 relative to the input.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_b_frames;
+
+ /**
+ * qscale factor between IP and B-frames
+ * If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset).
+ * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float b_quant_factor;
+
+ /** obsolete FIXME remove */
+ int rc_strategy;
+#define FF_RC_STRATEGY_XVID 1
+
+ int b_frame_strategy;
+
+ /**
+ * qscale offset between IP and B-frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float b_quant_offset;
+
+ /**
+ * Size of the frame reordering buffer in the decoder.
+ * For MPEG-2 it is 1 IPB or 0 low delay IP.
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int has_b_frames;
+
+ /**
+ * 0-> h263 quant 1-> mpeg quant
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mpeg_quant;
+
+ /**
+ * qscale factor between P and I-frames
+ * If > 0 then the last p frame quantizer will be used (q= lastp_q*factor+offset).
+ * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float i_quant_factor;
+
+ /**
+ * qscale offset between P and I-frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float i_quant_offset;
+
+ /**
+ * luminance masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float lumi_masking;
+
+ /**
+ * temporal complexity masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float temporal_cplx_masking;
+
+ /**
+ * spatial complexity masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float spatial_cplx_masking;
+
+ /**
+ * p block masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float p_masking;
+
+ /**
+ * darkness masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float dark_masking;
+
+ /**
+ * slice count
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by user (or 0).
+ */
+ int slice_count;
+ /**
+ * prediction method (needed for huffyuv)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int prediction_method;
+#define FF_PRED_LEFT 0
+#define FF_PRED_PLANE 1
+#define FF_PRED_MEDIAN 2
+
+ /**
+ * slice offsets in the frame in bytes
+ * - encoding: Set/allocated by libavcodec.
+ * - decoding: Set/allocated by user (or NULL).
+ */
+ int *slice_offset;
+
+ /**
+ * sample aspect ratio (0 if unknown)
+ * That is the width of a pixel divided by the height of the pixel.
+ * Numerator and denominator must be relatively prime and smaller than 256 for some video standards.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ AVRational sample_aspect_ratio;
+
+ /**
+ * motion estimation comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_cmp;
+ /**
+ * subpixel motion estimation comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_sub_cmp;
+ /**
+ * macroblock comparison function (not supported yet)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_cmp;
+ /**
+ * interlaced DCT comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int ildct_cmp;
+#define FF_CMP_SAD 0
+#define FF_CMP_SSE 1
+#define FF_CMP_SATD 2
+#define FF_CMP_DCT 3
+#define FF_CMP_PSNR 4
+#define FF_CMP_BIT 5
+#define FF_CMP_RD 6
+#define FF_CMP_ZERO 7
+#define FF_CMP_VSAD 8
+#define FF_CMP_VSSE 9
+#define FF_CMP_NSSE 10
+#define FF_CMP_DCTMAX 13
+#define FF_CMP_DCT264 14
+#define FF_CMP_CHROMA 256
+
+ /**
+ * ME diamond size & shape
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int dia_size;
+
+ /**
+ * amount of previous MV predictors (2a+1 x 2a+1 square)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int last_predictor_count;
+
+ /**
+ * prepass for motion estimation
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int pre_me;
+
+ /**
+ * motion estimation prepass comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_pre_cmp;
+
+ /**
+ * ME prepass diamond size & shape
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int pre_dia_size;
+
+ /**
+ * subpel ME quality
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_subpel_quality;
+
+ /**
+ * DTG active format information (additional aspect ratio
+ * information only used in DVB MPEG-2 transport streams)
+ * 0 if not set.
+ *
+ * - encoding: unused
+ * - decoding: Set by decoder.
+ */
+ int dtg_active_format;
+#define FF_DTG_AFD_SAME 8
+#define FF_DTG_AFD_4_3 9
+#define FF_DTG_AFD_16_9 10
+#define FF_DTG_AFD_14_9 11
+#define FF_DTG_AFD_4_3_SP_14_9 13
+#define FF_DTG_AFD_16_9_SP_14_9 14
+#define FF_DTG_AFD_SP_4_3 15
+
+ /**
+ * maximum motion estimation search range in subpel units
+ * If 0 then no limit.
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_range;
+
+ /**
+ * intra quantizer bias
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int intra_quant_bias;
+#define FF_DEFAULT_QUANT_BIAS 999999
+
+ /**
+ * inter quantizer bias
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int inter_quant_bias;
+
+ /**
+ * slice flags
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int slice_flags;
+#define SLICE_FLAG_CODED_ORDER 0x0001 ///< draw_horiz_band() is called in coded order instead of display
+#define SLICE_FLAG_ALLOW_FIELD 0x0002 ///< allow draw_horiz_band() with field slices (MPEG2 field pics)
+#define SLICE_FLAG_ALLOW_PLANE 0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1)
+
+#if FF_API_XVMC
+ /**
+ * XVideo Motion Acceleration
+ * - encoding: forbidden
+ * - decoding: set by decoder
+ * @deprecated XvMC support is slated for removal.
+ */
+ attribute_deprecated int xvmc_acceleration;
+#endif /* FF_API_XVMC */
+
+ /**
+ * macroblock decision mode
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_decision;
+#define FF_MB_DECISION_SIMPLE 0 ///< uses mb_cmp
+#define FF_MB_DECISION_BITS 1 ///< chooses the one which needs the fewest bits
+#define FF_MB_DECISION_RD 2 ///< rate distortion
+
+ /**
+ * custom intra quantization matrix
+ * - encoding: Set by user, can be NULL.
+ * - decoding: Set by libavcodec.
+ */
+ uint16_t *intra_matrix;
+
+ /**
+ * custom inter quantization matrix
+ * - encoding: Set by user, can be NULL.
+ * - decoding: Set by libavcodec.
+ */
+ uint16_t *inter_matrix;
+
+ /**
+ * scene change detection threshold
+ * 0 is default, larger means fewer detected scene changes.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int scenechange_threshold;
+
+ /**
+ * noise reduction strength
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int noise_reduction;
+
+ /**
+ * Motion estimation threshold below which no motion estimation is
+ * performed, but instead the user specified motion vectors are used.
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_threshold;
+
+ /**
+ * Macroblock threshold below which the user specified macroblock types will be used.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_threshold;
+
+ /**
+ * precision of the intra DC coefficient - 8
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int intra_dc_precision;
+
+ /**
+ * Number of macroblock rows at the top which are skipped.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int skip_top;
+
+ /**
+ * Number of macroblock rows at the bottom which are skipped.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int skip_bottom;
+
+ /**
+ * Border processing masking, raises the quantizer for mbs on the borders
+ * of the picture.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float border_masking;
+
+ /**
+ * minimum MB Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_lmin;
+
+ /**
+ * maximum MB Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_lmax;
+
+ /**
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_penalty_compensation;
+
+ /**
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int bidir_refine;
+
+ /**
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int brd_scale;
+
+ /**
+ * minimum GOP size
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int keyint_min;
+
+ /**
+ * number of reference frames
+ * - encoding: Set by user.
+ * - decoding: Set by lavc.
+ */
+ int refs;
+
+ /**
+ * chroma qp offset from luma
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int chromaoffset;
+
+ /**
+ * Multiplied by qscale for each frame and added to scene_change_score.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int scenechange_factor;
+
+ /**
+ *
+ * Note: Value depends upon the compare function used for fullpel ME.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mv0_threshold;
+
+ /**
+ * Adjust sensitivity of b_frame_strategy 1.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int b_sensitivity;
+
+ /**
+ * Chromaticity coordinates of the source primaries.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorPrimaries color_primaries;
+
+ /**
+ * Color Transfer Characteristic.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorTransferCharacteristic color_trc;
+
+ /**
+ * YUV colorspace type.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorSpace colorspace;
+
+ /**
+ * MPEG vs JPEG YUV range.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorRange color_range;
+
+ /**
+ * This defines the location of chroma samples.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVChromaLocation chroma_sample_location;
+
+ /**
+ * Number of slices.
+ * Indicates number of picture subdivisions. Used for parallelized
+ * decoding.
+ * - encoding: Set by user
+ * - decoding: unused
+ */
+ int slices;
+
+ /** Field order
+ * - encoding: set by libavcodec
+ * - decoding: Set by libavcodec
+ */
+ enum AVFieldOrder field_order;
+
+ /* audio only */
+ int sample_rate; ///< samples per second
+ int channels; ///< number of audio channels
+
+ /**
+ * audio sample format
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ enum AVSampleFormat sample_fmt; ///< sample format
+
+ /* The following data should not be initialized. */
+ /**
+ * Number of samples per channel in an audio frame.
+ *
+ * - encoding: set by libavcodec in avcodec_open2(). Each submitted frame
+ * except the last must contain exactly frame_size samples per channel.
+ * May be 0 when the codec has CODEC_CAP_VARIABLE_FRAME_SIZE set, then the
+ * frame size is not restricted.
+ * - decoding: may be set by some decoders to indicate constant frame size
+ */
+ int frame_size;
+
+ /**
+ * Frame counter, set by libavcodec.
+ *
+ * - decoding: total number of frames returned from the decoder so far.
+ * - encoding: total number of frames passed to the encoder so far.
+ *
+ * @note the counter is not incremented if encoding/decoding resulted in
+ * an error.
+ */
+ int frame_number;
+
+ /**
+ * number of bytes per packet if constant and known or 0
+ * Used by some WAV based audio codecs.
+ */
+ int block_align;
+
+ /**
+ * Audio cutoff bandwidth (0 means "automatic")
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int cutoff;
+
+#if FF_API_REQUEST_CHANNELS
+ /**
+ * Decoder should decode to this many channels if it can (0 for default)
+ * - encoding: unused
+ * - decoding: Set by user.
+ * @deprecated Deprecated in favor of request_channel_layout.
+ */
+ attribute_deprecated int request_channels;
+#endif
+
+ /**
+ * Audio channel layout.
+ * - encoding: set by user.
+ * - decoding: set by libavcodec.
+ */
+ uint64_t channel_layout;
+
+ /**
+ * Request decoder to use this channel layout if it can (0 for default)
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ uint64_t request_channel_layout;
+
+ /**
+ * Type of service that the audio stream conveys.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ enum AVAudioServiceType audio_service_type;
+
+ /**
+ * Used to request a sample format from the decoder.
+ * - encoding: unused.
+ * - decoding: Set by user.
+ */
+ enum AVSampleFormat request_sample_fmt;
+
+#if FF_API_GET_BUFFER
+ /**
+ * Called at the beginning of each frame to get a buffer for it.
+ *
+ * The function will set AVFrame.data[], AVFrame.linesize[].
+ * AVFrame.extended_data[] must also be set, but it should be the same as
+ * AVFrame.data[] except for planar audio with more channels than can fit
+ * in AVFrame.data[]. In that case, AVFrame.data[] shall still contain as
+ * many data pointers as it can hold.
+ *
+ * if CODEC_CAP_DR1 is not set then get_buffer() must call
+ * avcodec_default_get_buffer() instead of providing buffers allocated by
+ * some other means.
+ *
+ * AVFrame.data[] should be 32- or 16-byte-aligned unless the CPU doesn't
+ * need it. avcodec_default_get_buffer() aligns the output buffer properly,
+ * but if get_buffer() is overridden then alignment considerations should
+ * be taken into account.
+ *
+ * @see avcodec_default_get_buffer()
+ *
+ * Video:
+ *
+ * If pic.reference is set then the frame will be read later by libavcodec.
+ * avcodec_align_dimensions2() should be used to find the required width and
+ * height, as they normally need to be rounded up to the next multiple of 16.
+ *
+ * If frame multithreading is used and thread_safe_callbacks is set,
+ * it may be called from a different thread, but not from more than one at
+ * once. Does not need to be reentrant.
+ *
+ * @see release_buffer(), reget_buffer()
+ * @see avcodec_align_dimensions2()
+ *
+ * Audio:
+ *
+ * Decoders request a buffer of a particular size by setting
+ * AVFrame.nb_samples prior to calling get_buffer(). The decoder may,
+ * however, utilize only part of the buffer by setting AVFrame.nb_samples
+ * to a smaller value in the output frame.
+ *
+ * Decoders cannot use the buffer after returning from
+ * avcodec_decode_audio4(), so they will not call release_buffer(), as it
+ * is assumed to be released immediately upon return. In some rare cases,
+ * a decoder may need to call get_buffer() more than once in a single
+ * call to avcodec_decode_audio4(). In that case, when get_buffer() is
+ * called again after it has already been called once, the previously
+ * acquired buffer is assumed to be released at that time and may not be
+ * reused by the decoder.
+ *
+ * As a convenience, av_samples_get_buffer_size() and
+ * av_samples_fill_arrays() in libavutil may be used by custom get_buffer()
+ * functions to find the required data size and to fill data pointers and
+ * linesize. In AVFrame.linesize, only linesize[0] may be set for audio
+ * since all planes must be the same size.
+ *
+ * @see av_samples_get_buffer_size(), av_samples_fill_arrays()
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ *
+ * @deprecated use get_buffer2()
+ */
+ attribute_deprecated
+ int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic);
+
+ /**
+ * Called to release buffers which were allocated with get_buffer.
+ * A released buffer can be reused in get_buffer().
+ * pic.data[*] must be set to NULL.
+ * May be called from a different thread if frame multithreading is used,
+ * but not by more than one thread at once, so does not need to be reentrant.
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ *
+ * @deprecated custom freeing callbacks should be set from get_buffer2()
+ */
+ attribute_deprecated
+ void (*release_buffer)(struct AVCodecContext *c, AVFrame *pic);
+
+ /**
+ * Called at the beginning of a frame to get a buffer for it.
+ * Buffer type (size, hints) must be the same. libavcodec won't check it.
+ * libavcodec will pass previous buffer in pic, function should return
+ * same buffer or new buffer with old frame "painted" into it.
+ * If pic.data[0] == NULL, it must behave like get_buffer().
+ * if CODEC_CAP_DR1 is not set then reget_buffer() must call
+ * avcodec_default_reget_buffer() instead of providing buffers allocated by
+ * some other means.
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ */
+ attribute_deprecated
+ int (*reget_buffer)(struct AVCodecContext *c, AVFrame *pic);
+#endif
+
+ /**
+ * This callback is called at the beginning of each frame to get data
+ * buffer(s) for it. There may be one contiguous buffer for all the data or
+ * there may be a buffer per data plane, or anything in between. This means
+ * you may set as many entries in buf[] as you feel necessary.
+ * Each buffer must be reference-counted using the AVBuffer API (see description
+ * of buf[] below).
+ *
+ * The following fields will be set in the frame before this callback is
+ * called:
+ * - format
+ * - width, height (video only)
+ * - sample_rate, channel_layout, nb_samples (audio only)
+ * Their values may differ from the corresponding values in
+ * AVCodecContext. This callback must use the frame values, not the codec
+ * context values, to calculate the required buffer size.
+ *
+ * This callback must fill the following fields in the frame:
+ * - data[]
+ * - linesize[]
+ * - extended_data:
+ * * if the data is planar audio with more than 8 channels, then this
+ * callback must allocate and fill extended_data to contain all pointers
+ * to all data planes. data[] must hold as many pointers as it can.
+ * extended_data must be allocated with av_malloc() and will be freed in
+ * av_frame_unref().
+ * * otherwise extended_data must point to data
+ * - buf[] must contain one or more pointers to AVBufferRef structures. Each of
+ * the frame's data and extended_data pointers must be contained in these. That
+ * is, one AVBufferRef for each allocated chunk of memory, not necessarily one
+ * AVBufferRef per data[] entry. See: av_buffer_create(), av_buffer_alloc(),
+ * and av_buffer_ref().
+ * - extended_buf and nb_extended_buf must be allocated with av_malloc() by
+ * this callback and filled with the extra buffers if there are more
+ * buffers than buf[] can hold. extended_buf will be freed in
+ * av_frame_unref().
+ *
+ * If CODEC_CAP_DR1 is not set then get_buffer2() must call
+ * avcodec_default_get_buffer2() instead of providing buffers allocated by
+ * some other means.
+ *
+ * Each data plane must be aligned to the maximum required by the target
+ * CPU.
+ *
+ * @see avcodec_default_get_buffer2()
+ *
+ * Video:
+ *
+ * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused
+ * (read and/or written to if it is writable) later by libavcodec.
+ *
+ * avcodec_align_dimensions2() should be used to find the required width and
+ * height, as they normally need to be rounded up to the next multiple of 16.
+ *
+ * If frame multithreading is used and thread_safe_callbacks is set,
+ * this callback may be called from a different thread, but not from more
+ * than one at once. Does not need to be reentrant.
+ *
+ * @see avcodec_align_dimensions2()
+ *
+ * Audio:
+ *
+ * Decoders request a buffer of a particular size by setting
+ * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may,
+ * however, utilize only part of the buffer by setting AVFrame.nb_samples
+ * to a smaller value in the output frame.
+ *
+ * As a convenience, av_samples_get_buffer_size() and
+ * av_samples_fill_arrays() in libavutil may be used by custom get_buffer2()
+ * functions to find the required data size and to fill data pointers and
+ * linesize. In AVFrame.linesize, only linesize[0] may be set for audio
+ * since all planes must be the same size.
+ *
+ * @see av_samples_get_buffer_size(), av_samples_fill_arrays()
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags);
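+
+ /**
+ * Editorial sketch of a minimal get_buffer2() override: it only inspects the
+ * request and then delegates the actual allocation to
+ * avcodec_default_get_buffer2(), which already satisfies the alignment,
+ * reference-counting and extended_data requirements listed above.
+ * @code
+ * static int my_get_buffer2(struct AVCodecContext *s, AVFrame *frame, int flags)
+ * {
+ *     // format, width/height (video) or sample_rate/channel_layout/nb_samples
+ *     // (audio) have already been filled in on the frame at this point.
+ *     if (flags & AV_GET_BUFFER_FLAG_REF) {
+ *         // The decoder may keep a reference to this frame; a custom
+ *         // allocator must not recycle the memory until every AVBufferRef
+ *         // created for it has been released.
+ *     }
+ *     return avcodec_default_get_buffer2(s, frame, flags);
+ * }
+ *
+ * // Installed by the caller before avcodec_open2():
+ * //     avctx->get_buffer2 = my_get_buffer2;
+ * @endcode
+ */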
+
+ /**
+ * If non-zero, the decoded audio and video frames returned from
+ * avcodec_decode_video2() and avcodec_decode_audio4() are reference-counted
+ * and are valid indefinitely. The caller must free them with
+ * av_frame_unref() when they are not needed anymore.
+ * Otherwise, the decoded frames must not be freed by the caller and are
+ * only valid until the next decode call.
+ *
+ * - encoding: unused
+ * - decoding: set by the caller before avcodec_open2().
+ */
+ int refcounted_frames;
+
+ /* - encoding parameters */
+ float qcompress; ///< amount of qscale change between easy & hard scenes (0.0-1.0)
+ float qblur; ///< amount of qscale smoothing over time (0.0-1.0)
+
+ /**
+ * minimum quantizer
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int qmin;
+
+ /**
+ * maximum quantizer
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int qmax;
+
+ /**
+ * maximum quantizer difference between frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_qdiff;
+
+ /**
+ * ratecontrol qmin qmax limiting method
+ * 0-> clipping, 1-> use a nice continuous function to limit qscale within qmin/qmax.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float rc_qsquish;
+
+ float rc_qmod_amp;
+ int rc_qmod_freq;
+
+ /**
+ * decoder bitstream buffer size
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_buffer_size;
+
+ /**
+ * ratecontrol override, see RcOverride
+ * - encoding: Allocated/set/freed by user.
+ * - decoding: unused
+ */
+ int rc_override_count;
+ RcOverride *rc_override;
+
+ /**
+ * rate control equation
+ * - encoding: Set by user
+ * - decoding: unused
+ */
+ const char *rc_eq;
+
+ /**
+ * maximum bitrate
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_max_rate;
+
+ /**
+ * minimum bitrate
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_min_rate;
+
+ float rc_buffer_aggressivity;
+
+ /**
+ * initial complexity for pass1 ratecontrol
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float rc_initial_cplx;
+
+ /**
+ * Rate control will attempt to use, at most, <value> of what can be used without an underflow.
+ * - encoding: Set by user.
+ * - decoding: unused.
+ */
+ float rc_max_available_vbv_use;
+
+ /**
+ * Rate control will attempt to use at least <value> times the amount needed to prevent a VBV overflow.
+ * - encoding: Set by user.
+ * - decoding: unused.
+ */
+ float rc_min_vbv_overflow_use;
+
+ /**
+ * Number of bits which should be loaded into the rc buffer before decoding starts.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_initial_buffer_occupancy;
+
+#define FF_CODER_TYPE_VLC 0
+#define FF_CODER_TYPE_AC 1
+#define FF_CODER_TYPE_RAW 2
+#define FF_CODER_TYPE_RLE 3
+#define FF_CODER_TYPE_DEFLATE 4
+ /**
+ * coder type
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int coder_type;
+
+ /**
+ * context model
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int context_model;
+
+ /**
+ * minimum Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int lmin;
+
+ /**
+ * maximum Lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int lmax;
+
+ /**
+ * frame skip threshold
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_threshold;
+
+ /**
+ * frame skip factor
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_factor;
+
+ /**
+ * frame skip exponent
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_exp;
+
+ /**
+ * frame skip comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_cmp;
+
+ /**
+ * trellis RD quantization
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int trellis;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int min_prediction_order;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_prediction_order;
+
+ /**
+ * GOP timecode frame start number, in non-drop-frame format
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int64_t timecode_frame_start;
+
+ /* The RTP callback: This function is called */
+ /* every time the encoder has a packet to send. */
+ /* It depends on the encoder if the data starts */
+ /* with a Start Code (it should). H.263 does. */
+ /* mb_nb contains the number of macroblocks */
+ /* encoded in the RTP payload. */
+ void (*rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb);
+
+ int rtp_payload_size; /* The size of the RTP payload: the coder will */
+ /* do its best to deliver a chunk with size */
+ /* below rtp_payload_size, the chunk will start */
+ /* with a start code on some codecs like H.263. */
+ /* This doesn't take account of any particular */
+ /* headers inside the transmitted RTP payload. */
+
+ /* statistics, used for 2-pass encoding */
+ int mv_bits;
+ int header_bits;
+ int i_tex_bits;
+ int p_tex_bits;
+ int i_count;
+ int p_count;
+ int skip_count;
+ int misc_bits;
+
+ /**
+ * number of bits used for the previously encoded frame
+ * - encoding: Set by libavcodec.
+ * - decoding: unused
+ */
+ int frame_bits;
+
+ /**
+ * pass1 encoding statistics output buffer
+ * - encoding: Set by libavcodec.
+ * - decoding: unused
+ */
+ char *stats_out;
+
+ /**
+ * pass2 encoding statistics input buffer
+ * Concatenated stuff from stats_out of pass1 should be placed here.
+ * - encoding: Allocated/set/freed by user.
+ * - decoding: unused
+ */
+ char *stats_in;
+
+ /**
+ * Work around bugs in encoders which sometimes cannot be detected automatically.
+ * - encoding: Set by user
+ * - decoding: Set by user
+ */
+ int workaround_bugs;
+#define FF_BUG_AUTODETECT 1 ///< autodetection
+#if FF_API_OLD_MSMPEG4
+#define FF_BUG_OLD_MSMPEG4 2
+#endif
+#define FF_BUG_XVID_ILACE 4
+#define FF_BUG_UMP4 8
+#define FF_BUG_NO_PADDING 16
+#define FF_BUG_AMV 32
+#if FF_API_AC_VLC
+#define FF_BUG_AC_VLC 0 ///< Will be removed, libavcodec can now handle these non-compliant files by default.
+#endif
+#define FF_BUG_QPEL_CHROMA 64
+#define FF_BUG_STD_QPEL 128
+#define FF_BUG_QPEL_CHROMA2 256
+#define FF_BUG_DIRECT_BLOCKSIZE 512
+#define FF_BUG_EDGE 1024
+#define FF_BUG_HPEL_CHROMA 2048
+#define FF_BUG_DC_CLIP 4096
+#define FF_BUG_MS 8192 ///< Work around various bugs in Microsoft's broken decoders.
+#define FF_BUG_TRUNCATED 16384
+
+ /**
+ * strictly follow the standard (MPEG4, ...).
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ * Setting this to STRICT or higher means the encoder and decoder will
+ * generally do stupid things, whereas setting it to unofficial or lower
+ * will mean the encoder might produce output that is not supported by all
+ * spec-compliant decoders. Decoders don't differentiate between normal,
+ * unofficial and experimental (that is, they always try to decode things
+ * when they can) unless they are explicitly asked to behave stupidly
+ * (=strictly conform to the specs)
+ */
+ int strict_std_compliance;
+#define FF_COMPLIANCE_VERY_STRICT 2 ///< Strictly conform to an older more strict version of the spec or reference software.
+#define FF_COMPLIANCE_STRICT 1 ///< Strictly conform to all the things in the spec no matter what consequences.
+#define FF_COMPLIANCE_NORMAL 0
+#define FF_COMPLIANCE_UNOFFICIAL -1 ///< Allow unofficial extensions
+#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things.
+
+ /**
+ * error concealment flags
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int error_concealment;
+#define FF_EC_GUESS_MVS 1
+#define FF_EC_DEBLOCK 2
+
+ /**
+ * debug
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int debug;
+#define FF_DEBUG_PICT_INFO 1
+#define FF_DEBUG_RC 2
+#define FF_DEBUG_BITSTREAM 4
+#define FF_DEBUG_MB_TYPE 8
+#define FF_DEBUG_QP 16
+#if FF_API_DEBUG_MV
+/**
+ * @deprecated this option does nothing
+ */
+#define FF_DEBUG_MV 32
+#endif
+#define FF_DEBUG_DCT_COEFF 0x00000040
+#define FF_DEBUG_SKIP 0x00000080
+#define FF_DEBUG_STARTCODE 0x00000100
+#define FF_DEBUG_PTS 0x00000200
+#define FF_DEBUG_ER 0x00000400
+#define FF_DEBUG_MMCO 0x00000800
+#define FF_DEBUG_BUGS 0x00001000
+#if FF_API_DEBUG_MV
+#define FF_DEBUG_VIS_QP 0x00002000
+#define FF_DEBUG_VIS_MB_TYPE 0x00004000
+#endif
+#define FF_DEBUG_BUFFERS 0x00008000
+#define FF_DEBUG_THREADS 0x00010000
+
+#if FF_API_DEBUG_MV
+ /**
+ * @deprecated this option does not have any effect
+ */
+ attribute_deprecated
+ int debug_mv;
+#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames
+#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames
+#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames
+#endif
+
+ /**
+ * Error recognition; may misdetect some more or less valid parts as errors.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int err_recognition;
+
+/**
+ * Verify checksums embedded in the bitstream (could be of either encoded or
+ * decoded data, depending on the codec) and print an error message on mismatch.
+ * If AV_EF_EXPLODE is also set, a mismatching checksum will result in the
+ * decoder returning an error.
+ */
+#define AV_EF_CRCCHECK (1<<0)
+#define AV_EF_BITSTREAM (1<<1)
+#define AV_EF_BUFFER (1<<2)
+#define AV_EF_EXPLODE (1<<3)
+
+ /**
+ * opaque 64bit number (generally a PTS) that will be reordered and
+ * output in AVFrame.reordered_opaque
+ * @deprecated in favor of pkt_pts
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int64_t reordered_opaque;
+
+ /**
+ * Hardware accelerator in use
+ * - encoding: unused.
+ * - decoding: Set by libavcodec
+ */
+ struct AVHWAccel *hwaccel;
+
+ /**
+ * Hardware accelerator context.
+ * For some hardware accelerators, a global context needs to be
+ * provided by the user. In that case, this holds display-dependent
+ * data Libav cannot instantiate itself. Please refer to the
+ * Libav HW accelerator documentation to know how to fill this
+ * field. E.g. for VA API, this is a struct vaapi_context.
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ void *hwaccel_context;
+
+ /**
+ * error
+ * - encoding: Set by libavcodec if flags&CODEC_FLAG_PSNR.
+ * - decoding: unused
+ */
+ uint64_t error[AV_NUM_DATA_POINTERS];
+
+ /**
+ * DCT algorithm, see FF_DCT_* below
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int dct_algo;
+#define FF_DCT_AUTO 0
+#define FF_DCT_FASTINT 1
+#define FF_DCT_INT 2
+#define FF_DCT_MMX 3
+#define FF_DCT_ALTIVEC 5
+#define FF_DCT_FAAN 6
+
+ /**
+ * IDCT algorithm, see FF_IDCT_* below.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int idct_algo;
+#define FF_IDCT_AUTO 0
+#define FF_IDCT_INT 1
+#define FF_IDCT_SIMPLE 2
+#define FF_IDCT_SIMPLEMMX 3
+#define FF_IDCT_ARM 7
+#define FF_IDCT_ALTIVEC 8
+#define FF_IDCT_SH4 9
+#define FF_IDCT_SIMPLEARM 10
+#define FF_IDCT_IPP 13
+#define FF_IDCT_XVIDMMX 14
+#define FF_IDCT_SIMPLEARMV5TE 16
+#define FF_IDCT_SIMPLEARMV6 17
+#define FF_IDCT_SIMPLEVIS 18
+#define FF_IDCT_FAAN 20
+#define FF_IDCT_SIMPLENEON 22
+#if FF_API_ARCH_ALPHA
+#define FF_IDCT_SIMPLEALPHA 23
+#endif
+
+ /**
+ * bits per sample/pixel from the demuxer (needed for huffyuv).
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by user.
+ */
+ int bits_per_coded_sample;
+
+ /**
+ * Bits per sample/pixel of internal libavcodec pixel/sample format.
+ * - encoding: set by user.
+ * - decoding: set by libavcodec.
+ */
+ int bits_per_raw_sample;
+
+#if FF_API_LOWRES
+ /**
+ * low resolution decoding, 1-> 1/2 size, 2->1/4 size
+ * - encoding: unused
+ * - decoding: Set by user.
+ *
+ * @deprecated use decoder private options instead
+ */
+ attribute_deprecated int lowres;
+#endif
+
+ /**
+ * the picture in the bitstream
+ * - encoding: Set by libavcodec.
+ * - decoding: unused
+ */
+ AVFrame *coded_frame;
+
+ /**
+ * thread count
+ * is used to decide how many independent tasks should be passed to execute()
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int thread_count;
+
+ /**
+ * Which multithreading methods to use.
+ * Use of FF_THREAD_FRAME will increase decoding delay by one frame per thread,
+ * so clients which cannot provide future frames should not use it.
+ *
+ * - encoding: Set by user, otherwise the default is used.
+ * - decoding: Set by user, otherwise the default is used.
+ */
+ int thread_type;
+#define FF_THREAD_FRAME 1 ///< Decode more than one frame at once
+#define FF_THREAD_SLICE 2 ///< Decode more than one part of a single frame at once
+
+ /**
+ * Which multithreading methods are in use by the codec.
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int active_thread_type;
+
+ /**
+ * Set by the client if its custom get_buffer() callback can be called
+ * synchronously from another thread, which allows faster multithreaded decoding.
+ * draw_horiz_band() will be called from other threads regardless of this setting.
+ * Ignored if the default get_buffer() is used.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int thread_safe_callbacks;
+
+ /**
+ * The codec may call this to execute several independent things.
+ * It will return only after finishing all tasks.
+ * The user may replace this with some multithreaded implementation;
+ * the default implementation will execute the parts serially.
+ * @param count the number of things to execute
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*execute)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size);
+
+ /**
+ * The codec may call this to execute several independent things.
+ * It will return only after finishing all tasks.
+ * The user may replace this with some multithreaded implementation;
+ * the default implementation will execute the parts serially.
+ * Also see avcodec_thread_init and e.g. the --enable-pthread configure option.
+ * @param c context passed also to func
+ * @param count the number of things to execute
+ * @param arg2 argument passed unchanged to func
+ * @param ret return values of executed functions, must have space for "count" values. May be NULL.
+ * @param func function that will be called count times, with jobnr from 0 to count-1.
+ * threadnr will be in the range 0 to c->thread_count-1 (< MAX_THREADS), and no
+ * two instances of func executing at the same time will have the same threadnr.
+ * @return currently always 0, but code should handle a future improvement where, when any call to func
+ * returns < 0, no further calls to func may be made and < 0 is returned.
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*execute2)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count);
+
+#if FF_API_THREAD_OPAQUE
+ /**
+ * @deprecated this field should not be used from outside of lavc
+ */
+ attribute_deprecated
+ void *thread_opaque;
+#endif
+
+ /**
+ * noise vs. sse weight for the nsse comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int nsse_weight;
+
+ /**
+ * profile
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int profile;
+#define FF_PROFILE_UNKNOWN -99
+#define FF_PROFILE_RESERVED -100
+
+#define FF_PROFILE_AAC_MAIN 0
+#define FF_PROFILE_AAC_LOW 1
+#define FF_PROFILE_AAC_SSR 2
+#define FF_PROFILE_AAC_LTP 3
+#define FF_PROFILE_AAC_HE 4
+#define FF_PROFILE_AAC_HE_V2 28
+#define FF_PROFILE_AAC_LD 22
+#define FF_PROFILE_AAC_ELD 38
+#define FF_PROFILE_MPEG2_AAC_LOW 128
+#define FF_PROFILE_MPEG2_AAC_HE 131
+
+#define FF_PROFILE_DTS 20
+#define FF_PROFILE_DTS_ES 30
+#define FF_PROFILE_DTS_96_24 40
+#define FF_PROFILE_DTS_HD_HRA 50
+#define FF_PROFILE_DTS_HD_MA 60
+
+#define FF_PROFILE_MPEG2_422 0
+#define FF_PROFILE_MPEG2_HIGH 1
+#define FF_PROFILE_MPEG2_SS 2
+#define FF_PROFILE_MPEG2_SNR_SCALABLE 3
+#define FF_PROFILE_MPEG2_MAIN 4
+#define FF_PROFILE_MPEG2_SIMPLE 5
+
+#define FF_PROFILE_H264_CONSTRAINED (1<<9) // 8+1; constraint_set1_flag
+#define FF_PROFILE_H264_INTRA (1<<11) // 8+3; constraint_set3_flag
+
+#define FF_PROFILE_H264_BASELINE 66
+#define FF_PROFILE_H264_CONSTRAINED_BASELINE (66|FF_PROFILE_H264_CONSTRAINED)
+#define FF_PROFILE_H264_MAIN 77
+#define FF_PROFILE_H264_EXTENDED 88
+#define FF_PROFILE_H264_HIGH 100
+#define FF_PROFILE_H264_HIGH_10 110
+#define FF_PROFILE_H264_HIGH_10_INTRA (110|FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_HIGH_422 122
+#define FF_PROFILE_H264_HIGH_422_INTRA (122|FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_HIGH_444 144
+#define FF_PROFILE_H264_HIGH_444_PREDICTIVE 244
+#define FF_PROFILE_H264_HIGH_444_INTRA (244|FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_CAVLC_444 44
+
+#define FF_PROFILE_VC1_SIMPLE 0
+#define FF_PROFILE_VC1_MAIN 1
+#define FF_PROFILE_VC1_COMPLEX 2
+#define FF_PROFILE_VC1_ADVANCED 3
+
+#define FF_PROFILE_MPEG4_SIMPLE 0
+#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE 1
+#define FF_PROFILE_MPEG4_CORE 2
+#define FF_PROFILE_MPEG4_MAIN 3
+#define FF_PROFILE_MPEG4_N_BIT 4
+#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE 5
+#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6
+#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7
+#define FF_PROFILE_MPEG4_HYBRID 8
+#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME 9
+#define FF_PROFILE_MPEG4_CORE_SCALABLE 10
+#define FF_PROFILE_MPEG4_ADVANCED_CODING 11
+#define FF_PROFILE_MPEG4_ADVANCED_CORE 12
+#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13
+#define FF_PROFILE_MPEG4_SIMPLE_STUDIO 14
+#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE 15
+
+#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_0 0
+#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_1 1
+#define FF_PROFILE_JPEG2000_CSTREAM_NO_RESTRICTION 2
+#define FF_PROFILE_JPEG2000_DCINEMA_2K 3
+#define FF_PROFILE_JPEG2000_DCINEMA_4K 4
+
+
+#define FF_PROFILE_HEVC_MAIN 1
+#define FF_PROFILE_HEVC_MAIN_10 2
+#define FF_PROFILE_HEVC_MAIN_STILL_PICTURE 3
+
+ /**
+ * level
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int level;
+#define FF_LEVEL_UNKNOWN -99
+
+ /**
+ *
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_loop_filter;
+
+ /**
+ *
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_idct;
+
+ /**
+ *
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_frame;
+
+ /**
+ * Header containing style information for text subtitles.
+ * For SUBTITLE_ASS subtitle type, it should contain the whole ASS
+ * [Script Info] and [V4+ Styles] section, plus the [Events] line and
+ * the Format line following. It shouldn't include any Dialogue line.
+ * - encoding: Set/allocated/freed by user (before avcodec_open2())
+ * - decoding: Set/allocated/freed by libavcodec (by avcodec_open2())
+ */
+ uint8_t *subtitle_header;
+ int subtitle_header_size;
+
+#if FF_API_ERROR_RATE
+ /**
+ * @deprecated use the 'error_rate' private AVOption of the mpegvideo
+ * encoders
+ */
+ attribute_deprecated
+ int error_rate;
+#endif
+
+#if FF_API_CODEC_PKT
+ /**
+ * @deprecated this field is not supposed to be accessed from outside lavc
+ */
+ attribute_deprecated
+ AVPacket *pkt;
+#endif
+
+ /**
+ * VBV delay coded in the last frame (in periods of a 27 MHz clock).
+ * Used for compliant TS muxing.
+ * - encoding: Set by libavcodec.
+ * - decoding: unused.
+ */
+ uint64_t vbv_delay;
+} AVCodecContext;
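+
+/**
+ * Editorial usage sketch (error handling trimmed): allocating and opening a
+ * decoder context using the fields documented above. avcodec_find_decoder(),
+ * avcodec_alloc_context3(), avcodec_open2() and av_free() are the regular
+ * library entry points; AV_CODEC_ID_H264 is just an example codec id.
+ * @code
+ * static AVCodecContext *open_example_decoder(void)
+ * {
+ *     AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
+ *     AVCodecContext *avctx;
+ *     if (!codec)
+ *         return NULL;
+ *     avctx = avcodec_alloc_context3(codec);
+ *     if (!avctx)
+ *         return NULL;
+ *     avctx->refcounted_frames = 1;     // frames stay valid until av_frame_unref()
+ *     if (codec->capabilities & CODEC_CAP_AUTO_THREADS)
+ *         avctx->thread_count = 0;      // let libavcodec choose the thread count
+ *     if (avcodec_open2(avctx, codec, NULL) < 0) {
+ *         av_free(avctx);
+ *         return NULL;
+ *     }
+ *     return avctx;
+ * }
+ * @endcode
+ */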
+
+/**
+ * AVProfile.
+ */
+typedef struct AVProfile {
+ int profile;
+ const char *name; ///< short name for the profile
+} AVProfile;
+
+typedef struct AVCodecDefault AVCodecDefault;
+
+struct AVSubtitle;
+
+/**
+ * AVCodec.
+ */
+typedef struct AVCodec {
+ /**
+ * Name of the codec implementation.
+ * The name is globally unique among encoders and among decoders (but an
+ * encoder and a decoder can share the same name).
+ * This is the primary way to find a codec from the user perspective.
+ */
+ const char *name;
+ /**
+ * Descriptive name for the codec, meant to be more human readable than name.
+ * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
+ */
+ const char *long_name;
+ enum AVMediaType type;
+ enum AVCodecID id;
+ /**
+ * Codec capabilities.
+ * see CODEC_CAP_*
+ */
+ int capabilities;
+ const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0}
+ const enum AVPixelFormat *pix_fmts; ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1
+ const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0
+ const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1
+ const uint64_t *channel_layouts; ///< array of supported channel layouts, or NULL if unknown, array is terminated by 0
+#if FF_API_LOWRES
+ attribute_deprecated uint8_t max_lowres; ///< maximum value for lowres supported by the decoder
+#endif
+ const AVClass *priv_class; ///< AVClass for the private context
+ const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}
+
+ /*****************************************************************
+ * No fields below this line are part of the public API. They
+ * may not be used outside of libavcodec and can be changed and
+ * removed at will.
+ * New public fields should be added right above.
+ *****************************************************************
+ */
+ int priv_data_size;
+ struct AVCodec *next;
+ /**
+ * @name Frame-level threading support functions
+ * @{
+ */
+ /**
+ * If defined, called on thread contexts when they are created.
+ * If the codec allocates writable tables in init(), re-allocate them here.
+ * priv_data will be set to a copy of the original.
+ */
+ int (*init_thread_copy)(AVCodecContext *);
+ /**
+ * Copy necessary context variables from a previous thread context to the current one.
+ * If not defined, the next thread will start automatically; otherwise, the codec
+ * must call ff_thread_finish_setup().
+ *
+ * dst and src will (rarely) point to the same context, in which case memcpy should be skipped.
+ */
+ int (*update_thread_context)(AVCodecContext *dst, const AVCodecContext *src);
+ /** @} */
+
+ /**
+ * Private codec-specific defaults.
+ */
+ const AVCodecDefault *defaults;
+
+ /**
+ * Initialize codec static data, called from avcodec_register().
+ */
+ void (*init_static_data)(struct AVCodec *codec);
+
+ int (*init)(AVCodecContext *);
+ int (*encode_sub)(AVCodecContext *, uint8_t *buf, int buf_size,
+ const struct AVSubtitle *sub);
+ /**
+ * Encode data to an AVPacket.
+ *
+ * @param avctx codec context
+ * @param avpkt output AVPacket (may contain a user-provided buffer)
+ * @param[in] frame AVFrame containing the raw data to be encoded
+ * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a
+ * non-empty packet was returned in avpkt.
+ * @return 0 on success, negative error code on failure
+ */
+ int (*encode2)(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame,
+ int *got_packet_ptr);
+ int (*decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt);
+ int (*close)(AVCodecContext *);
+ /**
+ * Flush buffers.
+ * Will be called when seeking
+ */
+ void (*flush)(AVCodecContext *);
+} AVCodec;
+
+/**
+ * AVHWAccel.
+ */
+typedef struct AVHWAccel {
+ /**
+ * Name of the hardware accelerated codec.
+ * The name is globally unique among encoders and among decoders (but an
+ * encoder and a decoder can share the same name).
+ */
+ const char *name;
+
+ /**
+ * Type of codec implemented by the hardware accelerator.
+ *
+ * See AVMEDIA_TYPE_xxx
+ */
+ enum AVMediaType type;
+
+ /**
+ * Codec implemented by the hardware accelerator.
+ *
+ * See AV_CODEC_ID_xxx
+ */
+ enum AVCodecID id;
+
+ /**
+ * Supported pixel format.
+ *
+ * Only hardware accelerated formats are supported here.
+ */
+ enum AVPixelFormat pix_fmt;
+
+ /**
+ * Hardware accelerated codec capabilities.
+ * see FF_HWACCEL_CODEC_CAP_*
+ */
+ int capabilities;
+
+ struct AVHWAccel *next;
+
+ /**
+ * Called at the beginning of each frame or field picture.
+ *
+ * Meaningful frame information (codec specific) is guaranteed to
+ * be parsed at this point. This function is mandatory.
+ *
+ * Note that buf can be NULL, with buf_size set to 0.
+ * Otherwise, the whole frame is available at this point.
+ *
+ * @param avctx the codec context
+ * @param buf the frame data buffer base
+ * @param buf_size the size of the frame in bytes
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);
+
+ /**
+ * Callback for each slice.
+ *
+ * Meaningful slice information (codec specific) is guaranteed to
+ * be parsed at this point. This function is mandatory.
+ *
+ * @param avctx the codec context
+ * @param buf the slice data buffer base
+ * @param buf_size the size of the slice in bytes
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);
+
+ /**
+ * Called at the end of each frame or field picture.
+ *
+ * The whole picture is parsed at this point and can now be sent
+ * to the hardware accelerator. This function is mandatory.
+ *
+ * @param avctx the codec context
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*end_frame)(AVCodecContext *avctx);
+
+ /**
+ * Size of HW accelerator private data.
+ *
+ * Private data is allocated with av_mallocz() before
+ * AVCodecContext.get_buffer() and deallocated after
+ * AVCodecContext.release_buffer().
+ */
+ int priv_data_size;
+} AVHWAccel;
+
+/**
+ * @defgroup lavc_picture AVPicture
+ *
+ * Functions for working with AVPicture
+ * @{
+ */
+
+/**
+ * Four components are given, that's all.
+ * The last component is alpha.
+ */
+typedef struct AVPicture {
+ uint8_t *data[AV_NUM_DATA_POINTERS];
+ int linesize[AV_NUM_DATA_POINTERS]; ///< number of bytes per line
+} AVPicture;
+
+/**
+ * @}
+ */
+
+#define AVPALETTE_SIZE 1024
+#define AVPALETTE_COUNT 256
+
+enum AVSubtitleType {
+ SUBTITLE_NONE,
+
+ SUBTITLE_BITMAP, ///< A bitmap, pict will be set
+
+ /**
+ * Plain text, the text field must be set by the decoder and is
+ * authoritative. ass and pict fields may contain approximations.
+ */
+ SUBTITLE_TEXT,
+
+ /**
+ * Formatted text, the ass field must be set by the decoder and is
+ * authoritative. pict and text fields may contain approximations.
+ */
+ SUBTITLE_ASS,
+};
+
+#define AV_SUBTITLE_FLAG_FORCED 0x00000001
+
+typedef struct AVSubtitleRect {
+ int x; ///< top left corner of pict, undefined when pict is not set
+ int y; ///< top left corner of pict, undefined when pict is not set
+ int w; ///< width of pict, undefined when pict is not set
+ int h; ///< height of pict, undefined when pict is not set
+ int nb_colors; ///< number of colors in pict, undefined when pict is not set
+
+ /**
+ * data+linesize for the bitmap of this subtitle.
+ * can be set for text/ass as well once they were rendered
+ */
+ AVPicture pict;
+ enum AVSubtitleType type;
+
+ char *text; ///< 0 terminated plain UTF-8 text
+
+ /**
+ * 0 terminated ASS/SSA compatible event line.
+ * The presentation of this is unaffected by the other values in this
+ * struct.
+ */
+ char *ass;
+ int flags;
+} AVSubtitleRect;
+
+typedef struct AVSubtitle {
+ uint16_t format; /* 0 = graphics */
+ uint32_t start_display_time; /* relative to packet pts, in ms */
+ uint32_t end_display_time; /* relative to packet pts, in ms */
+ unsigned num_rects;
+ AVSubtitleRect **rects;
+ int64_t pts; ///< Same as packet pts, in AV_TIME_BASE
+} AVSubtitle;
+
+/**
+ * If c is NULL, returns the first registered codec;
+ * if c is non-NULL, returns the next registered codec after c,
+ * or NULL if c is the last one.
+ */
+AVCodec *av_codec_next(const AVCodec *c);
+
+/**
+ * Return the LIBAVCODEC_VERSION_INT constant.
+ */
+unsigned avcodec_version(void);
+
+/**
+ * Return the libavcodec build-time configuration.
+ */
+const char *avcodec_configuration(void);
+
+/**
+ * Return the libavcodec license.
+ */
+const char *avcodec_license(void);
+
+/**
+ * Register the codec codec and initialize libavcodec.
+ *
+ * @warning either this function or avcodec_register_all() must be called
+ * before any other libavcodec functions.
+ *
+ * @see avcodec_register_all()
+ */
+void avcodec_register(AVCodec *codec);
+
+/**
+ * Register all the codecs, parsers and bitstream filters which were enabled at
+ * configuration time. If you do not call this function you can select exactly
+ * which formats you want to support, by using the individual registration
+ * functions.
+ *
+ * @see avcodec_register
+ * @see av_register_codec_parser
+ * @see av_register_bitstream_filter
+ */
+void avcodec_register_all(void);
+
+/**
+ * Allocate an AVCodecContext and set its fields to default values. The
+ * resulting struct can be deallocated by calling avcodec_close() on it followed
+ * by av_free().
+ *
+ * @param codec if non-NULL, allocate private data and initialize defaults
+ * for the given codec. It is illegal to then call avcodec_open2()
+ * with a different codec.
+ * If NULL, then the codec-specific defaults won't be initialized,
+ * which may result in suboptimal default settings (this is
+ * important mainly for encoders, e.g. libx264).
+ *
+ * @return An AVCodecContext filled with default values or NULL on failure.
+ * @see avcodec_get_context_defaults
+ */
+AVCodecContext *avcodec_alloc_context3(const AVCodec *codec);
+
+/**
+ * Set the fields of the given AVCodecContext to default values corresponding
+ * to the given codec (defaults may be codec-dependent).
+ *
+ * Do not call this function if a non-NULL codec has been passed
+ * to avcodec_alloc_context3() that allocated this AVCodecContext.
+ * If codec is non-NULL, it is illegal to call avcodec_open2() with a
+ * different codec on this AVCodecContext.
+ */
+int avcodec_get_context_defaults3(AVCodecContext *s, const AVCodec *codec);
+
+/**
+ * Get the AVClass for AVCodecContext. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass *avcodec_get_class(void);
+
+/**
+ * Copy the settings of the source AVCodecContext into the destination
+ * AVCodecContext. The resulting destination codec context will be
+ * unopened, i.e. you are required to call avcodec_open2() before you
+ * can use this AVCodecContext to decode/encode video/audio data.
+ *
+ * @param dest target codec context, should be initialized with
+ * avcodec_alloc_context3(), but otherwise uninitialized
+ * @param src source codec context
+ * @return AVERROR() on error (e.g. memory allocation error), 0 on success
+ */
+int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src);
+
+#if FF_API_AVFRAME_LAVC
+/**
+ * @deprecated use av_frame_alloc()
+ */
+attribute_deprecated
+AVFrame *avcodec_alloc_frame(void);
+
+/**
+ * Set the fields of the given AVFrame to default values.
+ *
+ * @param frame The AVFrame of which the fields should be set to default values.
+ *
+ * @deprecated use av_frame_unref()
+ */
+attribute_deprecated
+void avcodec_get_frame_defaults(AVFrame *frame);
+
+/**
+ * Free the frame and any dynamically allocated objects in it,
+ * e.g. extended_data.
+ *
+ * @param frame frame to be freed. The pointer will be set to NULL.
+ *
+ * @warning this function does NOT free the data buffers themselves
+ * (it does not know how, since they might have been allocated with
+ * a custom get_buffer()).
+ *
+ * @deprecated use av_frame_free()
+ */
+attribute_deprecated
+void avcodec_free_frame(AVFrame **frame);
+#endif
+
+/**
+ * Initialize the AVCodecContext to use the given AVCodec. Prior to using this
+ * function the context has to be allocated with avcodec_alloc_context3().
+ *
+ * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
+ * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
+ * retrieving a codec.
+ *
+ * @warning This function is not thread safe!
+ *
+ * @code
+ * avcodec_register_all();
+ * av_dict_set(&opts, "b", "2.5M", 0);
+ * codec = avcodec_find_decoder(AV_CODEC_ID_H264);
+ * if (!codec)
+ * exit(1);
+ *
+ * context = avcodec_alloc_context3(codec);
+ *
+ * if (avcodec_open2(context, codec, opts) < 0)
+ * exit(1);
+ * @endcode
+ *
+ * @param avctx The context to initialize.
+ * @param codec The codec to open this context for. If a non-NULL codec has been
+ * previously passed to avcodec_alloc_context3() or
+ * avcodec_get_context_defaults3() for this context, then this
+ * parameter MUST be either NULL or equal to the previously passed
+ * codec.
+ * @param options A dictionary filled with AVCodecContext and codec-private options.
+ * On return this object will be filled with options that were not found.
+ *
+ * @return zero on success, a negative value on error
+ * @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(),
+ * av_dict_set(), av_opt_find().
+ */
+int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options);
+
+/**
+ * Close a given AVCodecContext and free all the data associated with it
+ * (but not the AVCodecContext itself).
+ *
+ * Calling this function on an AVCodecContext that hasn't been opened will free
+ * the codec-specific data allocated in avcodec_alloc_context3() /
+ * avcodec_get_context_defaults3() with a non-NULL codec. Subsequent calls will
+ * do nothing.
+ */
+int avcodec_close(AVCodecContext *avctx);
+
+/**
+ * Free all allocated data in the given subtitle struct.
+ *
+ * @param sub AVSubtitle to free.
+ */
+void avsubtitle_free(AVSubtitle *sub);
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_packet
+ * @{
+ */
+
+#if FF_API_DESTRUCT_PACKET
+/**
+ * Default packet destructor.
+ * @deprecated use the AVBuffer API instead
+ */
+attribute_deprecated
+void av_destruct_packet(AVPacket *pkt);
+#endif
+
+/**
+ * Initialize optional fields of a packet with default values.
+ *
+ * Note, this does not touch the data and size members, which have to be
+ * initialized separately.
+ *
+ * @param pkt packet
+ */
+void av_init_packet(AVPacket *pkt);
+
+/**
+ * Allocate the payload of a packet and initialize its fields with
+ * default values.
+ *
+ * @param pkt packet
+ * @param size wanted payload size
+ * @return 0 if OK, AVERROR_xxx otherwise
+ */
+int av_new_packet(AVPacket *pkt, int size);
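+
+/*
+ * A minimal sketch of the typical packet lifecycle using the functions above
+ * (illustrative only; error handling shortened, variable names are not part
+ * of the API).
+ * @code
+ * AVPacket pkt;
+ * av_init_packet(&pkt);
+ * pkt.data = NULL;                     // no payload yet
+ * pkt.size = 0;
+ * if (av_new_packet(&pkt, 4096) < 0)   // allocate a 4096-byte payload
+ *     return -1;
+ * // ... fill pkt.data, hand the packet to a decoder ...
+ * av_free_packet(&pkt);                // release the payload
+ * @endcode
+ */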
+
+/**
+ * Reduce packet size, correctly zeroing padding
+ *
+ * @param pkt packet
+ * @param size new size
+ */
+void av_shrink_packet(AVPacket *pkt, int size);
+
+/**
+ * Increase packet size, correctly zeroing padding
+ *
+ * @param pkt packet
+ * @param grow_by number of bytes by which to increase the size of the packet
+ */
+int av_grow_packet(AVPacket *pkt, int grow_by);
+
+/**
+ * Initialize a reference-counted packet from av_malloc()ed data.
+ *
+ * @param pkt packet to be initialized. This function will set the data, size,
+ * buf and destruct fields, all others are left untouched.
+ * @param data Data allocated by av_malloc() to be used as packet data. If this
+ * function returns successfully, the data is owned by the underlying AVBuffer.
+ * The caller may not access the data through other means.
+ * @param size size of data in bytes, without the padding. I.e. the full buffer
+ * size is assumed to be size + FF_INPUT_BUFFER_PADDING_SIZE.
+ *
+ * @return 0 on success, a negative AVERROR on error
+ */
+int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size);
+
+/**
+ * @warning This is a hack - the packet memory allocation stuff is broken. The
+ * packet is allocated if it was not really allocated.
+ */
+int av_dup_packet(AVPacket *pkt);
+
+/**
+ * Free a packet.
+ *
+ * @param pkt packet to free
+ */
+void av_free_packet(AVPacket *pkt);
+
+/**
+ * Allocate new side information for a packet.
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param size side information size
+ * @return pointer to freshly allocated data or NULL otherwise
+ */
+uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ int size);
+
+/**
+ * Shrink the already allocated side data buffer
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param size new side information size
+ * @return 0 on success, < 0 on failure
+ */
+int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ int size);
+
+/**
+ * Get side information from packet.
+ *
+ * @param pkt packet
+ * @param type desired side information type
+ * @param size pointer for side information size to store (optional)
+ * @return pointer to data if present or NULL otherwise
+ */
+uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ int *size);
+
+/**
+ * Convenience function to free all the side data stored.
+ * All the other fields stay untouched.
+ *
+ * @param pkt packet
+ */
+void av_packet_free_side_data(AVPacket *pkt);
+
+/**
+ * Set up a new reference to the data described by a given packet.
+ *
+ * If src is reference-counted, setup dst as a new reference to the
+ * buffer in src. Otherwise allocate a new buffer in dst and copy the
+ * data from src into it.
+ *
+ * All the other fields are copied from src.
+ *
+ * @see av_packet_unref
+ *
+ * @param dst Destination packet
+ * @param src Source packet
+ *
+ * @return 0 on success, a negative AVERROR on error.
+ */
+int av_packet_ref(AVPacket *dst, AVPacket *src);
+
+/**
+ * Wipe the packet.
+ *
+ * Unreference the buffer referenced by the packet and reset the
+ * remaining packet fields to their default values.
+ *
+ * @param pkt The packet to be unreferenced.
+ */
+void av_packet_unref(AVPacket *pkt);
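+
+/*
+ * A short sketch of sharing packet data via references (illustrative only;
+ * src is assumed to be a valid, filled AVPacket).
+ * @code
+ * AVPacket dst;
+ * av_init_packet(&dst);
+ * if (av_packet_ref(&dst, &src) < 0)
+ *     return -1;                  // referencing/copying failed
+ * // ... dst and src now describe the same data ...
+ * av_packet_unref(&dst);          // drop our reference
+ * @endcode
+ */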
+
+/**
+ * Move every field in src to dst and reset src.
+ *
+ * @see av_packet_unref
+ *
+ * @param src Source packet, will be reset
+ * @param dst Destination packet
+ */
+void av_packet_move_ref(AVPacket *dst, AVPacket *src);
+
+/**
+ * Copy only "properties" fields from src to dst.
+ *
+ * Properties for the purpose of this function are all the fields
+ * besides those related to the packet data (buf, data, size).
+ *
+ * @param dst Destination packet
+ * @param src Source packet
+ *
+ * @return 0 on success, AVERROR on failure.
+ *
+ */
+int av_packet_copy_props(AVPacket *dst, const AVPacket *src);
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_decoding
+ * @{
+ */
+
+/**
+ * Find a registered decoder with a matching codec ID.
+ *
+ * @param id AVCodecID of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_decoder(enum AVCodecID id);
+
+/**
+ * Find a registered decoder with the specified name.
+ *
+ * @param name name of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_decoder_by_name(const char *name);
+
+#if FF_API_GET_BUFFER
+attribute_deprecated int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);
+attribute_deprecated void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);
+attribute_deprecated int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic);
+#endif
+
+/**
+ * The default callback for AVCodecContext.get_buffer2(). It is made public so
+ * it can be called by custom get_buffer2() implementations for decoders without
+ * CODEC_CAP_DR1 set.
+ */
+int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags);
+
+#if FF_API_EMU_EDGE
+/**
+ * Return the amount of padding in pixels which the get_buffer callback must
+ * provide around the edge of the image for codecs which do not have the
+ * CODEC_FLAG_EMU_EDGE flag.
+ *
+ * @return Required padding in pixels.
+ *
+ * @deprecated CODEC_FLAG_EMU_EDGE is deprecated, so this function is no longer
+ * needed
+ */
+attribute_deprecated
+unsigned avcodec_get_edge_width(void);
+#endif
+
+/**
+ * Modify width and height values so that they will result in a memory
+ * buffer that is acceptable for the codec if you do not use any horizontal
+ * padding.
+ *
+ * May only be used if a codec with CODEC_CAP_DR1 has been opened.
+ */
+void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);
+
+/**
+ * Modify width and height values so that they will result in a memory
+ * buffer that is acceptable for the codec if you also ensure that all
+ * line sizes are a multiple of the respective linesize_align[i].
+ *
+ * May only be used if a codec with CODEC_CAP_DR1 has been opened.
+ */
+void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
+ int linesize_align[AV_NUM_DATA_POINTERS]);
+
+/**
+ * Decode the audio frame of size avpkt->size from avpkt->data into frame.
+ *
+ * Some decoders may support multiple frames in a single AVPacket. Such
+ * decoders would then just decode the first frame and the return value would be
+ * less than the packet size. In this case, avcodec_decode_audio4 has to be
+ * called again with an AVPacket containing the remaining data in order to
+ * decode the second frame, etc... Even if no frames are returned, the packet
+ * needs to be fed to the decoder with remaining data until it is completely
+ * consumed or an error occurs.
+ *
+ * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input
+ * and output. This means that for some packets they will not immediately
+ * produce decoded output and need to be flushed at the end of decoding to get
+ * all the decoded data. Flushing is done by calling this function with packets
+ * with avpkt->data set to NULL and avpkt->size set to 0 until it stops
+ * returning samples. It is safe to flush even those decoders that are not
+ * marked with CODEC_CAP_DELAY, then no samples will be returned.
+ *
+ * @warning The input buffer, avpkt->data must be FF_INPUT_BUFFER_PADDING_SIZE
+ * larger than the actual read bytes because some optimized bitstream
+ * readers read 32 or 64 bits at once and could read over the end.
+ *
+ * @param avctx the codec context
+ * @param[out] frame The AVFrame in which to store decoded audio samples.
+ * The decoder will allocate a buffer for the decoded frame by
+ * calling the AVCodecContext.get_buffer2() callback.
+ * When AVCodecContext.refcounted_frames is set to 1, the frame is
+ * reference counted and the returned reference belongs to the
+ * caller. The caller must release the frame using av_frame_unref()
+ * when the frame is no longer needed. The caller may safely write
+ * to the frame if av_frame_is_writable() returns 1.
+ * When AVCodecContext.refcounted_frames is set to 0, the returned
+ * reference belongs to the decoder and is valid only until the
+ * next call to this function or until closing or flushing the
+ * decoder. The caller may not write to it.
+ * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is
+ * non-zero. Note that this field being set to zero
+ * does not mean that an error has occurred. For
+ * decoders with CODEC_CAP_DELAY set, no given decode
+ * call is guaranteed to produce a frame.
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ * At least avpkt->data and avpkt->size should be set. Some
+ * decoders might also require additional fields to be set.
+ * @return A negative error code is returned if an error occurred during
+ * decoding, otherwise the number of bytes consumed from the input
+ * AVPacket is returned.
+ */
+int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame,
+ int *got_frame_ptr, AVPacket *avpkt);
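+
+/*
+ * A minimal decoding-loop sketch for packets that may hold several audio
+ * frames (illustrative only; avctx is assumed to be an opened audio decoder
+ * and pkt a demuxed packet).
+ * @code
+ * AVFrame *frame = av_frame_alloc();
+ * while (pkt.size > 0) {
+ *     int got_frame = 0;
+ *     int len = avcodec_decode_audio4(avctx, frame, &got_frame, &pkt);
+ *     if (len < 0)
+ *         break;                   // decoding error
+ *     if (got_frame) {
+ *         // frame->nb_samples samples are available in frame->data
+ *     }
+ *     pkt.data += len;             // consume the bytes that were used
+ *     pkt.size -= len;
+ * }
+ * av_frame_free(&frame);
+ * @endcode
+ */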
+
+/**
+ * Decode the video frame of size avpkt->size from avpkt->data into picture.
+ * Some decoders may support multiple frames in a single AVPacket; such
+ * decoders would then just decode the first frame.
+ *
+ * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than
+ * the actual read bytes because some optimized bitstream readers read 32 or 64
+ * bits at once and could read over the end.
+ *
+ * @warning The end of the input buffer buf should be set to 0 to ensure that
+ * no overreading happens for damaged MPEG streams.
+ *
+ * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay
+ * between input and output, these need to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to return the remaining frames.
+ *
+ * @param avctx the codec context
+ * @param[out] picture The AVFrame in which the decoded video frame will be stored.
+ * Use av_frame_alloc() to get an AVFrame. The codec will
+ * allocate memory for the actual bitmap by calling the
+ * AVCodecContext.get_buffer2() callback.
+ * When AVCodecContext.refcounted_frames is set to 1, the frame is
+ * reference counted and the returned reference belongs to the
+ * caller. The caller must release the frame using av_frame_unref()
+ * when the frame is no longer needed. The caller may safely write
+ * to the frame if av_frame_is_writable() returns 1.
+ * When AVCodecContext.refcounted_frames is set to 0, the returned
+ * reference belongs to the decoder and is valid only until the
+ * next call to this function or until closing or flushing the
+ * decoder. The caller may not write to it.
+ *
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ * You can create such a packet with av_init_packet(), then set
+ * data and size; some decoders might in addition need other fields like
+ * flags&AV_PKT_FLAG_KEY. All decoders are designed to use as few
+ * fields as possible.
+ * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero.
+ * @return On error a negative value is returned, otherwise the number of bytes
+ * used or zero if no frame could be decompressed.
+ */
+int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
+ int *got_picture_ptr,
+ AVPacket *avpkt);
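+
+/*
+ * A minimal sketch of decoding one packet and draining delayed frames at end
+ * of stream (illustrative only; avctx is assumed to be an opened video
+ * decoder and pkt a demuxed packet).
+ * @code
+ * AVFrame *picture = av_frame_alloc();
+ * int got_picture = 0;
+ * if (avcodec_decode_video2(avctx, picture, &got_picture, &pkt) >= 0 &&
+ *     got_picture) {
+ *     // picture->data[] / picture->linesize[] hold the decoded image
+ * }
+ * // at end of stream, feed empty packets until no more frames come out
+ * AVPacket flush;
+ * av_init_packet(&flush);
+ * flush.data = NULL;
+ * flush.size = 0;
+ * do {
+ *     avcodec_decode_video2(avctx, picture, &got_picture, &flush);
+ * } while (got_picture);
+ * av_frame_free(&picture);
+ * @endcode
+ */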
+
+/**
+ * Decode a subtitle message.
+ * Return a negative value on error, otherwise return the number of bytes used.
+ * If no subtitle could be decompressed, got_sub_ptr is zero.
+ * Otherwise, the subtitle is stored in *sub.
+ * Note that CODEC_CAP_DR1 is not available for subtitle codecs. This is for
+ * simplicity, because the performance difference is expected to be negligible
+ * and reusing a get_buffer written for video codecs would probably perform badly
+ * due to a potentially very different allocation pattern.
+ *
+ * @param avctx the codec context
+ * @param[out] sub The AVSubtitle in which the decoded subtitle will be stored, must be
+ freed with avsubtitle_free if *got_sub_ptr is set.
+ * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero.
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ */
+int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
+ int *got_sub_ptr,
+ AVPacket *avpkt);
+
+/**
+ * @defgroup lavc_parsing Frame parsing
+ * @{
+ */
+
+enum AVPictureStructure {
+ AV_PICTURE_STRUCTURE_UNKNOWN, ///< unknown
+ AV_PICTURE_STRUCTURE_TOP_FIELD, ///< coded as top field
+ AV_PICTURE_STRUCTURE_BOTTOM_FIELD, ///< coded as bottom field
+ AV_PICTURE_STRUCTURE_FRAME, ///< coded as frame
+};
+
+typedef struct AVCodecParserContext {
+ void *priv_data;
+ struct AVCodecParser *parser;
+ int64_t frame_offset; /* offset of the current frame */
+ int64_t cur_offset; /* current offset
+ (incremented by each av_parser_parse()) */
+ int64_t next_frame_offset; /* offset of the next frame */
+ /* video info */
+ int pict_type; /* XXX: Put it back in AVCodecContext. */
+ /**
+ * This field is used for proper frame duration computation in lavf.
+ * It signals how much longer the frame duration of the current frame
+ * is compared to the normal frame duration.
+ *
+ * frame_duration = (1 + repeat_pict) * time_base
+ *
+ * It is used by codecs like H.264 to display telecined material.
+ */
+ int repeat_pict; /* XXX: Put it back in AVCodecContext. */
+ int64_t pts; /* pts of the current frame */
+ int64_t dts; /* dts of the current frame */
+
+ /* private data */
+ int64_t last_pts;
+ int64_t last_dts;
+ int fetch_timestamp;
+
+#define AV_PARSER_PTS_NB 4
+ int cur_frame_start_index;
+ int64_t cur_frame_offset[AV_PARSER_PTS_NB];
+ int64_t cur_frame_pts[AV_PARSER_PTS_NB];
+ int64_t cur_frame_dts[AV_PARSER_PTS_NB];
+
+ int flags;
+#define PARSER_FLAG_COMPLETE_FRAMES 0x0001
+#define PARSER_FLAG_ONCE 0x0002
+/// Set if the parser has a valid file offset
+#define PARSER_FLAG_FETCHED_OFFSET 0x0004
+
+ int64_t offset; ///< byte offset from starting packet start
+ int64_t cur_frame_end[AV_PARSER_PTS_NB];
+
+ /**
+ * Set by parser to 1 for key frames and 0 for non-key frames.
+ * It is initialized to -1, so if the parser doesn't set this flag,
+ * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames
+ * will be used.
+ */
+ int key_frame;
+
+ /**
+ * Time difference in stream time base units from the pts of this
+ * packet to the point at which the output from the decoder has converged
+ * independently of the availability of previous frames. That is, the
+ * frames are virtually identical no matter if decoding started from
+ * the very first frame or from this keyframe.
+ * Is AV_NOPTS_VALUE if unknown.
+ * This field is not the display duration of the current frame.
+ * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY
+ * set.
+ *
+ * The purpose of this field is to allow seeking in streams that have no
+ * keyframes in the conventional sense. It corresponds to the
+ * recovery point SEI in H.264 and match_time_delta in NUT. It is also
+ * essential for some types of subtitle streams to ensure that all
+ * subtitles are correctly displayed after seeking.
+ */
+ int64_t convergence_duration;
+
+ // Timestamp generation support:
+ /**
+ * Synchronization point for start of timestamp generation.
+ *
+ * Set to >0 for sync point, 0 for no sync point and <0 for undefined
+ * (default).
+ *
+ * For example, this corresponds to presence of H.264 buffering period
+ * SEI message.
+ */
+ int dts_sync_point;
+
+ /**
+ * Offset of the current timestamp against last timestamp sync point in
+ * units of AVCodecContext.time_base.
+ *
+ * Set to INT_MIN when dts_sync_point unused. Otherwise, it must
+ * contain a valid timestamp offset.
+ *
+ * Note that the timestamp of the sync point usually has a nonzero
+ * dts_ref_dts_delta, which refers to the previous sync point. The offset of
+ * the next frame after the timestamp sync point will usually be 1.
+ *
+ * For example, this corresponds to H.264 cpb_removal_delay.
+ */
+ int dts_ref_dts_delta;
+
+ /**
+ * Presentation delay of current frame in units of AVCodecContext.time_base.
+ *
+ * Set to INT_MIN when dts_sync_point unused. Otherwise, it must
+ * contain valid non-negative timestamp delta (presentation time of a frame
+ * must not lie in the past).
+ *
+ * This delay represents the difference between decoding and presentation
+ * time of the frame.
+ *
+ * For example, this corresponds to H.264 dpb_output_delay.
+ */
+ int pts_dts_delta;
+
+ /**
+ * Position of the packet in file.
+ *
+ * Analogous to cur_frame_pts/dts
+ */
+ int64_t cur_frame_pos[AV_PARSER_PTS_NB];
+
+ /**
+ * Byte position of currently parsed frame in stream.
+ */
+ int64_t pos;
+
+ /**
+ * Previous frame byte position.
+ */
+ int64_t last_pos;
+
+ /**
+ * Duration of the current frame.
+ * For audio, this is in units of 1 / AVCodecContext.sample_rate.
+ * For all other types, this is in units of AVCodecContext.time_base.
+ */
+ int duration;
+
+ enum AVFieldOrder field_order;
+
+ /**
+ * Indicate whether a picture is coded as a frame, top field or bottom field.
+ *
+ * For example, H.264 field_pic_flag equal to 0 corresponds to
+ * AV_PICTURE_STRUCTURE_FRAME. An H.264 picture with field_pic_flag
+ * equal to 1 and bottom_field_flag equal to 0 corresponds to
+ * AV_PICTURE_STRUCTURE_TOP_FIELD.
+ */
+ enum AVPictureStructure picture_structure;
+
+ /**
+ * Picture number incremented in presentation or output order.
+ * This field may be reinitialized at the first picture of a new sequence.
+ *
+ * For example, this corresponds to H.264 PicOrderCnt.
+ */
+ int output_picture_number;
+} AVCodecParserContext;
+
+typedef struct AVCodecParser {
+ int codec_ids[5]; /* several codec IDs are permitted */
+ int priv_data_size;
+ int (*parser_init)(AVCodecParserContext *s);
+ int (*parser_parse)(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ const uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size);
+ void (*parser_close)(AVCodecParserContext *s);
+ int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size);
+ struct AVCodecParser *next;
+} AVCodecParser;
+
+AVCodecParser *av_parser_next(AVCodecParser *c);
+
+void av_register_codec_parser(AVCodecParser *parser);
+AVCodecParserContext *av_parser_init(int codec_id);
+
+/**
+ * Parse a packet.
+ *
+ * @param s parser context.
+ * @param avctx codec context.
+ * @param poutbuf set to pointer to parsed buffer or NULL if not yet finished.
+ * @param poutbuf_size set to size of parsed buffer or zero if not yet finished.
+ * @param buf input buffer.
+ * @param buf_size input length; to signal EOF, this should be 0 (so that the last frame can be output).
+ * @param pts input presentation timestamp.
+ * @param dts input decoding timestamp.
+ * @param pos input byte position in stream.
+ * @return the number of bytes of the input bitstream used.
+ *
+ * Example:
+ * @code
+ * while(in_len){
+ * len = av_parser_parse2(myparser, AVCodecContext, &data, &size,
+ * in_data, in_len,
+ * pts, dts, pos);
+ * in_data += len;
+ * in_len -= len;
+ *
+ * if(size)
+ * decode_frame(data, size);
+ * }
+ * @endcode
+ */
+int av_parser_parse2(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size,
+ int64_t pts, int64_t dts,
+ int64_t pos);
+
+/**
+ * @return 0 if the output buffer is a subset of the input, 1 if it is allocated and must be freed
+ * @deprecated use AVBitstreamFilter
+ */
+int av_parser_change(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe);
+void av_parser_close(AVCodecParserContext *s);
+
+/**
+ * @}
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_encoding
+ * @{
+ */
+
+/**
+ * Find a registered encoder with a matching codec ID.
+ *
+ * @param id AVCodecID of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_encoder(enum AVCodecID id);
+
+/**
+ * Find a registered encoder with the specified name.
+ *
+ * @param name name of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_encoder_by_name(const char *name);
+
+/**
+ * Encode a frame of audio.
+ *
+ * Takes input samples from frame and writes the next output packet, if
+ * available, to avpkt. The output packet does not necessarily contain data for
+ * the most recent frame, as encoders can delay, split, and combine input frames
+ * internally as needed.
+ *
+ * @param avctx codec context
+ * @param avpkt output AVPacket.
+ * The user can supply an output buffer by setting
+ * avpkt->data and avpkt->size prior to calling the
+ * function, but if the size of the user-provided data is not
+ * large enough, encoding will fail. All other AVPacket fields
+ * will be reset by the encoder using av_init_packet(). If
+ * avpkt->data is NULL, the encoder will allocate it.
+ * The encoder will set avpkt->size to the size of the
+ * output packet.
+ *
+ * If this function fails or produces no output, avpkt will be
+ * freed using av_free_packet() (i.e. avpkt->destruct will be
+ * called to free the user supplied buffer).
+ * @param[in] frame AVFrame containing the raw audio data to be encoded.
+ * May be NULL when flushing an encoder that has the
+ * CODEC_CAP_DELAY capability set.
+ * If CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame
+ * can have any number of samples.
+ * If it is not set, frame->nb_samples must be equal to
+ * avctx->frame_size for all frames except the last.
+ * The final frame may be smaller than avctx->frame_size.
+ * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
+ * output packet is non-empty, and to 0 if it is
+ * empty. If the function returns an error, the
+ * packet can be assumed to be invalid, and the
+ * value of got_packet_ptr is undefined and should
+ * not be used.
+ * @return 0 on success, negative error code on failure
+ */
+int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr);
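+
+/*
+ * A minimal sketch of encoding one audio frame into an encoder-allocated
+ * packet (illustrative only; avctx is assumed to be an opened audio encoder
+ * and frame to hold avctx->frame_size samples).
+ * @code
+ * AVPacket pkt;
+ * av_init_packet(&pkt);
+ * pkt.data = NULL;                 // let the encoder allocate the payload
+ * pkt.size = 0;
+ * int got_packet = 0;
+ * if (avcodec_encode_audio2(avctx, &pkt, frame, &got_packet) < 0)
+ *     return -1;
+ * if (got_packet) {
+ *     // write pkt.data / pkt.size to the output, then release it
+ *     av_free_packet(&pkt);
+ * }
+ * @endcode
+ */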
+
+/**
+ * Encode a frame of video.
+ *
+ * Takes input raw video data from frame and writes the next output packet, if
+ * available, to avpkt. The output packet does not necessarily contain data for
+ * the most recent frame, as encoders can delay and reorder input frames
+ * internally as needed.
+ *
+ * @param avctx codec context
+ * @param avpkt output AVPacket.
+ * The user can supply an output buffer by setting
+ * avpkt->data and avpkt->size prior to calling the
+ * function, but if the size of the user-provided data is not
+ * large enough, encoding will fail. All other AVPacket fields
+ * will be reset by the encoder using av_init_packet(). If
+ * avpkt->data is NULL, the encoder will allocate it.
+ * The encoder will set avpkt->size to the size of the
+ * output packet. The returned data (if any) belongs to the
+ * caller, who is responsible for freeing it.
+ *
+ * If this function fails or produces no output, avpkt will be
+ * freed using av_free_packet() (i.e. avpkt->destruct will be
+ * called to free the user supplied buffer).
+ * @param[in] frame AVFrame containing the raw video data to be encoded.
+ * May be NULL when flushing an encoder that has the
+ * CODEC_CAP_DELAY capability set.
+ * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
+ * output packet is non-empty, and to 0 if it is
+ * empty. If the function returns an error, the
+ * packet can be assumed to be invalid, and the
+ * value of got_packet_ptr is undefined and should
+ * not be used.
+ * @return 0 on success, negative error code on failure
+ */
+int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr);
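+
+/*
+ * A short sketch of flushing a delaying video encoder at end of stream by
+ * passing NULL frames (illustrative only; avctx is assumed to be an opened
+ * encoder with CODEC_CAP_DELAY set).
+ * @code
+ * int got_packet = 0;
+ * do {
+ *     AVPacket pkt;
+ *     av_init_packet(&pkt);
+ *     pkt.data = NULL;
+ *     pkt.size = 0;
+ *     if (avcodec_encode_video2(avctx, &pkt, NULL, &got_packet) < 0)
+ *         break;
+ *     if (got_packet) {
+ *         // write the delayed packet, then release it
+ *         av_free_packet(&pkt);
+ *     }
+ * } while (got_packet);
+ * @endcode
+ */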
+
+int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+ const AVSubtitle *sub);
+
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_picture
+ * @{
+ */
+
+/**
+ * Allocate memory for a picture. Call avpicture_free() to free it.
+ *
+ * @see avpicture_fill()
+ *
+ * @param picture the picture to be filled in
+ * @param pix_fmt the format of the picture
+ * @param width the width of the picture
+ * @param height the height of the picture
+ * @return zero if successful, a negative value if not
+ */
+int avpicture_alloc(AVPicture *picture, enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * Free a picture previously allocated by avpicture_alloc().
+ * The data buffer used by the AVPicture is freed, but the AVPicture structure
+ * itself is not.
+ *
+ * @param picture the AVPicture to be freed
+ */
+void avpicture_free(AVPicture *picture);
+
+/**
+ * Fill in the AVPicture fields.
+ * The fields of the given AVPicture are filled in by using the 'ptr' address
+ * which points to the image data buffer. Depending on the specified picture
+ * format, one or multiple image data pointers and line sizes will be set.
+ * If a planar format is specified, several pointers will be set pointing to
+ * the different picture planes and the line sizes of the different planes
+ * will be stored in the linesize array.
+ * Call with ptr == NULL to get the required size for the ptr buffer.
+ *
+ * To allocate the buffer and fill in the AVPicture fields in one call,
+ * use avpicture_alloc().
+ *
+ * @param picture AVPicture whose fields are to be filled in
+ * @param ptr Buffer which will contain or contains the actual image data
+ * @param pix_fmt The format in which the picture data is stored.
+ * @param width the width of the image in pixels
+ * @param height the height of the image in pixels
+ * @return size of the image data in bytes
+ */
+int avpicture_fill(AVPicture *picture, uint8_t *ptr,
+ enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * Copy pixel data from an AVPicture into a buffer.
+ * The data is stored compactly, without any gaps for alignment or padding
+ * which may be applied by avpicture_fill().
+ *
+ * @see avpicture_get_size()
+ *
+ * @param[in] src AVPicture containing image data
+ * @param[in] pix_fmt The format in which the picture data is stored.
+ * @param[in] width the width of the image in pixels.
+ * @param[in] height the height of the image in pixels.
+ * @param[out] dest A buffer into which picture data will be copied.
+ * @param[in] dest_size The size of 'dest'.
+ * @return The number of bytes written to dest, or a negative value (error code) on error.
+ */
+int avpicture_layout(const AVPicture* src, enum AVPixelFormat pix_fmt,
+ int width, int height,
+ unsigned char *dest, int dest_size);
+
+/**
+ * Calculate the size in bytes that a picture of the given width and height
+ * would occupy if stored in the given picture format.
+ * Note that this returns the size of a compact representation as generated
+ * by avpicture_layout(), which can be smaller than the size required for e.g.
+ * avpicture_fill().
+ *
+ * @param pix_fmt the given picture format
+ * @param width the width of the image
+ * @param height the height of the image
+ * @return Image data size in bytes or -1 on error (e.g. too large dimensions).
+ */
+int avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height);
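+
+/*
+ * A minimal sketch of allocating a compact image buffer and wrapping it in an
+ * AVPicture (illustrative only; width and height are assumed to be valid
+ * dimensions for the chosen pixel format).
+ * @code
+ * int size = avpicture_get_size(AV_PIX_FMT_YUV420P, width, height);
+ * if (size < 0)
+ *     return -1;
+ * uint8_t *buffer = av_malloc(size);
+ * if (!buffer)
+ *     return -1;
+ * AVPicture pic;
+ * avpicture_fill(&pic, buffer, AV_PIX_FMT_YUV420P, width, height);
+ * // pic.data[] / pic.linesize[] now point into buffer
+ * av_free(buffer);
+ * @endcode
+ */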
+
+#if FF_API_DEINTERLACE
+/**
+ * deinterlace - if not supported return -1
+ *
+ * @deprecated - use yadif (in libavfilter) instead
+ */
+attribute_deprecated
+int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
+ enum AVPixelFormat pix_fmt, int width, int height);
+#endif
+/**
+ * Copy image src to dst. Wraps av_picture_data_copy() above.
+ */
+void av_picture_copy(AVPicture *dst, const AVPicture *src,
+ enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * Crop image top and left side.
+ */
+int av_picture_crop(AVPicture *dst, const AVPicture *src,
+ enum AVPixelFormat pix_fmt, int top_band, int left_band);
+
+/**
+ * Pad image.
+ */
+int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum AVPixelFormat pix_fmt,
+ int padtop, int padbottom, int padleft, int padright, int *color);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavc_misc Utility functions
+ * @ingroup libavc
+ *
+ * Miscellaneous utility functions related to both encoding and decoding
+ * (or neither).
+ * @{
+ */
+
+/**
+ * @defgroup lavc_misc_pixfmt Pixel formats
+ *
+ * Functions for working with pixel formats.
+ * @{
+ */
+
+/**
+ * @deprecated Use av_pix_fmt_get_chroma_sub_sample
+ */
+
+void attribute_deprecated avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift);
+
+/**
+ * Return a value representing the fourCC code associated to the
+ * pixel format pix_fmt, or 0 if no associated fourCC code can be
+ * found.
+ */
+unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt);
+
+#define FF_LOSS_RESOLUTION 0x0001 /**< loss due to resolution change */
+#define FF_LOSS_DEPTH 0x0002 /**< loss due to color depth change */
+#define FF_LOSS_COLORSPACE 0x0004 /**< loss due to color space conversion */
+#define FF_LOSS_ALPHA 0x0008 /**< loss of alpha bits */
+#define FF_LOSS_COLORQUANT 0x0010 /**< loss due to color quantization */
+#define FF_LOSS_CHROMA 0x0020 /**< loss of chroma (e.g. RGB to gray conversion) */
+
+/**
+ * Compute what kind of losses will occur when converting from one specific
+ * pixel format to another.
+ * When converting from one pixel format to another, information loss may occur.
+ * For example, when converting from RGB24 to GRAY, the color information will
+ * be lost. Similarly, other losses occur when converting from some formats to
+ * other formats. These losses can involve loss of chroma, but also loss of
+ * resolution, loss of color depth, loss due to the color space conversion, loss
+ * of the alpha bits or loss due to color quantization.
+ * avcodec_get_pix_fmt_loss() informs you about the various types of losses
+ * which will occur when converting from one pixel format to another.
+ *
+ * @param[in] dst_pix_fmt destination pixel format
+ * @param[in] src_pix_fmt source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @return Combination of flags informing you what kind of losses will occur.
+ */
+int avcodec_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, enum AVPixelFormat src_pix_fmt,
+ int has_alpha);
+
+/**
+ * Find the best pixel format to convert to given a certain source pixel
+ * format. When converting from one pixel format to another, information loss
+ * may occur. For example, when converting from RGB24 to GRAY, the color
+ * information will be lost. Similarly, other losses occur when converting from
+ * some formats to other formats. avcodec_find_best_pix_fmt2() searches which of
+ * the given pixel formats should be used to suffer the least amount of loss.
+ * The pixel formats from which it chooses one are determined by the
+ * pix_fmt_list parameter.
+ *
+ *
+ * @param[in] pix_fmt_list AV_PIX_FMT_NONE terminated array of pixel formats to choose from
+ * @param[in] src_pix_fmt source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur.
+ * @return The best pixel format to convert to or -1 if none was found.
+ */
+enum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat *pix_fmt_list,
+ enum AVPixelFormat src_pix_fmt,
+ int has_alpha, int *loss_ptr);
+
+enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat * fmt);
+
+/**
+ * @}
+ */
+
+#if FF_API_SET_DIMENSIONS
+/**
+ * @deprecated this function is not supposed to be used from outside of lavc
+ */
+attribute_deprecated
+void avcodec_set_dimensions(AVCodecContext *s, int width, int height);
+#endif
+
+/**
+ * Put a string representing the codec tag codec_tag in buf.
+ *
+ * @param buf buffer to place codec tag in
+ * @param buf_size size in bytes of buf
+ * @param codec_tag codec tag to assign
+ * @return the length of the string that would have been generated if
+ * enough space had been available, excluding the trailing null
+ */
+size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag);
+
+void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode);
+
+/**
+ * Return a name for the specified profile, if available.
+ *
+ * @param codec the codec that is searched for the given profile
+ * @param profile the profile value for which a name is requested
+ * @return A name for the profile if found, NULL otherwise.
+ */
+const char *av_get_profile_name(const AVCodec *codec, int profile);
+
+int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size);
+int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count);
+//FIXME func typedef
+
+/**
+ * Fill audio frame data and linesize.
+ * AVFrame extended_data channel pointers are allocated if necessary for
+ * planar audio.
+ *
+ * @param frame the AVFrame
+ * frame->nb_samples must be set prior to calling the
+ * function. This function fills in frame->data,
+ * frame->extended_data, frame->linesize[0].
+ * @param nb_channels channel count
+ * @param sample_fmt sample format
+ * @param buf buffer to use for frame data
+ * @param buf_size size of buffer
+ * @param align plane size sample alignment (0 = default)
+ * @return 0 on success, negative error code on failure
+ */
+int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
+ enum AVSampleFormat sample_fmt, const uint8_t *buf,
+ int buf_size, int align);
+
+/**
+ * Reset the internal decoder state / flush internal buffers. Should be called
+ * e.g. when seeking or when switching to a different stream.
+ *
+ * @note when refcounted frames are not used (i.e. avctx->refcounted_frames is 0),
+ * this invalidates the frames previously returned from the decoder. When
+ * refcounted frames are used, the decoder just releases any references it might
+ * keep internally, but the caller's reference remains valid.
+ */
+void avcodec_flush_buffers(AVCodecContext *avctx);
+
+/**
+ * Return codec bits per sample.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_bits_per_sample(enum AVCodecID codec_id);
+
+/**
+ * Return codec bits per sample.
+ * Only return non-zero if the bits per sample is exactly correct, not an
+ * approximation.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_exact_bits_per_sample(enum AVCodecID codec_id);
+
+/**
+ * Return audio frame duration.
+ *
+ * @param avctx codec context
+ * @param frame_bytes size of the frame, or 0 if unknown
+ * @return frame duration, in samples, if known. 0 if not able to
+ * determine.
+ */
+int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes);
+
+
+typedef struct AVBitStreamFilterContext {
+ void *priv_data;
+ struct AVBitStreamFilter *filter;
+ AVCodecParserContext *parser;
+ struct AVBitStreamFilterContext *next;
+} AVBitStreamFilterContext;
+
+
+typedef struct AVBitStreamFilter {
+ const char *name;
+ int priv_data_size;
+ int (*filter)(AVBitStreamFilterContext *bsfc,
+ AVCodecContext *avctx, const char *args,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe);
+ void (*close)(AVBitStreamFilterContext *bsfc);
+ struct AVBitStreamFilter *next;
+} AVBitStreamFilter;
+
+void av_register_bitstream_filter(AVBitStreamFilter *bsf);
+AVBitStreamFilterContext *av_bitstream_filter_init(const char *name);
+int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc,
+ AVCodecContext *avctx, const char *args,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe);
+void av_bitstream_filter_close(AVBitStreamFilterContext *bsf);
+
+AVBitStreamFilter *av_bitstream_filter_next(AVBitStreamFilter *f);
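+
+/*
+ * A short sketch of running a named bitstream filter over one packet
+ * (illustrative only; "h264_mp4toannexb" is just an example filter name, and
+ * avctx/pkt are assumed to come from the demuxed stream).
+ * @code
+ * AVBitStreamFilterContext *bsfc = av_bitstream_filter_init("h264_mp4toannexb");
+ * if (bsfc) {
+ *     uint8_t *out = NULL;
+ *     int out_size = 0;
+ *     int ret = av_bitstream_filter_filter(bsfc, avctx, NULL,
+ *                                          &out, &out_size,
+ *                                          pkt.data, pkt.size,
+ *                                          pkt.flags & AV_PKT_FLAG_KEY);
+ *     // when ret > 0, out is newly allocated and must be freed with av_free()
+ *     av_bitstream_filter_close(bsfc);
+ * }
+ * @endcode
+ */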
+
+/* memory */
+
+/**
+ * Allocate a buffer with padding, reusing the given one if large enough.
+ *
+ * Same behaviour as av_fast_malloc(), but the buffer has additional
+ * FF_INPUT_BUFFER_PADDING_SIZE at the end which will always be memset to 0.
+ *
+ */
+void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Encode extradata length to a buffer. Used by xiph codecs.
+ *
+ * @param s buffer to write to; must be at least (v/255+1) bytes long
+ * @param v size of extradata in bytes
+ * @return number of bytes written to the buffer.
+ */
+unsigned int av_xiphlacing(unsigned char *s, unsigned int v);
+
+#if FF_API_MISSING_SAMPLE
+/**
+ * Log a generic warning message about a missing feature. This function is
+ * intended to be used internally by Libav (libavcodec, libavformat, etc.)
+ * only, and would normally not be used by applications.
+ * @param[in] avc a pointer to an arbitrary struct of which the first field is
+ * a pointer to an AVClass struct
+ * @param[in] feature string containing the name of the missing feature
+ * @param[in] want_sample indicates if samples are wanted which exhibit this feature.
+ * If want_sample is non-zero, additional verbiage will be added to the log
+ * message which tells the user how to report samples to the development
+ * mailing list.
+ * @deprecated Use avpriv_report_missing_feature() instead.
+ */
+attribute_deprecated
+void av_log_missing_feature(void *avc, const char *feature, int want_sample);
+
+/**
+ * Log a generic warning message asking for a sample. This function is
+ * intended to be used internally by Libav (libavcodec, libavformat, etc.)
+ * only, and would normally not be used by applications.
+ * @param[in] avc a pointer to an arbitrary struct of which the first field is
+ * a pointer to an AVClass struct
+ * @param[in] msg string containing an optional message, or NULL if no message
+ * @deprecated Use avpriv_request_sample() instead.
+ */
+attribute_deprecated
+void av_log_ask_for_sample(void *avc, const char *msg, ...) av_printf_format(2, 3);
+#endif /* FF_API_MISSING_SAMPLE */
+
+/**
+ * Register the hardware accelerator hwaccel.
+ */
+void av_register_hwaccel(AVHWAccel *hwaccel);
+
+/**
+ * If hwaccel is NULL, returns the first registered hardware accelerator,
+ * if hwaccel is non-NULL, returns the next registered hardware accelerator
+ * after hwaccel, or NULL if hwaccel is the last one.
+ */
+AVHWAccel *av_hwaccel_next(AVHWAccel *hwaccel);
+
+
+/**
+ * Lock operation used by lockmgr
+ */
+enum AVLockOp {
+ AV_LOCK_CREATE, ///< Create a mutex
+ AV_LOCK_OBTAIN, ///< Lock the mutex
+ AV_LOCK_RELEASE, ///< Unlock the mutex
+ AV_LOCK_DESTROY, ///< Free mutex resources
+};
+
+/**
+ * Register a user provided lock manager supporting the operations
+ * specified by AVLockOp. mutex points to a (void *) where the
+ * lockmgr should store/get a pointer to a user allocated mutex. It's
+ * NULL upon AV_LOCK_CREATE and != NULL for all other ops.
+ *
+ * @param cb User defined callback. Note: Libav may invoke calls to this
+ * callback during the call to av_lockmgr_register().
+ * Thus, the application must be prepared to handle that.
+ * If cb is set to NULL the lockmgr will be unregistered.
+ * Also note that during unregistration the previously registered
+ * lockmgr callback may also be invoked.
+ */
+int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op));
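+
+/*
+ * A minimal pthread-based lock manager sketch (illustrative only; assumes
+ * <pthread.h> and <stdlib.h> are available on the target platform).
+ * @code
+ * static int lockmgr_cb(void **mutex, enum AVLockOp op)
+ * {
+ *     switch (op) {
+ *     case AV_LOCK_CREATE:
+ *         *mutex = malloc(sizeof(pthread_mutex_t));
+ *         return !*mutex || pthread_mutex_init(*mutex, NULL) != 0;
+ *     case AV_LOCK_OBTAIN:
+ *         return pthread_mutex_lock(*mutex) != 0;
+ *     case AV_LOCK_RELEASE:
+ *         return pthread_mutex_unlock(*mutex) != 0;
+ *     case AV_LOCK_DESTROY:
+ *         pthread_mutex_destroy(*mutex);
+ *         free(*mutex);
+ *         return 0;
+ *     }
+ *     return 1;
+ * }
+ * // ... at program start-up:
+ * av_lockmgr_register(lockmgr_cb);
+ * @endcode
+ */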
+
+/**
+ * Get the type of the given codec.
+ */
+enum AVMediaType avcodec_get_type(enum AVCodecID codec_id);
+
+/**
+ * @return a positive value if s is open (i.e. avcodec_open2() was called on it
+ * with no corresponding avcodec_close()), 0 otherwise.
+ */
+int avcodec_is_open(AVCodecContext *s);
+
+/**
+ * @return a non-zero number if codec is an encoder, zero otherwise
+ */
+int av_codec_is_encoder(const AVCodec *codec);
+
+/**
+ * @return a non-zero number if codec is a decoder, zero otherwise
+ */
+int av_codec_is_decoder(const AVCodec *codec);
+
+/**
+ * @return descriptor for given codec ID or NULL if no descriptor exists.
+ */
+const AVCodecDescriptor *avcodec_descriptor_get(enum AVCodecID id);
+
+/**
+ * Iterate over all codec descriptors known to libavcodec.
+ *
+ * @param prev previous descriptor. NULL to get the first descriptor.
+ *
+ * @return next descriptor or NULL after the last descriptor
+ */
+const AVCodecDescriptor *avcodec_descriptor_next(const AVCodecDescriptor *prev);
+
+/**
+ * @return codec descriptor with the given name or NULL if no such descriptor
+ * exists.
+ */
+const AVCodecDescriptor *avcodec_descriptor_get_by_name(const char *name);
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_AVCODEC_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavcodec/avfft.h b/dom/media/platforms/ffmpeg/libav55/include/libavcodec/avfft.h
new file mode 100644
index 000000000..e2e727da9
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavcodec/avfft.h
@@ -0,0 +1,118 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AVFFT_H
+#define AVCODEC_AVFFT_H
+
+/**
+ * @file
+ * @ingroup lavc_fft
+ * FFT functions
+ */
+
+/**
+ * @defgroup lavc_fft FFT functions
+ * @ingroup lavc_misc
+ *
+ * @{
+ */
+
+typedef float FFTSample;
+
+typedef struct FFTComplex {
+ FFTSample re, im;
+} FFTComplex;
+
+typedef struct FFTContext FFTContext;
+
+/**
+ * Set up a complex FFT.
+ * @param nbits log2 of the length of the input array
+ * @param inverse if 0 perform the forward transform, if 1 perform the inverse
+ */
+FFTContext *av_fft_init(int nbits, int inverse);
+
+/**
+ * Do the permutation needed BEFORE calling av_fft_calc().
+ */
+void av_fft_permute(FFTContext *s, FFTComplex *z);
+
+/**
+ * Do a complex FFT with the parameters defined in av_fft_init(). The
+ * input data must be permuted before calling this function. No 1.0/sqrt(n) normalization is done.
+ */
+void av_fft_calc(FFTContext *s, FFTComplex *z);
+
+void av_fft_end(FFTContext *s);
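+
+/*
+ * A minimal sketch of a 1024-point forward complex FFT (illustrative only;
+ * the buffer is assumed to be filled with input samples beforehand).
+ * @code
+ * FFTContext *fft = av_fft_init(10, 0);   // 1 << 10 = 1024 points, forward
+ * if (!fft)
+ *     return -1;
+ * FFTComplex buf[1024];
+ * // ... fill buf with input samples ...
+ * av_fft_permute(fft, buf);               // reorder input as required
+ * av_fft_calc(fft, buf);                  // in-place transform
+ * av_fft_end(fft);
+ * @endcode
+ */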
+
+FFTContext *av_mdct_init(int nbits, int inverse, double scale);
+void av_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input);
+void av_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input);
+void av_mdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input);
+void av_mdct_end(FFTContext *s);
+
+/* Real Discrete Fourier Transform */
+
+enum RDFTransformType {
+ DFT_R2C,
+ IDFT_C2R,
+ IDFT_R2C,
+ DFT_C2R,
+};
+
+typedef struct RDFTContext RDFTContext;
+
+/**
+ * Set up a real FFT.
+ * @param nbits log2 of the length of the input array
+ * @param trans the type of transform
+ */
+RDFTContext *av_rdft_init(int nbits, enum RDFTransformType trans);
+void av_rdft_calc(RDFTContext *s, FFTSample *data);
+void av_rdft_end(RDFTContext *s);
+
+/* Discrete Cosine Transform */
+
+typedef struct DCTContext DCTContext;
+
+enum DCTTransformType {
+ DCT_II = 0,
+ DCT_III,
+ DCT_I,
+ DST_I,
+};
+
+/**
+ * Set up DCT.
+ *
+ * @param nbits size of the input array:
+ * (1 << nbits) for DCT-II, DCT-III and DST-I
+ * (1 << nbits) + 1 for DCT-I
+ * @param type the type of transform
+ *
+ * @note the first element of the input of DST-I is ignored
+ */
+DCTContext *av_dct_init(int nbits, enum DCTTransformType type);
+void av_dct_calc(DCTContext *s, FFTSample *data);
+void av_dct_end (DCTContext *s);
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_AVFFT_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavcodec/dxva2.h b/dom/media/platforms/ffmpeg/libav55/include/libavcodec/dxva2.h
new file mode 100644
index 000000000..d161eb9f5
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavcodec/dxva2.h
@@ -0,0 +1,88 @@
+/*
+ * DXVA2 HW acceleration
+ *
+ * copyright (c) 2009 Laurent Aimar
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_DXVA_H
+#define AVCODEC_DXVA_H
+
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_dxva2
+ * Public libavcodec DXVA2 header.
+ */
+
+#define _WIN32_WINNT 0x0600
+#include <stdint.h>
+#include <d3d9.h>
+#include <dxva2api.h>
+
+/**
+ * @defgroup lavc_codec_hwaccel_dxva2 DXVA2
+ * @ingroup lavc_codec_hwaccel
+ *
+ * @{
+ */
+
+#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for DXVA2 and old UVD/UVD+ ATI video cards
+
+/**
+ * This structure is used to provide the necessary configuration and data
+ * to the DXVA2 Libav HWAccel implementation.
+ *
+ * The application must make it available as AVCodecContext.hwaccel_context.
+ */
+struct dxva_context {
+ /**
+ * DXVA2 decoder object
+ */
+ IDirectXVideoDecoder *decoder;
+
+ /**
+ * DXVA2 configuration used to create the decoder
+ */
+ const DXVA2_ConfigPictureDecode *cfg;
+
+ /**
+ * The number of surfaces in the surface array
+ */
+ unsigned surface_count;
+
+ /**
+ * The array of Direct3D surfaces used to create the decoder
+ */
+ LPDIRECT3DSURFACE9 *surface;
+
+ /**
+ * A bit field configuring the workarounds needed for using the decoder
+ */
+ uint64_t workaround;
+
+ /**
+ * Private to the Libav AVHWAccel implementation
+ */
+ unsigned report_id;
+};
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_DXVA_H */
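Per the comments above, the application creates the DXVA2 objects itself and merely hands them to libavcodec through AVCodecContext.hwaccel_context. A hedged sketch, assuming the IDirectXVideoDecoder, its DXVA2_ConfigPictureDecode and the Direct3D surface array were already created with the usual DXVA2 decoder-service calls (not shown), and that av_mallocz() from libavutil/mem.h is available:

    #include <libavutil/mem.h>
    #include <libavcodec/avcodec.h>
    #include <libavcodec/dxva2.h>

    static int attach_dxva2(AVCodecContext *avctx,
                            IDirectXVideoDecoder *decoder,
                            const DXVA2_ConfigPictureDecode *cfg,
                            LPDIRECT3DSURFACE9 *surfaces, unsigned count)
    {
        struct dxva_context *ctx = av_mallocz(sizeof(*ctx));
        if (!ctx)
            return AVERROR(ENOMEM);

        ctx->decoder       = decoder;   /* created by the application */
        ctx->cfg           = cfg;       /* configuration used for that decoder */
        ctx->surface       = surfaces;  /* Direct3D surfaces backing the decoder */
        ctx->surface_count = count;
        ctx->workaround    = 0;         /* or FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG */

        avctx->hwaccel_context = ctx;   /* must be set before decoding starts */
        return 0;
    }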
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavcodec/vaapi.h b/dom/media/platforms/ffmpeg/libav55/include/libavcodec/vaapi.h
new file mode 100644
index 000000000..39e88259d
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavcodec/vaapi.h
@@ -0,0 +1,173 @@
+/*
+ * Video Acceleration API (shared data between Libav and the video player)
+ * HW decode acceleration for MPEG-2, MPEG-4, H.264 and VC-1
+ *
+ * Copyright (C) 2008-2009 Splitted-Desktop Systems
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VAAPI_H
+#define AVCODEC_VAAPI_H
+
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_vaapi
+ * Public libavcodec VA API header.
+ */
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavc_codec_hwaccel_vaapi VA API Decoding
+ * @ingroup lavc_codec_hwaccel
+ * @{
+ */
+
+/**
+ * This structure is used to share data between the Libav library and
+ * the client video application.
+ * This shall be zero-allocated and available as
+ * AVCodecContext.hwaccel_context. All user members can be set once
+ * during initialization or through each AVCodecContext.get_buffer()
+ * function call. In any case, they must be valid prior to calling
+ * decoding functions.
+ */
+struct vaapi_context {
+ /**
+ * Window system dependent data
+ *
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ void *display;
+
+ /**
+ * Configuration ID
+ *
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ uint32_t config_id;
+
+ /**
+ * Context ID (video decode pipeline)
+ *
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ uint32_t context_id;
+
+ /**
+ * VAPictureParameterBuffer ID
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ uint32_t pic_param_buf_id;
+
+ /**
+ * VAIQMatrixBuffer ID
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ uint32_t iq_matrix_buf_id;
+
+ /**
+ * VABitPlaneBuffer ID (for VC-1 decoding)
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ uint32_t bitplane_buf_id;
+
+ /**
+ * Slice parameter/data buffer IDs
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ uint32_t *slice_buf_ids;
+
+ /**
+ * Number of effective slice buffer IDs to send to the HW
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ unsigned int n_slice_buf_ids;
+
+ /**
+ * Size of pre-allocated slice_buf_ids
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ unsigned int slice_buf_ids_alloc;
+
+ /**
+ * Pointer to VASliceParameterBuffers
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ void *slice_params;
+
+ /**
+ * Size of a VASliceParameterBuffer element
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ unsigned int slice_param_size;
+
+ /**
+ * Size of pre-allocated slice_params
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ unsigned int slice_params_alloc;
+
+ /**
+ * Number of slices currently filled in
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ unsigned int slice_count;
+
+ /**
+ * Pointer to slice data buffer base
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ const uint8_t *slice_data;
+
+ /**
+ * Current size of slice data
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ uint32_t slice_data_size;
+};
+
+/* @} */
+
+#endif /* AVCODEC_VAAPI_H */
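Only display, config_id and context_id are marked "Set by user" above; everything else is managed by libavcodec. A minimal sketch, assuming the VADisplay, VAConfigID and VAContextID were obtained beforehand from libva (vaInitialize/vaCreateConfig/vaCreateContext, not shown) and that av_mallocz() from libavutil/mem.h is available:

    #include <libavutil/mem.h>
    #include <libavcodec/avcodec.h>
    #include <libavcodec/vaapi.h>

    static int attach_vaapi(AVCodecContext *avctx, void *va_display,
                            uint32_t config_id, uint32_t context_id)
    {
        /* The comment above requires the struct to be zero-allocated. */
        struct vaapi_context *ctx = av_mallocz(sizeof(*ctx));
        if (!ctx)
            return AVERROR(ENOMEM);

        ctx->display    = va_display;   /* VADisplay from vaInitialize() */
        ctx->config_id  = config_id;    /* from vaCreateConfig() */
        ctx->context_id = context_id;   /* from vaCreateContext() */

        avctx->hwaccel_context = ctx;   /* must be valid before decoding */
        return 0;
    }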
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavcodec/vda.h b/dom/media/platforms/ffmpeg/libav55/include/libavcodec/vda.h
new file mode 100644
index 000000000..987b94f1f
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavcodec/vda.h
@@ -0,0 +1,142 @@
+/*
+ * VDA HW acceleration
+ *
+ * copyright (c) 2011 Sebastien Zwickert
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VDA_H
+#define AVCODEC_VDA_H
+
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_vda
+ * Public libavcodec VDA header.
+ */
+
+#include "libavcodec/version.h"
+
+#include <stdint.h>
+
+// emmintrin.h is unable to compile with -std=c99 -Werror=missing-prototypes
+// http://openradar.appspot.com/8026390
+#undef __GNUC_STDC_INLINE__
+
+#define Picture QuickdrawPicture
+#include <VideoDecodeAcceleration/VDADecoder.h>
+#undef Picture
+
+/**
+ * @defgroup lavc_codec_hwaccel_vda VDA
+ * @ingroup lavc_codec_hwaccel
+ *
+ * @{
+ */
+
+/**
+ * This structure is used to provide the necessary configurations and data
+ * to the VDA Libav HWAccel implementation.
+ *
+ * The application must make it available as AVCodecContext.hwaccel_context.
+ */
+struct vda_context {
+ /**
+ * VDA decoder object.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by libavcodec.
+ */
+ VDADecoder decoder;
+
+ /**
+ * The Core Video pixel buffer that contains the current image data.
+ *
+ * encoding: unused
+ * decoding: Set by libavcodec. Unset by user.
+ */
+ CVPixelBufferRef cv_buffer;
+
+ /**
+ * Use the hardware decoder in synchronous mode.
+ *
+ * encoding: unused
+ * decoding: Set by user.
+ */
+ int use_sync_decoding;
+
+ /**
+ * The frame width.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by user.
+ */
+ int width;
+
+ /**
+ * The frame height.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by user.
+ */
+ int height;
+
+ /**
+ * The frame format.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by user.
+ */
+ int format;
+
+ /**
+ * The pixel format for output image buffers.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by user.
+ */
+ OSType cv_pix_fmt_type;
+
+ /**
+ * The current bitstream buffer.
+ */
+ uint8_t *priv_bitstream;
+
+ /**
+ * The current size of the bitstream.
+ */
+ int priv_bitstream_size;
+
+ /**
+ * The reference size used for fast reallocation.
+ */
+ int priv_allocated_size;
+};
+
+/** Create the video decoder. */
+int ff_vda_create_decoder(struct vda_context *vda_ctx,
+ uint8_t *extradata,
+ int extradata_size);
+
+/** Destroy the video decoder. */
+int ff_vda_destroy_decoder(struct vda_context *vda_ctx);
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_VDA_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavcodec/vdpau.h b/dom/media/platforms/ffmpeg/libav55/include/libavcodec/vdpau.h
new file mode 100644
index 000000000..75cb1bf7a
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavcodec/vdpau.h
@@ -0,0 +1,189 @@
+/*
+ * The Video Decode and Presentation API for UNIX (VDPAU) is used for
+ * hardware-accelerated decoding of MPEG-1/2, H.264 and VC-1.
+ *
+ * Copyright (C) 2008 NVIDIA
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VDPAU_H
+#define AVCODEC_VDPAU_H
+
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_vdpau
+ * Public libavcodec VDPAU header.
+ */
+
+
+/**
+ * @defgroup lavc_codec_hwaccel_vdpau VDPAU Decoder and Renderer
+ * @ingroup lavc_codec_hwaccel
+ *
+ * VDPAU hardware acceleration has two modules
+ * - VDPAU decoding
+ * - VDPAU presentation
+ *
+ * The VDPAU decoding module parses all headers using Libav
+ * parsing mechanisms and uses VDPAU for the actual decoding.
+ *
+ * As per the current implementation, the actual decoding
+ * and rendering (API calls) are done as part of the VDPAU
+ * presentation (vo_vdpau.c) module.
+ *
+ * @{
+ */
+
+#include <vdpau/vdpau.h>
+#include <vdpau/vdpau_x11.h>
+
+#include "libavutil/attributes.h"
+
+#include "avcodec.h"
+#include "version.h"
+
+#if FF_API_BUFS_VDPAU
+union AVVDPAUPictureInfo {
+ VdpPictureInfoH264 h264;
+ VdpPictureInfoMPEG1Or2 mpeg;
+ VdpPictureInfoVC1 vc1;
+ VdpPictureInfoMPEG4Part2 mpeg4;
+};
+#endif
+
+/**
+ * This structure is used to share data between the libavcodec library and
+ * the client video application.
+ * The user shall zero-allocate the structure and make it available as
+ * AVCodecContext.hwaccel_context. Members can be set by the user once
+ * during initialization or through each AVCodecContext.get_buffer()
+ * function call. In any case, they must be valid prior to calling
+ * decoding functions.
+ *
+ * The size of this structure is not a part of the public ABI and must not
+ * be used outside of libavcodec. Use av_vdpau_alloc_context() to allocate an
+ * AVVDPAUContext.
+ */
+typedef struct AVVDPAUContext {
+ /**
+ * VDPAU decoder handle
+ *
+ * Set by user.
+ */
+ VdpDecoder decoder;
+
+ /**
+ * VDPAU decoder render callback
+ *
+ * Set by the user.
+ */
+ VdpDecoderRender *render;
+
+#if FF_API_BUFS_VDPAU
+ /**
+ * VDPAU picture information
+ *
+ * Set by libavcodec.
+ */
+ attribute_deprecated
+ union AVVDPAUPictureInfo info;
+
+ /**
+ * Allocated size of the bitstream_buffers table.
+ *
+ * Set by libavcodec.
+ */
+ attribute_deprecated
+ int bitstream_buffers_allocated;
+
+ /**
+ * Useful bitstream buffers in the bitstream buffers table.
+ *
+ * Set by libavcodec.
+ */
+ attribute_deprecated
+ int bitstream_buffers_used;
+
+ /**
+ * Table of bitstream buffers.
+ * The user is responsible for freeing this buffer using av_freep().
+ *
+ * Set by libavcodec.
+ */
+ attribute_deprecated
+ VdpBitstreamBuffer *bitstream_buffers;
+#endif
+} AVVDPAUContext;
+
+/**
+ * Allocate an AVVDPAUContext.
+ *
+ * @return Newly-allocated AVVDPAUContext or NULL on failure.
+ */
+AVVDPAUContext *av_vdpau_alloc_context(void);
+
+/**
+ * Get a decoder profile that should be used for initializing a VDPAU decoder.
+ * Should be called from the AVCodecContext.get_format() callback.
+ *
+ * @param avctx the codec context being used for decoding the stream
+ * @param profile a pointer into which the result will be written on success.
+ * The contents of profile are undefined if this function returns
+ * an error.
+ *
+ * @return 0 on success (non-negative), a negative AVERROR on failure.
+ */
+int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile);
+
+#if FF_API_CAP_VDPAU
+/** @brief The videoSurface is used for rendering. */
+#define FF_VDPAU_STATE_USED_FOR_RENDER 1
+
+/**
+ * @brief The videoSurface is needed for reference/prediction.
+ * The codec manipulates this.
+ */
+#define FF_VDPAU_STATE_USED_FOR_REFERENCE 2
+
+/**
+ * @brief This structure is used as a callback between the Libav
+ * decoder (vd_) and presentation (vo_) module.
+ * This is used for defining a video frame containing surface,
+ * picture parameter, bitstream information etc which are passed
+ * between the Libav decoder and its clients.
+ */
+struct vdpau_render_state {
+ VdpVideoSurface surface; ///< Used as rendered surface, never changed.
+
+ int state; ///< Holds FF_VDPAU_STATE_* values.
+
+ /** picture parameter information for all supported codecs */
+ union AVVDPAUPictureInfo info;
+
+ /** Describe size/location of the compressed video data.
+ Set to 0 when freeing bitstream_buffers. */
+ int bitstream_buffers_allocated;
+ int bitstream_buffers_used;
+ /** The user is responsible for freeing this buffer using av_freep(). */
+ VdpBitstreamBuffer *bitstream_buffers;
+};
+#endif
+
+/* @}*/
+
+#endif /* AVCODEC_VDPAU_H */
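Leaving the FF_API_* compatibility block aside, the supported flow is: call av_vdpau_get_profile() from AVCodecContext.get_format() to learn which VdpDecoderProfile to create, then allocate the context with av_vdpau_alloc_context() and fill in the handles obtained from the application's VDPAU setup. A hedged sketch (the VdpDecoder and the VdpDecoderRender pointer come from the application's VdpGetProcAddress()/decoder creation calls, not shown):

    #include <libavcodec/avcodec.h>
    #include <libavcodec/vdpau.h>

    static int attach_vdpau(AVCodecContext *avctx,
                            VdpDecoder decoder, VdpDecoderRender *render)
    {
        /* The struct size is not public ABI, so never use sizeof(AVVDPAUContext). */
        AVVDPAUContext *ctx = av_vdpau_alloc_context();
        if (!ctx)
            return -1;

        ctx->decoder = decoder;   /* VdpDecoder created by the application */
        ctx->render  = render;    /* render callback from VdpGetProcAddress() */

        avctx->hwaccel_context = ctx;
        return 0;
    }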
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavcodec/version.h b/dom/media/platforms/ffmpeg/libav55/include/libavcodec/version.h
new file mode 100644
index 000000000..cdd5a613d
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavcodec/version.h
@@ -0,0 +1,127 @@
+/*
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VERSION_H
+#define AVCODEC_VERSION_H
+
+/**
+ * @file
+ * @ingroup libavc
+ * Libavcodec version macros.
+ */
+
+#include "libavutil/version.h"
+
+#define LIBAVCODEC_VERSION_MAJOR 55
+#define LIBAVCODEC_VERSION_MINOR 34
+#define LIBAVCODEC_VERSION_MICRO 1
+
+#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
+ LIBAVCODEC_VERSION_MINOR, \
+ LIBAVCODEC_VERSION_MICRO)
+#define LIBAVCODEC_VERSION AV_VERSION(LIBAVCODEC_VERSION_MAJOR, \
+ LIBAVCODEC_VERSION_MINOR, \
+ LIBAVCODEC_VERSION_MICRO)
+#define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT
+
+#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION)
+
+/**
+ * FF_API_* defines may be placed below to indicate public API that will be
+ * dropped at a future version bump. The defines themselves are not part of
+ * the public API and may change, break or disappear at any time.
+ */
+
+#ifndef FF_API_REQUEST_CHANNELS
+#define FF_API_REQUEST_CHANNELS (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_DEINTERLACE
+#define FF_API_DEINTERLACE (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_DESTRUCT_PACKET
+#define FF_API_DESTRUCT_PACKET (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_GET_BUFFER
+#define FF_API_GET_BUFFER (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_MISSING_SAMPLE
+#define FF_API_MISSING_SAMPLE (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_LOWRES
+#define FF_API_LOWRES (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_CAP_VDPAU
+#define FF_API_CAP_VDPAU (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_BUFS_VDPAU
+#define FF_API_BUFS_VDPAU (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_VOXWARE
+#define FF_API_VOXWARE (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_SET_DIMENSIONS
+#define FF_API_SET_DIMENSIONS (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_DEBUG_MV
+#define FF_API_DEBUG_MV (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_AC_VLC
+#define FF_API_AC_VLC (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_OLD_MSMPEG4
+#define FF_API_OLD_MSMPEG4 (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_ASPECT_EXTENDED
+#define FF_API_ASPECT_EXTENDED (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_THREAD_OPAQUE
+#define FF_API_THREAD_OPAQUE (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_CODEC_PKT
+#define FF_API_CODEC_PKT (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_ARCH_ALPHA
+#define FF_API_ARCH_ALPHA (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_XVMC
+#define FF_API_XVMC (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_ERROR_RATE
+#define FF_API_ERROR_RATE (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_QSCALE_TYPE
+#define FF_API_QSCALE_TYPE (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_MB_TYPE
+#define FF_API_MB_TYPE (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_MAX_BFRAMES
+#define FF_API_MAX_BFRAMES (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_FAST_MALLOC
+#define FF_API_FAST_MALLOC (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_NEG_LINESIZES
+#define FF_API_NEG_LINESIZES (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+#ifndef FF_API_EMU_EDGE
+#define FF_API_EMU_EDGE (LIBAVCODEC_VERSION_MAJOR < 56)
+#endif
+
+#endif /* AVCODEC_VERSION_H */
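The FF_API_* macros above let deprecated pieces of the API compile away automatically at the next major bump; application code can use the same guards, and can additionally compare the build-time version against the library loaded at run time (avcodec_version() is declared in avcodec.h and assumed here):

    #include <stdio.h>
    #include <libavcodec/avcodec.h>
    #include <libavcodec/version.h>

    static void check_lavc_version(void)
    {
        /* Version of the headers this program was compiled against ... */
        printf("built with %s\n", LIBAVCODEC_IDENT);

        /* ... versus the shared library actually loaded (major is bits 16 and up). */
        if ((avcodec_version() >> 16) != LIBAVCODEC_VERSION_MAJOR)
            fprintf(stderr, "libavcodec major version mismatch\n");

    #if FF_API_DEINTERLACE
        /* Code using deprecated API can be fenced exactly like the header does,
         * so it disappears once LIBAVCODEC_VERSION_MAJOR reaches 56. */
    #endif
    }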
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavcodec/xvmc.h b/dom/media/platforms/ffmpeg/libav55/include/libavcodec/xvmc.h
new file mode 100644
index 000000000..950ed1827
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavcodec/xvmc.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2003 Ivan Kalvachev
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_XVMC_H
+#define AVCODEC_XVMC_H
+
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_xvmc
+ * Public libavcodec XvMC header.
+ */
+
+#include <X11/extensions/XvMC.h>
+
+#include "libavutil/attributes.h"
+#include "version.h"
+#include "avcodec.h"
+
+#if FF_API_XVMC
+
+/**
+ * @defgroup lavc_codec_hwaccel_xvmc XvMC
+ * @ingroup lavc_codec_hwaccel
+ *
+ * @{
+ */
+
+#define AV_XVMC_ID 0x1DC711C0 /**< special value to ensure that regular pixel routines haven't corrupted the struct
+ the number is 1337 speak for the letters IDCT MCo (motion compensation) */
+
+attribute_deprecated struct xvmc_pix_fmt {
+ /** The field contains the special constant value AV_XVMC_ID.
+ It is used as a test that the application correctly uses the API,
+ and that there is no corruption caused by pixel routines.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ int xvmc_id;
+
+ /** Pointer to the block array allocated by XvMCCreateBlocks().
+ The array has to be freed by XvMCDestroyBlocks().
+ Each group of 64 values represents one data block of differential
+ pixel information (in MoCo mode) or coefficients for IDCT.
+ - application - set the pointer during initialization
+ - libavcodec - fills coefficients/pixel data into the array
+ */
+ short* data_blocks;
+
+ /** Pointer to the macroblock description array allocated by
+ XvMCCreateMacroBlocks() and freed by XvMCDestroyMacroBlocks().
+ - application - set the pointer during initialization
+ - libavcodec - fills description data into the array
+ */
+ XvMCMacroBlock* mv_blocks;
+
+ /** Number of macroblock descriptions that can be stored in the mv_blocks
+ array.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ int allocated_mv_blocks;
+
+ /** Number of blocks that can be stored at once in the data_blocks array.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ int allocated_data_blocks;
+
+ /** Indicate that the hardware would interpret data_blocks as IDCT
+ coefficients and perform IDCT on them.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ int idct;
+
+ /** In MoCo mode it indicates that intra macroblocks are assumed to be in
+ unsigned format; same as the XVMC_INTRA_UNSIGNED flag.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ int unsigned_intra;
+
+ /** Pointer to the surface allocated by XvMCCreateSurface().
+ It has to be freed by XvMCDestroySurface() on application exit.
+ It identifies the frame and its state on the video hardware.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ XvMCSurface* p_surface;
+
+/** Set by the decoder before calling ff_draw_horiz_band(),
+ needed by the XvMCRenderSurface function. */
+//@{
+ /** Pointer to the surface used as past reference
+ - application - unchanged
+ - libavcodec - set
+ */
+ XvMCSurface* p_past_surface;
+
+ /** Pointer to the surface used as future reference
+ - application - unchanged
+ - libavcodec - set
+ */
+ XvMCSurface* p_future_surface;
+
+ /** top/bottom field or frame
+ - application - unchanged
+ - libavcodec - set
+ */
+ unsigned int picture_structure;
+
+ /** XVMC_SECOND_FIELD - 1st or 2nd field in the sequence
+ - application - unchanged
+ - libavcodec - set
+ */
+ unsigned int flags;
+//@}
+
+ /** Number of macroblock descriptions in the mv_blocks array
+ that have already been passed to the hardware.
+ - application - zeroes it on get_buffer().
+ A successful ff_draw_horiz_band() may increment it
+ with filled_mv_blocks_num or zero both.
+ - libavcodec - unchanged
+ */
+ int start_mv_blocks_num;
+
+ /** Number of new macroblock descriptions in the mv_blocks array (after
+ start_mv_blocks_num) that are filled by libavcodec and have to be
+ passed to the hardware.
+ - application - zeroes it on get_buffer() or after successful
+ ff_draw_horiz_band().
+ - libavcodec - incremented by one for each stored MB
+ */
+ int filled_mv_blocks_num;
+
+ /** Number of the next free data block; one data block consists of
+ 64 short values in the data_blocks array.
+ All blocks before this one have already been claimed by placing their
+ position into the corresponding block description structure field,
+ that are part of the mv_blocks array.
+ - application - zeroes it on get_buffer().
+ A successful ff_draw_horiz_band() may zero it together
+ with start_mv_blocks_num.
+ - libavcodec - each decoded macroblock increases it by the number
+ of coded blocks it contains.
+ */
+ int next_free_data_block_num;
+};
+
+/**
+ * @}
+ */
+
+#endif /* FF_API_XVMC */
+
+#endif /* AVCODEC_XVMC_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/adler32.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/adler32.h
new file mode 100644
index 000000000..a8ff6f9d4
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/adler32.h
@@ -0,0 +1,43 @@
+/*
+ * copyright (c) 2006 Mans Rullgard
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_ADLER32_H
+#define AVUTIL_ADLER32_H
+
+#include <stdint.h>
+#include "attributes.h"
+
+/**
+ * @ingroup lavu_crypto
+ * Calculate the Adler32 checksum of a buffer.
+ *
+ * Passing the return value to a subsequent av_adler32_update() call
+ * allows the checksum of multiple buffers to be calculated as though
+ * they were concatenated.
+ *
+ * @param adler initial checksum value
+ * @param buf pointer to input buffer
+ * @param len size of input buffer
+ * @return updated checksum
+ */
+unsigned long av_adler32_update(unsigned long adler, const uint8_t *buf,
+ unsigned int len) av_pure;
+
+#endif /* AVUTIL_ADLER32_H */
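A small sketch of the chaining behavior described above; the standard Adler-32 seed of 1 is assumed, since the header leaves the initial value to the caller:

    #include <stdint.h>
    #include <libavutil/adler32.h>

    static unsigned long adler_of_two_chunks(const uint8_t *a, unsigned a_len,
                                             const uint8_t *b, unsigned b_len)
    {
        unsigned long sum = av_adler32_update(1UL, a, a_len);  /* 1 = standard seed */
        /* Passing the previous result yields the checksum of the concatenation. */
        return av_adler32_update(sum, b, b_len);
    }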
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/aes.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/aes.h
new file mode 100644
index 000000000..edff275b7
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/aes.h
@@ -0,0 +1,67 @@
+/*
+ * copyright (c) 2007 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AES_H
+#define AVUTIL_AES_H
+
+#include <stdint.h>
+
+#include "attributes.h"
+#include "version.h"
+
+/**
+ * @defgroup lavu_aes AES
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+#if FF_API_CONTEXT_SIZE
+extern attribute_deprecated const int av_aes_size;
+#endif
+
+struct AVAES;
+
+/**
+ * Allocate an AVAES context.
+ */
+struct AVAES *av_aes_alloc(void);
+
+/**
+ * Initialize an AVAES context.
+ * @param key_bits 128, 192 or 256
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+int av_aes_init(struct AVAES *a, const uint8_t *key, int key_bits, int decrypt);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context.
+ * @param count number of 16 byte blocks
+ * @param dst destination array, can be equal to src
+ * @param src source array, can be equal to dst
+ * @param iv initialization vector for CBC mode, if NULL then ECB will be used
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_aes_crypt(struct AVAES *a, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_AES_H */
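A minimal AES-128-CBC sketch using only the calls declared above plus av_free() from libavutil/mem.h (assumed); key handling and buffer sizing are the caller's responsibility:

    #include <stdint.h>
    #include <libavutil/aes.h>
    #include <libavutil/mem.h>

    /* Encrypt `blocks` 16-byte blocks from src to dst with AES-128 in CBC mode. */
    static int aes_cbc_encrypt(uint8_t *dst, const uint8_t *src, int blocks,
                               const uint8_t key[16], uint8_t iv[16])
    {
        struct AVAES *aes = av_aes_alloc();
        if (!aes)
            return -1;

        if (av_aes_init(aes, key, 128, 0) < 0) {     /* 0 = encryption */
            av_free(aes);
            return -1;
        }
        av_aes_crypt(aes, dst, src, blocks, iv, 0);  /* non-NULL iv selects CBC mode */

        av_free(aes);
        return 0;
    }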
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/attributes.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/attributes.h
new file mode 100644
index 000000000..d7f2bb5c6
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/attributes.h
@@ -0,0 +1,126 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Macro definitions for various function/variable attributes
+ */
+
+#ifndef AVUTIL_ATTRIBUTES_H
+#define AVUTIL_ATTRIBUTES_H
+
+#ifdef __GNUC__
+# define AV_GCC_VERSION_AT_LEAST(x,y) (__GNUC__ > x || __GNUC__ == x && __GNUC_MINOR__ >= y)
+#else
+# define AV_GCC_VERSION_AT_LEAST(x,y) 0
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_always_inline __attribute__((always_inline)) inline
+#elif defined(_MSC_VER)
+# define av_always_inline __forceinline
+#else
+# define av_always_inline inline
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_noinline __attribute__((noinline))
+#elif defined(_MSC_VER)
+# define av_noinline __declspec(noinline)
+#else
+# define av_noinline
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_pure __attribute__((pure))
+#else
+# define av_pure
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(2,6)
+# define av_const __attribute__((const))
+#else
+# define av_const
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4,3)
+# define av_cold __attribute__((cold))
+#else
+# define av_cold
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4,1) && !defined(__llvm__)
+# define av_flatten __attribute__((flatten))
+#else
+# define av_flatten
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define attribute_deprecated __attribute__((deprecated))
+#elif defined(_MSC_VER)
+# define attribute_deprecated __declspec(deprecated)
+#else
+# define attribute_deprecated
+#endif
+
+#if defined(__GNUC__)
+# define av_unused __attribute__((unused))
+#else
+# define av_unused
+#endif
+
+/**
+ * Mark a variable as used and prevent the compiler from optimizing it
+ * away. This is useful for variables accessed only from inline
+ * assembler without the compiler being aware.
+ */
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_used __attribute__((used))
+#else
+# define av_used
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,3)
+# define av_alias __attribute__((may_alias))
+#else
+# define av_alias
+#endif
+
+#if defined(__GNUC__) && !defined(__ICC)
+# define av_uninit(x) x=x
+#else
+# define av_uninit(x) x
+#endif
+
+#ifdef __GNUC__
+# define av_builtin_constant_p __builtin_constant_p
+# define av_printf_format(fmtpos, attrpos) __attribute__((__format__(__printf__, fmtpos, attrpos)))
+#else
+# define av_builtin_constant_p(x) 0
+# define av_printf_format(fmtpos, attrpos)
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(2,5)
+# define av_noreturn __attribute__((noreturn))
+#else
+# define av_noreturn
+#endif
+
+#endif /* AVUTIL_ATTRIBUTES_H */
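These are portability wrappers rather than an API; a short sketch of how they are typically applied (the function names below are illustrative, not from the header):

    #include <libavutil/attributes.h>

    /* Hot helper: request inlining where the compiler supports it. */
    static av_always_inline int clip_uint8(int v)
    {
        return v < 0 ? 0 : v > 255 ? 255 : v;
    }

    /* Old entry point kept for compatibility; callers get a deprecation warning. */
    attribute_deprecated void old_api_do_something(void);

    /* printf-style helper: argument 2 is the format, variadic args start at 3. */
    void log_msg(int level, const char *fmt, ...) av_printf_format(2, 3);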
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/audio_fifo.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/audio_fifo.h
new file mode 100644
index 000000000..8c7638825
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/audio_fifo.h
@@ -0,0 +1,146 @@
+/*
+ * Audio FIFO
+ * Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Audio FIFO Buffer
+ */
+
+#ifndef AVUTIL_AUDIO_FIFO_H
+#define AVUTIL_AUDIO_FIFO_H
+
+#include "avutil.h"
+#include "fifo.h"
+#include "samplefmt.h"
+
+/**
+ * @addtogroup lavu_audio
+ * @{
+ */
+
+/**
+ * Context for an Audio FIFO Buffer.
+ *
+ * - Operates at the sample level rather than the byte level.
+ * - Supports multiple channels with either planar or packed sample format.
+ * - Automatic reallocation when writing to a full buffer.
+ */
+typedef struct AVAudioFifo AVAudioFifo;
+
+/**
+ * Free an AVAudioFifo.
+ *
+ * @param af AVAudioFifo to free
+ */
+void av_audio_fifo_free(AVAudioFifo *af);
+
+/**
+ * Allocate an AVAudioFifo.
+ *
+ * @param sample_fmt sample format
+ * @param channels number of channels
+ * @param nb_samples initial allocation size, in samples
+ * @return newly allocated AVAudioFifo, or NULL on error
+ */
+AVAudioFifo *av_audio_fifo_alloc(enum AVSampleFormat sample_fmt, int channels,
+ int nb_samples);
+
+/**
+ * Reallocate an AVAudioFifo.
+ *
+ * @param af AVAudioFifo to reallocate
+ * @param nb_samples new allocation size, in samples
+ * @return 0 if OK, or negative AVERROR code on failure
+ */
+int av_audio_fifo_realloc(AVAudioFifo *af, int nb_samples);
+
+/**
+ * Write data to an AVAudioFifo.
+ *
+ * The AVAudioFifo will be reallocated automatically if the available space
+ * is less than nb_samples.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param af AVAudioFifo to write to
+ * @param data audio data plane pointers
+ * @param nb_samples number of samples to write
+ * @return number of samples actually written, or negative AVERROR
+ * code on failure.
+ */
+int av_audio_fifo_write(AVAudioFifo *af, void **data, int nb_samples);
+
+/**
+ * Read data from an AVAudioFifo.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param af AVAudioFifo to read from
+ * @param data audio data plane pointers
+ * @param nb_samples number of samples to read
+ * @return number of samples actually read, or negative AVERROR code
+ * on failure.
+ */
+int av_audio_fifo_read(AVAudioFifo *af, void **data, int nb_samples);
+
+/**
+ * Drain data from an AVAudioFifo.
+ *
+ * Removes the data without reading it.
+ *
+ * @param af AVAudioFifo to drain
+ * @param nb_samples number of samples to drain
+ * @return 0 if OK, or negative AVERROR code on failure
+ */
+int av_audio_fifo_drain(AVAudioFifo *af, int nb_samples);
+
+/**
+ * Reset the AVAudioFifo buffer.
+ *
+ * This empties all data in the buffer.
+ *
+ * @param af AVAudioFifo to reset
+ */
+void av_audio_fifo_reset(AVAudioFifo *af);
+
+/**
+ * Get the current number of samples in the AVAudioFifo available for reading.
+ *
+ * @param af the AVAudioFifo to query
+ * @return number of samples available for reading
+ */
+int av_audio_fifo_size(AVAudioFifo *af);
+
+/**
+ * Get the current number of samples in the AVAudioFifo available for writing.
+ *
+ * @param af the AVAudioFifo to query
+ * @return number of samples available for writing
+ */
+int av_audio_fifo_space(AVAudioFifo *af);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_AUDIO_FIFO_H */
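A short round-trip sketch for packed (interleaved) 16-bit stereo; AV_SAMPLE_FMT_S16 comes from samplefmt.h, which this header already includes, and only data[0] is used for packed formats:

    #include <stdint.h>
    #include <libavutil/audio_fifo.h>

    static int fifo_roundtrip(void)
    {
        enum { CH = 2, N = 512 };
        int16_t inbuf[N * CH] = { 0 }, outbuf[N * CH];
        void *in[1]  = { inbuf };    /* packed audio: a single data plane */
        void *out[1] = { outbuf };
        int ret = -1;

        AVAudioFifo *af = av_audio_fifo_alloc(AV_SAMPLE_FMT_S16, CH, N);
        if (!af)
            return -1;

        /* Writing more than the initial allocation would reallocate automatically. */
        if (av_audio_fifo_write(af, in, N) == N &&
            av_audio_fifo_size(af) == N &&
            av_audio_fifo_read(af, out, N) == N)
            ret = 0;

        av_audio_fifo_free(af);
        return ret;
    }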
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/audioconvert.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/audioconvert.h
new file mode 100644
index 000000000..300a67cd3
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/audioconvert.h
@@ -0,0 +1,6 @@
+
+#include "version.h"
+
+#if FF_API_AUDIOCONVERT
+#include "channel_layout.h"
+#endif
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/avassert.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/avassert.h
new file mode 100644
index 000000000..b223d26e8
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/avassert.h
@@ -0,0 +1,66 @@
+/*
+ * copyright (c) 2010 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * simple assert() macros that are a bit more flexible than ISO C assert().
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVUTIL_AVASSERT_H
+#define AVUTIL_AVASSERT_H
+
+#include <stdlib.h>
+#include "avutil.h"
+#include "log.h"
+
+/**
+ * assert() equivalent, that is always enabled.
+ */
+#define av_assert0(cond) do { \
+ if (!(cond)) { \
+ av_log(NULL, AV_LOG_FATAL, "Assertion %s failed at %s:%d\n", \
+ AV_STRINGIFY(cond), __FILE__, __LINE__); \
+ abort(); \
+ } \
+} while (0)
+
+
+/**
+ * assert() equivalent, that does not lie in speed critical code.
+ * These asserts() thus can be enabled without fearing speedloss.
+ */
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 0
+#define av_assert1(cond) av_assert0(cond)
+#else
+#define av_assert1(cond) ((void)0)
+#endif
+
+
+/**
+ * assert() equivalent, that does lie in speed critical code.
+ */
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1
+#define av_assert2(cond) av_assert0(cond)
+#else
+#define av_assert2(cond) ((void)0)
+#endif
+
+#endif /* AVUTIL_AVASSERT_H */
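Usage is a matter of picking the level: av_assert0 for cheap invariants that must always hold, av_assert1/av_assert2 for checks that compile away unless ASSERT_LEVEL is raised at build time. A tiny sketch:

    #include <libavutil/avassert.h>

    static int divide(int num, int den)
    {
        av_assert0(den != 0);   /* always compiled in; aborts with file:line */
        av_assert2(num >= 0);   /* only active when ASSERT_LEVEL > 1 */
        return num / den;
    }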
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/avconfig.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/avconfig.h
new file mode 100644
index 000000000..f10aa6186
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/avconfig.h
@@ -0,0 +1,6 @@
+/* Generated by ffconf */
+#ifndef AVUTIL_AVCONFIG_H
+#define AVUTIL_AVCONFIG_H
+#define AV_HAVE_BIGENDIAN 0
+#define AV_HAVE_FAST_UNALIGNED 1
+#endif /* AVUTIL_AVCONFIG_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/avstring.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/avstring.h
new file mode 100644
index 000000000..b7d10983c
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/avstring.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2007 Mans Rullgard
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AVSTRING_H
+#define AVUTIL_AVSTRING_H
+
+#include <stddef.h>
+#include "attributes.h"
+
+/**
+ * @addtogroup lavu_string
+ * @{
+ */
+
+/**
+ * Return non-zero if pfx is a prefix of str. If it is, *ptr is set to
+ * the address of the first character in str after the prefix.
+ *
+ * @param str input string
+ * @param pfx prefix to test
+ * @param ptr updated if the prefix is matched inside str
+ * @return non-zero if the prefix matches, zero otherwise
+ */
+int av_strstart(const char *str, const char *pfx, const char **ptr);
+
+/**
+ * Return non-zero if pfx is a prefix of str independent of case. If
+ * it is, *ptr is set to the address of the first character in str
+ * after the prefix.
+ *
+ * @param str input string
+ * @param pfx prefix to test
+ * @param ptr updated if the prefix is matched inside str
+ * @return non-zero if the prefix matches, zero otherwise
+ */
+int av_stristart(const char *str, const char *pfx, const char **ptr);
+
+/**
+ * Locate the first case-independent occurrence in the string haystack
+ * of the string needle. A zero-length string needle is considered to
+ * match at the start of haystack.
+ *
+ * This function is a case-insensitive version of the standard strstr().
+ *
+ * @param haystack string to search in
+ * @param needle string to search for
+ * @return pointer to the located match within haystack
+ * or a null pointer if no match
+ */
+char *av_stristr(const char *haystack, const char *needle);
+
+/**
+ * Locate the first occurrence of the string needle in the string haystack
+ * where not more than hay_length characters are searched. A zero-length
+ * string needle is considered to match at the start of haystack.
+ *
+ * This function is a length-limited version of the standard strstr().
+ *
+ * @param haystack string to search in
+ * @param needle string to search for
+ * @param hay_length length of string to search in
+ * @return pointer to the located match within haystack
+ * or a null pointer if no match
+ */
+char *av_strnstr(const char *haystack, const char *needle, size_t hay_length);
+
+/**
+ * Copy the string src to dst, but no more than size - 1 bytes, and
+ * null-terminate dst.
+ *
+ * This function is the same as BSD strlcpy().
+ *
+ * @param dst destination buffer
+ * @param src source string
+ * @param size size of destination buffer
+ * @return the length of src
+ *
+ * @warning since the return value is the length of src, src absolutely
+ * _must_ be a properly 0-terminated string, otherwise this will read beyond
+ * the end of the buffer and possibly crash.
+ */
+size_t av_strlcpy(char *dst, const char *src, size_t size);
+
+/**
+ * Append the string src to the string dst, but to a total length of
+ * no more than size - 1 bytes, and null-terminate dst.
+ *
+ * This function is similar to BSD strlcat(), but differs when
+ * size <= strlen(dst).
+ *
+ * @param dst destination buffer
+ * @param src source string
+ * @param size size of destination buffer
+ * @return the total length of src and dst
+ *
+ * @warning since the return value uses the lengths of src and dst, these
+ * absolutely _must_ be properly 0-terminated strings, otherwise this
+ * will read beyond the end of the buffer and possibly crash.
+ */
+size_t av_strlcat(char *dst, const char *src, size_t size);
+
+/**
+ * Append output to a string, according to a format. Never write out of
+ * the destination buffer, and always put a terminating 0 within
+ * the buffer.
+ * @param dst destination buffer (string to which the output is
+ * appended)
+ * @param size total size of the destination buffer
+ * @param fmt printf-compatible format string, specifying how the
+ * following parameters are used
+ * @return the length of the string that would have been generated
+ * if enough space had been available
+ */
+size_t av_strlcatf(char *dst, size_t size, const char *fmt, ...) av_printf_format(3, 4);
+
+/**
+ * Convert a number to an av_malloced string.
+ */
+char *av_d2str(double d);
+
+/**
+ * Unescape the given string until a non escaped terminating char,
+ * and return the token corresponding to the unescaped string.
+ *
+ * The normal \ and ' escaping is supported. Leading and trailing
+ * whitespaces are removed, unless they are escaped with '\' or are
+ * enclosed between ''.
+ *
+ * @param buf the buffer to parse, buf will be updated to point to the
+ * terminating char
+ * @param term a 0-terminated list of terminating chars
+ * @return the malloced unescaped string, which must be av_freed by
+ * the user, NULL in case of allocation failure
+ */
+char *av_get_token(const char **buf, const char *term);
+
+/**
+ * Locale-independent conversion of ASCII isdigit.
+ */
+int av_isdigit(int c);
+
+/**
+ * Locale-independent conversion of ASCII isgraph.
+ */
+int av_isgraph(int c);
+
+/**
+ * Locale-independent conversion of ASCII isspace.
+ */
+int av_isspace(int c);
+
+/**
+ * Locale-independent conversion of ASCII characters to uppercase.
+ */
+static inline int av_toupper(int c)
+{
+ if (c >= 'a' && c <= 'z')
+ c ^= 0x20;
+ return c;
+}
+
+/**
+ * Locale-independent conversion of ASCII characters to lowercase.
+ */
+static inline int av_tolower(int c)
+{
+ if (c >= 'A' && c <= 'Z')
+ c ^= 0x20;
+ return c;
+}
+
+/**
+ * Locale-independent conversion of ASCII isxdigit.
+ */
+int av_isxdigit(int c);
+
+/**
+ * Locale-independent case-insensitive compare.
+ * @note This means only ASCII-range characters are case-insensitive
+ */
+int av_strcasecmp(const char *a, const char *b);
+
+/**
+ * Locale-independent case-insensitive compare.
+ * @note This means only ASCII-range characters are case-insensitive
+ */
+int av_strncasecmp(const char *a, const char *b, size_t n);
+
+
+/**
+ * Thread safe basename.
+ * @param path the path, on DOS both \ and / are considered separators.
+ * @return pointer to the basename substring.
+ */
+const char *av_basename(const char *path);
+
+/**
+ * Thread safe dirname.
+ * @param path the path, on DOS both \ and / are considered separators.
+ * @return the path with the separator replaced by the string terminator or ".".
+ * @note the function may change the input string.
+ */
+const char *av_dirname(char *path);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_AVSTRING_H */
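A few of the declarations above in context; the prefix, separators and buffer size are illustrative, and av_free() for av_get_token()'s result is assumed from libavutil/mem.h:

    #include <stdio.h>
    #include <libavutil/avstring.h>
    #include <libavutil/mem.h>

    static void string_helpers_demo(const char *url)
    {
        const char *rest;
        char buf[64];

        /* Prefix test that also returns the tail after the prefix. */
        if (av_strstart(url, "http://", &rest))
            printf("host and path: %s\n", rest);

        /* Truncating, always-terminated copy (BSD strlcpy semantics). */
        if (av_strlcpy(buf, url, sizeof(buf)) >= sizeof(buf))
            printf("input was truncated\n");

        /* Tokenize up to ':' or ','; the result must be av_free()d. */
        const char *p = "key\\:with\\:colons:value";
        char *tok = av_get_token(&p, ":,");
        if (tok) {
            printf("token: %s\n", tok);
            av_free(tok);
        }
    }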
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/avutil.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/avutil.h
new file mode 100644
index 000000000..a0d35d162
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/avutil.h
@@ -0,0 +1,284 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AVUTIL_H
+#define AVUTIL_AVUTIL_H
+
+/**
+ * @file
+ * external API header
+ */
+
+/**
+ * @mainpage
+ *
+ * @section libav_intro Introduction
+ *
+ * This document describes the usage of the different libraries
+ * provided by Libav.
+ *
+ * @li @ref libavc "libavcodec" encoding/decoding library
+ * @li @ref lavfi "libavfilter" graph-based frame editing library
+ * @li @ref libavf "libavformat" I/O and muxing/demuxing library
+ * @li @ref lavd "libavdevice" special devices muxing/demuxing library
+ * @li @ref lavu "libavutil" common utility library
+ * @li @ref lavr "libavresample" audio resampling, format conversion and mixing
+ * @li @ref libsws "libswscale" color conversion and scaling library
+ *
+ * @section libav_versioning Versioning and compatibility
+ *
+ * Each of the Libav libraries contains a version.h header, which defines a
+ * major, minor and micro version number with the
+ * <em>LIBRARYNAME_VERSION_{MAJOR,MINOR,MICRO}</em> macros. The major version
+ * number is incremented with backward incompatible changes - e.g. removing
+ * parts of the public API, reordering public struct members, etc. The minor
+ * version number is incremented for backward compatible API changes or major
+ * new features - e.g. adding a new public function or a new decoder. The micro
+ * version number is incremented for smaller changes that a calling program
+ * might still want to check for - e.g. changing behavior in a previously
+ * unspecified situation.
+ *
+ * Libav guarantees backward API and ABI compatibility for each library as long
+ * as its major version number is unchanged. This means that no public symbols
+ * will be removed or renamed. Types and names of the public struct members and
+ * values of public macros and enums will remain the same (unless they were
+ * explicitly declared as not part of the public API). Documented behavior will
+ * not change.
+ *
+ * In other words, any correct program that works with a given Libav snapshot
+ * should work just as well without any changes with any later snapshot with the
+ * same major versions. This applies to both rebuilding the program against new
+ * Libav versions or to replacing the dynamic Libav libraries that a program
+ * links against.
+ *
+ * However, new public symbols may be added and new members may be appended to
+ * public structs whose size is not part of public ABI (most public structs in
+ * Libav). New macros and enum values may be added. Behavior in undocumented
+ * situations may change slightly (and be documented). All those are accompanied
+ * by an entry in doc/APIchanges and incrementing either the minor or micro
+ * version number.
+ */
+
+/**
+ * @defgroup lavu Common utility functions
+ *
+ * @brief
+ * libavutil contains the code shared across all the other Libav
+ * libraries
+ *
+ * @note In order to use the functions provided by avutil you must include
+ * the specific header.
+ *
+ * @{
+ *
+ * @defgroup lavu_crypto Crypto and Hashing
+ *
+ * @{
+ * @}
+ *
+ * @defgroup lavu_math Maths
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_string String Manipulation
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_mem Memory Management
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_data Data Structures
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_audio Audio related
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_error Error Codes
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_log Logging Facility
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_misc Other
+ *
+ * @{
+ *
+ * @defgroup lavu_internal Internal
+ *
+ * Not exported functions, for internal usage only
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup preproc_misc Preprocessor String Macros
+ *
+ * @{
+ *
+ * @}
+ */
+
+
+/**
+ * @addtogroup lavu_ver
+ * @{
+ */
+
+/**
+ * Return the LIBAVUTIL_VERSION_INT constant.
+ */
+unsigned avutil_version(void);
+
+/**
+ * Return the libavutil build-time configuration.
+ */
+const char *avutil_configuration(void);
+
+/**
+ * Return the libavutil license.
+ */
+const char *avutil_license(void);
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavu_media Media Type
+ * @brief Media Type
+ */
+
+enum AVMediaType {
+ AVMEDIA_TYPE_UNKNOWN = -1, ///< Usually treated as AVMEDIA_TYPE_DATA
+ AVMEDIA_TYPE_VIDEO,
+ AVMEDIA_TYPE_AUDIO,
+ AVMEDIA_TYPE_DATA, ///< Opaque data information usually continuous
+ AVMEDIA_TYPE_SUBTITLE,
+ AVMEDIA_TYPE_ATTACHMENT, ///< Opaque data information usually sparse
+ AVMEDIA_TYPE_NB
+};
+
+/**
+ * @defgroup lavu_const Constants
+ * @{
+ *
+ * @defgroup lavu_enc Encoding specific
+ *
+ * @note those definitions should move to avcodec
+ * @{
+ */
+
+#define FF_LAMBDA_SHIFT 7
+#define FF_LAMBDA_SCALE (1<<FF_LAMBDA_SHIFT)
+#define FF_QP2LAMBDA 118 ///< factor to convert from H.263 QP to lambda
+#define FF_LAMBDA_MAX (256*128-1)
+
+#define FF_QUALITY_SCALE FF_LAMBDA_SCALE //FIXME maybe remove
+
+/**
+ * @}
+ * @defgroup lavu_time Timestamp specific
+ *
+ * Libav internal timebase and timestamp definitions
+ *
+ * @{
+ */
+
+/**
+ * @brief Undefined timestamp value
+ *
+ * Usually reported by demuxers that work on containers that do not provide
+ * either pts or dts.
+ */
+
+#define AV_NOPTS_VALUE INT64_C(0x8000000000000000)
+
+/**
+ * Internal time base represented as integer
+ */
+
+#define AV_TIME_BASE 1000000
+
+/**
+ * Internal time base represented as fractional value
+ */
+
+#define AV_TIME_BASE_Q (AVRational){1, AV_TIME_BASE}
+
+/**
+ * @}
+ * @}
+ * @defgroup lavu_picture Image related
+ *
+ * AVPicture types, pixel formats and basic image planes manipulation.
+ *
+ * @{
+ */
+
+enum AVPictureType {
+ AV_PICTURE_TYPE_I = 1, ///< Intra
+ AV_PICTURE_TYPE_P, ///< Predicted
+ AV_PICTURE_TYPE_B, ///< Bi-dir predicted
+ AV_PICTURE_TYPE_S, ///< S(GMC)-VOP MPEG4
+ AV_PICTURE_TYPE_SI, ///< Switching Intra
+ AV_PICTURE_TYPE_SP, ///< Switching Predicted
+ AV_PICTURE_TYPE_BI, ///< BI type
+};
+
+/**
+ * Return a single letter to describe the given picture type
+ * pict_type.
+ *
+ * @param[in] pict_type the picture type @return a single character
+ * representing the picture type, '?' if pict_type is unknown
+ */
+char av_get_picture_type_char(enum AVPictureType pict_type);
+
+/**
+ * @}
+ */
+
+#include "error.h"
+#include "version.h"
+#include "macros.h"
+
+/**
+ * @}
+ * @}
+ */
+
+#endif /* AVUTIL_AVUTIL_H */
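AV_NOPTS_VALUE, AV_TIME_BASE and AV_TIME_BASE_Q are the constants applications touch most often; a small sketch converting a microsecond timestamp into a stream time base with av_rescale_q(), which is assumed here from libavutil/mathematics.h (not part of this header):

    #include <stdint.h>
    #include <libavutil/avutil.h>
    #include <libavutil/rational.h>
    #include <libavutil/mathematics.h>

    /* Convert a timestamp in AV_TIME_BASE units (microseconds) into a
       stream's own time base, e.g. {1, 90000} for an MPEG-TS stream. */
    static int64_t to_stream_tb(int64_t ts_us, AVRational stream_tb)
    {
        if (ts_us == AV_NOPTS_VALUE)   /* "no timestamp" must pass through untouched */
            return AV_NOPTS_VALUE;
        return av_rescale_q(ts_us, AV_TIME_BASE_Q, stream_tb);
    }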
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/base64.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/base64.h
new file mode 100644
index 000000000..4750cf5c7
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/base64.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2006 Ryan Martell. (rdm4@martellventures.com)
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_BASE64_H
+#define AVUTIL_BASE64_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_base64 Base64
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+
+/**
+ * Decode a base64-encoded string.
+ *
+ * @param out buffer for decoded data
+ * @param in null-terminated input string
+ * @param out_size size in bytes of the out buffer, must be at
+ * least 3/4 of the length of in
+ * @return number of bytes written, or a negative value in case of
+ * invalid input
+ */
+int av_base64_decode(uint8_t *out, const char *in, int out_size);
+
+/**
+ * Encode data to base64 and null-terminate.
+ *
+ * @param out buffer for encoded data
+ * @param out_size size in bytes of the output buffer, must be at
+ * least AV_BASE64_SIZE(in_size)
+ * @param in_size size in bytes of the 'in' buffer
+ * @return 'out' or NULL in case of error
+ */
+char *av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size);
+
+/**
+ * Calculate the output size needed to base64-encode x bytes.
+ */
+#define AV_BASE64_SIZE(x) (((x)+2) / 3 * 4 + 1)
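+
+/*
+ * Illustrative sketch (payload and buffer sizes are arbitrary): round-trip a
+ * small payload through base64.
+ *
+ * @code
+ * const uint8_t data[] = { 0xde, 0xad, 0xbe, 0xef };
+ * char encoded[AV_BASE64_SIZE(sizeof(data))];
+ * uint8_t decoded[sizeof(data) + 4];          // >= 3/4 of the encoded length
+ *
+ * if (!av_base64_encode(encoded, sizeof(encoded), data, sizeof(data)))
+ *     return;                                 // encoding failed
+ * if (av_base64_decode(decoded, encoded, sizeof(decoded)) < 0)
+ *     return;                                 // invalid base64 input
+ * @endcode
+ */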
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_BASE64_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/blowfish.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/blowfish.h
new file mode 100644
index 000000000..8c29536cf
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/blowfish.h
@@ -0,0 +1,76 @@
+/*
+ * Blowfish algorithm
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_BLOWFISH_H
+#define AVUTIL_BLOWFISH_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_blowfish Blowfish
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+#define AV_BF_ROUNDS 16
+
+typedef struct AVBlowfish {
+ uint32_t p[AV_BF_ROUNDS + 2];
+ uint32_t s[4][256];
+} AVBlowfish;
+
+/**
+ * Initialize an AVBlowfish context.
+ *
+ * @param ctx an AVBlowfish context
+ * @param key a key
+ * @param key_len length of the key
+ */
+void av_blowfish_init(struct AVBlowfish *ctx, const uint8_t *key, int key_len);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context.
+ *
+ * @param ctx an AVBlowfish context
+ * @param xl left four-byte half of the input to be encrypted
+ * @param xr right four-byte half of the input to be encrypted
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_blowfish_crypt_ecb(struct AVBlowfish *ctx, uint32_t *xl, uint32_t *xr,
+ int decrypt);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context.
+ *
+ * @param ctx an AVBlowfish context
+ * @param dst destination array, can be equal to src
+ * @param src source array, can be equal to dst
+ * @param count number of 8 byte blocks
+ * @param iv initialization vector for CBC mode, if NULL ECB will be used
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_blowfish_crypt(struct AVBlowfish *ctx, uint8_t *dst, const uint8_t *src,
+ int count, uint8_t *iv, int decrypt);
+
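+/*
+ * Illustrative sketch (key and plaintext values are arbitrary): ECB-encrypt
+ * and decrypt one 8-byte block held as two 32-bit halves.
+ *
+ * @code
+ * struct AVBlowfish bf;
+ * const uint8_t key[] = "example key";
+ * uint32_t xl = 0x01234567, xr = 0x89abcdef;
+ *
+ * av_blowfish_init(&bf, key, sizeof(key) - 1);
+ * av_blowfish_crypt_ecb(&bf, &xl, &xr, 0);    // encrypt
+ * av_blowfish_crypt_ecb(&bf, &xl, &xr, 1);    // decrypt back
+ * @endcode
+ */
+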
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_BLOWFISH_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/bswap.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/bswap.h
new file mode 100644
index 000000000..93a6016b8
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/bswap.h
@@ -0,0 +1,111 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * byte swapping routines
+ */
+
+#ifndef AVUTIL_BSWAP_H
+#define AVUTIL_BSWAP_H
+
+#include <stdint.h>
+#include "libavutil/avconfig.h"
+#include "attributes.h"
+
+#ifdef HAVE_AV_CONFIG_H
+
+#include "config.h"
+
+#if ARCH_AARCH64
+# include "aarch64/bswap.h"
+#elif ARCH_ARM
+# include "arm/bswap.h"
+#elif ARCH_AVR32
+# include "avr32/bswap.h"
+#elif ARCH_BFIN
+# include "bfin/bswap.h"
+#elif ARCH_SH4
+# include "sh4/bswap.h"
+#elif ARCH_X86
+# include "x86/bswap.h"
+#endif
+
+#endif /* HAVE_AV_CONFIG_H */
+
+#define AV_BSWAP16C(x) (((x) << 8 & 0xff00) | ((x) >> 8 & 0x00ff))
+#define AV_BSWAP32C(x) (AV_BSWAP16C(x) << 16 | AV_BSWAP16C((x) >> 16))
+#define AV_BSWAP64C(x) (AV_BSWAP32C(x) << 32 | AV_BSWAP32C((x) >> 32))
+
+#define AV_BSWAPC(s, x) AV_BSWAP##s##C(x)
+
+#ifndef av_bswap16
+static av_always_inline av_const uint16_t av_bswap16(uint16_t x)
+{
+ x= (x>>8) | (x<<8);
+ return x;
+}
+#endif
+
+#ifndef av_bswap32
+static av_always_inline av_const uint32_t av_bswap32(uint32_t x)
+{
+ return AV_BSWAP32C(x);
+}
+#endif
+
+#ifndef av_bswap64
+static inline uint64_t av_const av_bswap64(uint64_t x)
+{
+ return (uint64_t)av_bswap32(x) << 32 | av_bswap32(x >> 32);
+}
+#endif
+
+// be2ne ... big-endian to native-endian
+// le2ne ... little-endian to native-endian
+
+#if AV_HAVE_BIGENDIAN
+#define av_be2ne16(x) (x)
+#define av_be2ne32(x) (x)
+#define av_be2ne64(x) (x)
+#define av_le2ne16(x) av_bswap16(x)
+#define av_le2ne32(x) av_bswap32(x)
+#define av_le2ne64(x) av_bswap64(x)
+#define AV_BE2NEC(s, x) (x)
+#define AV_LE2NEC(s, x) AV_BSWAPC(s, x)
+#else
+#define av_be2ne16(x) av_bswap16(x)
+#define av_be2ne32(x) av_bswap32(x)
+#define av_be2ne64(x) av_bswap64(x)
+#define av_le2ne16(x) (x)
+#define av_le2ne32(x) (x)
+#define av_le2ne64(x) (x)
+#define AV_BE2NEC(s, x) AV_BSWAPC(s, x)
+#define AV_LE2NEC(s, x) (x)
+#endif
+
+#define AV_BE2NE16C(x) AV_BE2NEC(16, x)
+#define AV_BE2NE32C(x) AV_BE2NEC(32, x)
+#define AV_BE2NE64C(x) AV_BE2NEC(64, x)
+#define AV_LE2NE16C(x) AV_LE2NEC(16, x)
+#define AV_LE2NE32C(x) AV_LE2NEC(32, x)
+#define AV_LE2NE64C(x) AV_LE2NEC(64, x)
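+
+/*
+ * Illustrative sketch (the source buffer "buf" is assumed to hold at least
+ * four little-endian bytes): reading a 32-bit little-endian field portably.
+ *
+ * @code
+ * uint32_t raw, value;
+ * memcpy(&raw, buf, 4);                       // avoid unaligned access
+ * value = av_le2ne32(raw);                    // convert to native byte order
+ * @endcode
+ */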
+
+#endif /* AVUTIL_BSWAP_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/buffer.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/buffer.h
new file mode 100644
index 000000000..56b4d020e
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/buffer.h
@@ -0,0 +1,267 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_buffer
+ * refcounted data buffer API
+ */
+
+#ifndef AVUTIL_BUFFER_H
+#define AVUTIL_BUFFER_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_buffer AVBuffer
+ * @ingroup lavu_data
+ *
+ * @{
+ * AVBuffer is an API for reference-counted data buffers.
+ *
+ * There are two core objects in this API -- AVBuffer and AVBufferRef. AVBuffer
+ * represents the data buffer itself; it is opaque and not meant to be accessed
+ * by the caller directly, but only through AVBufferRef. However, the caller may
+ * e.g. compare two AVBuffer pointers to check whether two different references
+ * are describing the same data buffer. AVBufferRef represents a single
+ * reference to an AVBuffer and it is the object that may be manipulated by the
+ * caller directly.
+ *
+ * There are two functions provided for creating a new AVBuffer with a single
+ * reference -- av_buffer_alloc() to just allocate a new buffer, and
+ * av_buffer_create() to wrap an existing array in an AVBuffer. From an existing
+ * reference, additional references may be created with av_buffer_ref().
+ * Use av_buffer_unref() to free a reference (this will automatically free the
+ * data once all the references are freed).
+ *
+ * The convention throughout this API and the rest of Libav is such that the
+ * buffer is considered writable if there exists only one reference to it (and
+ * it has not been marked as read-only). The av_buffer_is_writable() function is
+ * provided to check whether this is true and av_buffer_make_writable() will
+ * automatically create a new writable buffer when necessary.
+ * Of course nothing prevents the calling code from violating this convention,
+ * however that is safe only when all the existing references are under its
+ * control.
+ *
+ * @note Referencing and unreferencing the buffers is thread-safe and thus
+ * may be done from multiple threads simultaneously without any need for
+ * additional locking.
+ *
+ * @note Two different references to the same buffer can point to different
+ * parts of the buffer (i.e. their AVBufferRef.data will not be equal).
+ */
+
+/**
+ * A reference counted buffer type. It is opaque and is meant to be used through
+ * references (AVBufferRef).
+ */
+typedef struct AVBuffer AVBuffer;
+
+/**
+ * A reference to a data buffer.
+ *
+ * The size of this struct is not a part of the public ABI and it is not meant
+ * to be allocated directly.
+ */
+typedef struct AVBufferRef {
+ AVBuffer *buffer;
+
+ /**
+ * The data buffer. It is considered writable if and only if
+ * this is the only reference to the buffer, in which case
+ * av_buffer_is_writable() returns 1.
+ */
+ uint8_t *data;
+ /**
+ * Size of data in bytes.
+ */
+ int size;
+} AVBufferRef;
+
+/**
+ * Allocate an AVBuffer of the given size using av_malloc().
+ *
+ * @return an AVBufferRef of given size or NULL when out of memory
+ */
+AVBufferRef *av_buffer_alloc(int size);
+
+/**
+ * Same as av_buffer_alloc(), except the returned buffer will be initialized
+ * to zero.
+ */
+AVBufferRef *av_buffer_allocz(int size);
+
+/**
+ * Always treat the buffer as read-only, even when it has only one
+ * reference.
+ */
+#define AV_BUFFER_FLAG_READONLY (1 << 0)
+
+/**
+ * Create an AVBuffer from an existing array.
+ *
+ * If this function is successful, data is owned by the AVBuffer. The caller may
+ * only access data through the returned AVBufferRef and references derived from
+ * it.
+ * If this function fails, data is left untouched.
+ * @param data data array
+ * @param size size of data in bytes
+ * @param free a callback for freeing this buffer's data
+ * @param opaque parameter to be passed to free
+ * @param flags a combination of AV_BUFFER_FLAG_*
+ *
+ * @return an AVBufferRef referring to data on success, NULL on failure.
+ */
+AVBufferRef *av_buffer_create(uint8_t *data, int size,
+ void (*free)(void *opaque, uint8_t *data),
+ void *opaque, int flags);
+
+/**
+ * Default free callback, which calls av_free() on the buffer data.
+ * This function is meant to be passed to av_buffer_create(), not called
+ * directly.
+ */
+void av_buffer_default_free(void *opaque, uint8_t *data);
+
+/**
+ * Create a new reference to an AVBuffer.
+ *
+ * @return a new AVBufferRef referring to the same AVBuffer as buf or NULL on
+ * failure.
+ */
+AVBufferRef *av_buffer_ref(AVBufferRef *buf);
+
+/**
+ * Free a given reference and automatically free the buffer if there are no more
+ * references to it.
+ *
+ * @param buf the reference to be freed. The pointer is set to NULL on return.
+ */
+void av_buffer_unref(AVBufferRef **buf);
+
+/**
+ * @return 1 if the caller may write to the data referred to by buf (which is
+ * true if and only if buf is the only reference to the underlying AVBuffer).
+ * Return 0 otherwise.
+ * A positive answer is valid until av_buffer_ref() is called on buf.
+ */
+int av_buffer_is_writable(const AVBufferRef *buf);
+
+/**
+ * Create a writable reference from a given buffer reference, avoiding data copy
+ * if possible.
+ *
+ * @param buf buffer reference to make writable. On success, buf is either left
+ * untouched, or it is unreferenced and a new writable AVBufferRef is
+ * written in its place. On failure, buf is left untouched.
+ * @return 0 on success, a negative AVERROR on failure.
+ */
+int av_buffer_make_writable(AVBufferRef **buf);
+
+/**
+ * Reallocate a given buffer.
+ *
+ * @param buf a buffer reference to reallocate. On success, buf will be
+ * unreferenced and a new reference with the required size will be
+ * written in its place. On failure buf will be left untouched. *buf
+ * may be NULL, then a new buffer is allocated.
+ * @param size required new buffer size.
+ * @return 0 on success, a negative AVERROR on failure.
+ *
+ * @note the buffer is actually reallocated with av_realloc() only if it was
+ * initially allocated through av_buffer_realloc(NULL) and there is only one
+ * reference to it (i.e. the one passed to this function). In all other cases
+ * a new buffer is allocated and the data is copied.
+ */
+int av_buffer_realloc(AVBufferRef **buf, int size);
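+
+/*
+ * Illustrative sketch (the buffer size is arbitrary): allocate a buffer, take
+ * a second reference, and release both.
+ *
+ * @code
+ * AVBufferRef *a = av_buffer_alloc(4096);
+ * if (!a)
+ *     return AVERROR(ENOMEM);
+ *
+ * AVBufferRef *b = av_buffer_ref(a);          // second reference, same data
+ * // av_buffer_is_writable(a) now returns 0: two references exist
+ *
+ * av_buffer_unref(&b);                        // drop one reference
+ * // av_buffer_is_writable(a) returns 1 again
+ * av_buffer_unref(&a);                        // last reference, data is freed
+ * @endcode
+ */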
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavu_bufferpool AVBufferPool
+ * @ingroup lavu_data
+ *
+ * @{
+ * AVBufferPool is an API for a lock-free thread-safe pool of AVBuffers.
+ *
+ * Frequently allocating and freeing large buffers may be slow. AVBufferPool is
+ * meant to solve this in cases when the caller needs a set of buffers of the
+ * same size (the most obvious use case being buffers for raw video or audio
+ * frames).
+ *
+ * At the beginning, the user must call av_buffer_pool_init() to create the
+ * buffer pool. Then whenever a buffer is needed, call av_buffer_pool_get() to
+ * get a reference to a new buffer, similar to av_buffer_alloc(). This new
+ * reference works in all aspects the same way as the one created by
+ * av_buffer_alloc(). However, when the last reference to this buffer is
+ * unreferenced, it is returned to the pool instead of being freed and will be
+ * reused for subsequent av_buffer_pool_get() calls.
+ *
+ * When the caller is done with the pool and no longer needs to allocate any new
+ * buffers, av_buffer_pool_uninit() must be called to mark the pool as freeable.
+ * Once all the buffers are released, it will automatically be freed.
+ *
+ * Allocating and releasing buffers with this API is thread-safe as long as
+ * either the default alloc callback is used, or the user-supplied one is
+ * thread-safe.
+ */
+
+/**
+ * The buffer pool. This structure is opaque and not meant to be accessed
+ * directly. It is allocated with av_buffer_pool_init() and freed with
+ * av_buffer_pool_uninit().
+ */
+typedef struct AVBufferPool AVBufferPool;
+
+/**
+ * Allocate and initialize a buffer pool.
+ *
+ * @param size size of each buffer in this pool
+ * @param alloc a function that will be used to allocate new buffers when the
+ * pool is empty. May be NULL, then the default allocator will be used
+ * (av_buffer_alloc()).
+ * @return newly created buffer pool on success, NULL on error.
+ */
+AVBufferPool *av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size));
+
+/**
+ * Mark the pool as being available for freeing. It will actually be freed only
+ * once all the allocated buffers associated with the pool are released. Thus it
+ * is safe to call this function while some of the allocated buffers are still
+ * in use.
+ *
+ * @param pool pointer to the pool to be freed. It will be set to NULL.
+ * @see av_buffer_pool_can_uninit()
+ */
+void av_buffer_pool_uninit(AVBufferPool **pool);
+
+/**
+ * Allocate a new AVBuffer, reusing an old buffer from the pool when available.
+ * This function may be called simultaneously from multiple threads.
+ *
+ * @return a reference to the new buffer on success, NULL on error.
+ */
+AVBufferRef *av_buffer_pool_get(AVBufferPool *pool);
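+
+/*
+ * Illustrative sketch (buffer size and loop count are arbitrary): reuse
+ * fixed-size buffers through a pool.
+ *
+ * @code
+ * AVBufferPool *pool = av_buffer_pool_init(65536, NULL);  // default allocator
+ * if (!pool)
+ *     return AVERROR(ENOMEM);
+ *
+ * for (int i = 0; i < 16; i++) {
+ *     AVBufferRef *buf = av_buffer_pool_get(pool);
+ *     if (!buf)
+ *         break;
+ *     // ... fill buf->data ...
+ *     av_buffer_unref(&buf);                  // returns the buffer to the pool
+ * }
+ *
+ * av_buffer_pool_uninit(&pool);               // freed once all buffers are back
+ * @endcode
+ */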
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_BUFFER_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/channel_layout.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/channel_layout.h
new file mode 100644
index 000000000..6a1f83005
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/channel_layout.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2008 Peter Ross
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CHANNEL_LAYOUT_H
+#define AVUTIL_CHANNEL_LAYOUT_H
+
+#include <stdint.h>
+
+/**
+ * @file
+ * audio channel layout utility functions
+ */
+
+/**
+ * @addtogroup lavu_audio
+ * @{
+ */
+
+/**
+ * @defgroup channel_masks Audio channel masks
+ * @{
+ */
+#define AV_CH_FRONT_LEFT 0x00000001
+#define AV_CH_FRONT_RIGHT 0x00000002
+#define AV_CH_FRONT_CENTER 0x00000004
+#define AV_CH_LOW_FREQUENCY 0x00000008
+#define AV_CH_BACK_LEFT 0x00000010
+#define AV_CH_BACK_RIGHT 0x00000020
+#define AV_CH_FRONT_LEFT_OF_CENTER 0x00000040
+#define AV_CH_FRONT_RIGHT_OF_CENTER 0x00000080
+#define AV_CH_BACK_CENTER 0x00000100
+#define AV_CH_SIDE_LEFT 0x00000200
+#define AV_CH_SIDE_RIGHT 0x00000400
+#define AV_CH_TOP_CENTER 0x00000800
+#define AV_CH_TOP_FRONT_LEFT 0x00001000
+#define AV_CH_TOP_FRONT_CENTER 0x00002000
+#define AV_CH_TOP_FRONT_RIGHT 0x00004000
+#define AV_CH_TOP_BACK_LEFT 0x00008000
+#define AV_CH_TOP_BACK_CENTER 0x00010000
+#define AV_CH_TOP_BACK_RIGHT 0x00020000
+#define AV_CH_STEREO_LEFT 0x20000000 ///< Stereo downmix.
+#define AV_CH_STEREO_RIGHT 0x40000000 ///< See AV_CH_STEREO_LEFT.
+#define AV_CH_WIDE_LEFT 0x0000000080000000ULL
+#define AV_CH_WIDE_RIGHT 0x0000000100000000ULL
+#define AV_CH_SURROUND_DIRECT_LEFT 0x0000000200000000ULL
+#define AV_CH_SURROUND_DIRECT_RIGHT 0x0000000400000000ULL
+#define AV_CH_LOW_FREQUENCY_2 0x0000000800000000ULL
+
+/** Channel mask value used for AVCodecContext.request_channel_layout
+ to indicate that the user requests the channel order of the decoder output
+ to be the native codec channel order. */
+#define AV_CH_LAYOUT_NATIVE 0x8000000000000000ULL
+
+/**
+ * @}
+ * @defgroup channel_mask_c Audio channel convenience macros
+ * @{
+ */
+#define AV_CH_LAYOUT_MONO (AV_CH_FRONT_CENTER)
+#define AV_CH_LAYOUT_STEREO (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT)
+#define AV_CH_LAYOUT_2POINT1 (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_2_1 (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_SURROUND (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER)
+#define AV_CH_LAYOUT_3POINT1 (AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_4POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_4POINT1 (AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_2_2 (AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)
+#define AV_CH_LAYOUT_QUAD (AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_5POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)
+#define AV_CH_LAYOUT_5POINT1 (AV_CH_LAYOUT_5POINT0|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_5POINT0_BACK (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_5POINT1_BACK (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_6POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT0_FRONT (AV_CH_LAYOUT_2_2|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_HEXAGONAL (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1_FRONT (AV_CH_LAYOUT_6POINT0_FRONT|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_7POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_7POINT0_FRONT (AV_CH_LAYOUT_5POINT0|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_7POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_7POINT1_WIDE (AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_OCTAGONAL (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_STEREO_DOWNMIX (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT)
+
+enum AVMatrixEncoding {
+ AV_MATRIX_ENCODING_NONE,
+ AV_MATRIX_ENCODING_DOLBY,
+ AV_MATRIX_ENCODING_DPLII,
+ AV_MATRIX_ENCODING_DPLIIX,
+ AV_MATRIX_ENCODING_DPLIIZ,
+ AV_MATRIX_ENCODING_DOLBYEX,
+ AV_MATRIX_ENCODING_DOLBYHEADPHONE,
+ AV_MATRIX_ENCODING_NB
+};
+
+/**
+ * @}
+ */
+
+/**
+ * Return a channel layout id that matches name, or 0 if no match is found.
+ *
+ * name can be one or several of the following notations,
+ * separated by '+' or '|':
+ * - the name of a standard channel layout (mono, stereo, 4.0, quad, 5.0,
+ * 5.0(side), 5.1, 5.1(side), 7.1, 7.1(wide), downmix);
+ * - the name of a single channel (FL, FR, FC, LFE, BL, BR, FLC, FRC, BC,
+ * SL, SR, TC, TFL, TFC, TFR, TBL, TBC, TBR, DL, DR);
+ * - a number of channels, in decimal, optionally followed by 'c', yielding
+ * the default channel layout for that number of channels (@see
+ * av_get_default_channel_layout);
+ * - a channel layout mask, in hexadecimal starting with "0x" (see the
+ * AV_CH_* macros).
+ *
+ * Example: "stereo+FC" = "2+FC" = "2c+1c" = "0x7"
+ */
+uint64_t av_get_channel_layout(const char *name);
+
+/**
+ * Return a description of a channel layout.
+ * If nb_channels is <= 0, it is guessed from the channel_layout.
+ *
+ * @param buf buffer in which the channel layout string is written
+ * @param buf_size size in bytes of the buffer
+ */
+void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout);
+
+/**
+ * Return the number of channels in the channel layout.
+ */
+int av_get_channel_layout_nb_channels(uint64_t channel_layout);
+
+/**
+ * Return default channel layout for a given number of channels.
+ */
+uint64_t av_get_default_channel_layout(int nb_channels);
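+
+/*
+ * Illustrative sketch (the layout name is arbitrary): map between a layout
+ * name, its channel mask and its channel count.
+ *
+ * @code
+ * uint64_t layout = av_get_channel_layout("5.1");               // 0 if unknown
+ * int      nb     = av_get_channel_layout_nb_channels(layout);  // 6 for 5.1
+ * char     name[64];
+ * av_get_channel_layout_string(name, sizeof(name), nb, layout);
+ * @endcode
+ */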
+
+/**
+ * Get the index of a channel in channel_layout.
+ *
+ * @param channel a channel layout describing exactly one channel which must be
+ * present in channel_layout.
+ *
+ * @return index of channel in channel_layout on success, a negative AVERROR
+ * on error.
+ */
+int av_get_channel_layout_channel_index(uint64_t channel_layout,
+ uint64_t channel);
+
+/**
+ * Get the channel with the given index in channel_layout.
+ */
+uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index);
+
+/**
+ * Get the name of a given channel.
+ *
+ * @return channel name on success, NULL on error.
+ */
+const char *av_get_channel_name(uint64_t channel);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_CHANNEL_LAYOUT_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/common.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/common.h
new file mode 100644
index 000000000..eb40e1299
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/common.h
@@ -0,0 +1,406 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * common internal and external API header
+ */
+
+#ifndef AVUTIL_COMMON_H
+#define AVUTIL_COMMON_H
+
+#include <errno.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "attributes.h"
+#include "version.h"
+#include "libavutil/avconfig.h"
+
+#if AV_HAVE_BIGENDIAN
+# define AV_NE(be, le) (be)
+#else
+# define AV_NE(be, le) (le)
+#endif
+
+//rounded division & shift
+#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b))
+/* assume b>0 */
+#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))
+#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))
+#define FFSIGN(a) ((a) > 0 ? 1 : -1)
+
+#define FFMAX(a,b) ((a) > (b) ? (a) : (b))
+#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c)
+#define FFMIN(a,b) ((a) > (b) ? (b) : (a))
+#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c)
+
+#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0)
+#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))
+#define FFALIGN(x, a) (((x)+(a)-1)&~((a)-1))
+
+/* misc math functions */
+
+#if FF_API_AV_REVERSE
+extern attribute_deprecated const uint8_t av_reverse[256];
+#endif
+
+#ifdef HAVE_AV_CONFIG_H
+# include "config.h"
+# include "intmath.h"
+#endif
+
+/* Pull in unguarded fallback defines at the end of this file. */
+#include "common.h"
+
+#ifndef av_log2
+av_const int av_log2(unsigned v);
+#endif
+
+#ifndef av_log2_16bit
+av_const int av_log2_16bit(unsigned v);
+#endif
+
+/**
+ * Clip a signed integer value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const int av_clip_c(int a, int amin, int amax)
+{
+ if (a < amin) return amin;
+ else if (a > amax) return amax;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the 0-255 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const uint8_t av_clip_uint8_c(int a)
+{
+ if (a&(~0xFF)) return (-a)>>31;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the -128,127 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int8_t av_clip_int8_c(int a)
+{
+ if ((a+0x80) & ~0xFF) return (a>>31) ^ 0x7F;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the 0-65535 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const uint16_t av_clip_uint16_c(int a)
+{
+ if (a&(~0xFFFF)) return (-a)>>31;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the -32768,32767 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int16_t av_clip_int16_c(int a)
+{
+ if ((a+0x8000) & ~0xFFFF) return (a>>31) ^ 0x7FFF;
+ else return a;
+}
+
+/**
+ * Clip a signed 64-bit integer value into the -2147483648,2147483647 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int32_t av_clipl_int32_c(int64_t a)
+{
+ if ((a+0x80000000u) & ~UINT64_C(0xFFFFFFFF)) return (a>>63) ^ 0x7FFFFFFF;
+ else return a;
+}
+
+/**
+ * Clip a signed integer to an unsigned power of two range.
+ * @param a value to clip
+ * @param p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
+{
+ if (a & ~((1<<p) - 1)) return -a >> 31 & ((1<<p) - 1);
+ else return a;
+}
+
+/**
+ * Add two signed 32-bit values with saturation.
+ *
+ * @param a one value
+ * @param b another value
+ * @return sum with signed saturation
+ */
+static av_always_inline int av_sat_add32_c(int a, int b)
+{
+ return av_clipl_int32((int64_t)a + b);
+}
+
+/**
+ * Add a doubled value to another value with saturation at both stages.
+ *
+ * @param a first value
+ * @param b value doubled and added to a
+ * @return sum with signed saturation
+ */
+static av_always_inline int av_sat_dadd32_c(int a, int b)
+{
+ return av_sat_add32(a, av_sat_add32(b, b));
+}
+
+/**
+ * Clip a float value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const float av_clipf_c(float a, float amin, float amax)
+{
+ if (a < amin) return amin;
+ else if (a > amax) return amax;
+ else return a;
+}
+
+/** Compute ceil(log2(x)).
+ * @param x value used to compute ceil(log2(x))
+ * @return computed ceiling of log2(x)
+ */
+static av_always_inline av_const int av_ceil_log2_c(int x)
+{
+ return av_log2((x - 1) << 1);
+}
+
+/**
+ * Count number of bits set to one in x
+ * @param x value to count bits of
+ * @return the number of bits set to one in x
+ */
+static av_always_inline av_const int av_popcount_c(uint32_t x)
+{
+ x -= (x >> 1) & 0x55555555;
+ x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+ x = (x + (x >> 4)) & 0x0F0F0F0F;
+ x += x >> 8;
+ return (x + (x >> 16)) & 0x3F;
+}
+
+/**
+ * Count number of bits set to one in x
+ * @param x value to count bits of
+ * @return the number of bits set to one in x
+ */
+static av_always_inline av_const int av_popcount64_c(uint64_t x)
+{
+ return av_popcount(x) + av_popcount(x >> 32);
+}
+
+#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))
+#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24))
+
+/**
+ * Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.
+ *
+ * @param val Output value, must be an lvalue of type uint32_t.
+ * @param GET_BYTE Expression reading one byte from the input.
+ * Evaluated up to 7 times (4 for the currently
+ * assigned Unicode range). With a memory buffer
+ * input, this could be *ptr++.
+ * @param ERROR Expression to be evaluated on invalid input,
+ * typically a goto statement.
+ */
+#define GET_UTF8(val, GET_BYTE, ERROR)\
+ val= GET_BYTE;\
+ {\
+ uint32_t top = (val & 128) >> 1;\
+ if ((val & 0xc0) == 0x80)\
+ ERROR\
+ while (val & top) {\
+ int tmp= GET_BYTE - 128;\
+ if(tmp>>6)\
+ ERROR\
+ val= (val<<6) + tmp;\
+ top <<= 5;\
+ }\
+ val &= (top << 1) - 1;\
+ }
+
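+/*
+ * Illustrative sketch (the byte pointer "p" and the "fail" label are assumed
+ * to exist in the caller): decoding one UTF-8 sequence with GET_UTF8.
+ *
+ * @code
+ * uint32_t cp;
+ * GET_UTF8(cp, *p++, goto fail;)
+ * // cp now holds the decoded UCS-4 code point
+ * @endcode
+ */
+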
+/**
+ * Convert a UTF-16 character (2 or 4 bytes) to its 32-bit UCS-4 encoded form.
+ *
+ * @param val Output value, must be an lvalue of type uint32_t.
+ * @param GET_16BIT Expression returning two bytes of UTF-16 data converted
+ * to native byte order. Evaluated one or two times.
+ * @param ERROR Expression to be evaluated on invalid input,
+ * typically a goto statement.
+ */
+#define GET_UTF16(val, GET_16BIT, ERROR)\
+ val = GET_16BIT;\
+ {\
+ unsigned int hi = val - 0xD800;\
+ if (hi < 0x800) {\
+ val = GET_16BIT - 0xDC00;\
+ if (val > 0x3FFU || hi > 0x3FFU)\
+ ERROR\
+ val += (hi<<10) + 0x10000;\
+ }\
+ }\
+
+/**
+ * @def PUT_UTF8(val, tmp, PUT_BYTE)
+ * Convert a 32-bit Unicode character to its UTF-8 encoded form (up to 4 bytes long).
+ * @param val is an input-only argument and should be of type uint32_t. It holds
+ * a UCS-4 encoded Unicode character that is to be converted to UTF-8. If
+ * val is given as a function it is executed only once.
+ * @param tmp is a temporary variable and should be of type uint8_t. It
+ * represents an intermediate value during conversion that is to be
+ * output by PUT_BYTE.
+ * @param PUT_BYTE writes the converted UTF-8 bytes to any proper destination.
+ * It could be a function or a statement, and uses tmp as the input byte.
+ * For example, PUT_BYTE could be "*output++ = tmp;". PUT_BYTE will be
+ * executed up to 4 times for values in the valid UTF-8 range and up to
+ * 7 times in the general case, depending on the length of the converted
+ * Unicode character.
+ */
+#define PUT_UTF8(val, tmp, PUT_BYTE)\
+ {\
+ int bytes, shift;\
+ uint32_t in = val;\
+ if (in < 0x80) {\
+ tmp = in;\
+ PUT_BYTE\
+ } else {\
+ bytes = (av_log2(in) + 4) / 5;\
+ shift = (bytes - 1) * 6;\
+ tmp = (256 - (256 >> bytes)) | (in >> shift);\
+ PUT_BYTE\
+ while (shift >= 6) {\
+ shift -= 6;\
+ tmp = 0x80 | ((in >> shift) & 0x3f);\
+ PUT_BYTE\
+ }\
+ }\
+ }
+
+/**
+ * @def PUT_UTF16(val, tmp, PUT_16BIT)
+ * Convert a 32-bit Unicode character to its UTF-16 encoded form (2 or 4 bytes).
+ * @param val is an input-only argument and should be of type uint32_t. It holds
+ * a UCS-4 encoded Unicode character that is to be converted to UTF-16. If
+ * val is given as a function it is executed only once.
+ * @param tmp is a temporary variable and should be of type uint16_t. It
+ * represents an intermediate value during conversion that is to be
+ * output by PUT_16BIT.
+ * @param PUT_16BIT writes the converted UTF-16 data to any proper destination
+ * in the desired endianness. It could be a function or a statement, and uses
+ * tmp as the input value. For example, PUT_16BIT could be "*output++ = tmp;".
+ * PUT_16BIT will be executed 1 or 2 times depending on the input character.
+ */
+#define PUT_UTF16(val, tmp, PUT_16BIT)\
+ {\
+ uint32_t in = val;\
+ if (in < 0x10000) {\
+ tmp = in;\
+ PUT_16BIT\
+ } else {\
+ tmp = 0xD800 | ((in - 0x10000) >> 10);\
+ PUT_16BIT\
+ tmp = 0xDC00 | ((in - 0x10000) & 0x3FF);\
+ PUT_16BIT\
+ }\
+ }\
+
+
+
+#include "mem.h"
+
+#ifdef HAVE_AV_CONFIG_H
+# include "internal.h"
+#endif /* HAVE_AV_CONFIG_H */
+
+#endif /* AVUTIL_COMMON_H */
+
+/*
+ * The following definitions are outside the multiple inclusion guard
+ * to ensure they are immediately available in intmath.h.
+ */
+
+#ifndef av_ceil_log2
+# define av_ceil_log2 av_ceil_log2_c
+#endif
+#ifndef av_clip
+# define av_clip av_clip_c
+#endif
+#ifndef av_clip_uint8
+# define av_clip_uint8 av_clip_uint8_c
+#endif
+#ifndef av_clip_int8
+# define av_clip_int8 av_clip_int8_c
+#endif
+#ifndef av_clip_uint16
+# define av_clip_uint16 av_clip_uint16_c
+#endif
+#ifndef av_clip_int16
+# define av_clip_int16 av_clip_int16_c
+#endif
+#ifndef av_clipl_int32
+# define av_clipl_int32 av_clipl_int32_c
+#endif
+#ifndef av_clip_uintp2
+# define av_clip_uintp2 av_clip_uintp2_c
+#endif
+#ifndef av_sat_add32
+# define av_sat_add32 av_sat_add32_c
+#endif
+#ifndef av_sat_dadd32
+# define av_sat_dadd32 av_sat_dadd32_c
+#endif
+#ifndef av_clipf
+# define av_clipf av_clipf_c
+#endif
+#ifndef av_popcount
+# define av_popcount av_popcount_c
+#endif
+#ifndef av_popcount64
+# define av_popcount64 av_popcount64_c
+#endif
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/cpu.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/cpu.h
new file mode 100644
index 000000000..29036e394
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/cpu.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CPU_H
+#define AVUTIL_CPU_H
+
+#include "version.h"
+
+#define AV_CPU_FLAG_FORCE 0x80000000 /* force usage of selected flags (OR) */
+
+ /* lower 16 bits - CPU features */
+#define AV_CPU_FLAG_MMX 0x0001 ///< standard MMX
+#define AV_CPU_FLAG_MMXEXT 0x0002 ///< SSE integer functions or AMD MMX ext
+#if FF_API_CPU_FLAG_MMX2
+#define AV_CPU_FLAG_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext
+#endif
+#define AV_CPU_FLAG_3DNOW 0x0004 ///< AMD 3DNOW
+#define AV_CPU_FLAG_SSE 0x0008 ///< SSE functions
+#define AV_CPU_FLAG_SSE2 0x0010 ///< PIV SSE2 functions
+#define AV_CPU_FLAG_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster
+ ///< than regular MMX/SSE (e.g. Core1)
+#define AV_CPU_FLAG_3DNOWEXT 0x0020 ///< AMD 3DNowExt
+#define AV_CPU_FLAG_SSE3 0x0040 ///< Prescott SSE3 functions
+#define AV_CPU_FLAG_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster
+ ///< than regular MMX/SSE (e.g. Core1)
+#define AV_CPU_FLAG_SSSE3 0x0080 ///< Conroe SSSE3 functions
+#define AV_CPU_FLAG_ATOM 0x10000000 ///< Atom processor, some SSSE3 instructions are slower
+#define AV_CPU_FLAG_SSE4 0x0100 ///< Penryn SSE4.1 functions
+#define AV_CPU_FLAG_SSE42 0x0200 ///< Nehalem SSE4.2 functions
+#define AV_CPU_FLAG_AVX 0x4000 ///< AVX functions: requires OS support even if YMM registers aren't used
+#define AV_CPU_FLAG_XOP 0x0400 ///< Bulldozer XOP functions
+#define AV_CPU_FLAG_FMA4 0x0800 ///< Bulldozer FMA4 functions
+#define AV_CPU_FLAG_CMOV 0x1000 ///< i686 cmov
+#define AV_CPU_FLAG_AVX2 0x8000 ///< AVX2 functions: requires OS support even if YMM registers aren't used
+
+#define AV_CPU_FLAG_ALTIVEC 0x0001 ///< standard
+
+#define AV_CPU_FLAG_ARMV5TE (1 << 0)
+#define AV_CPU_FLAG_ARMV6 (1 << 1)
+#define AV_CPU_FLAG_ARMV6T2 (1 << 2)
+#define AV_CPU_FLAG_VFP (1 << 3)
+#define AV_CPU_FLAG_VFPV3 (1 << 4)
+#define AV_CPU_FLAG_NEON (1 << 5)
+
+/**
+ * Return the flags which specify extensions supported by the CPU.
+ */
+int av_get_cpu_flags(void);
+
+/**
+ * Set a mask on flags returned by av_get_cpu_flags().
+ * This function is mainly useful for testing.
+ *
+ * @warning this function is not thread safe.
+ */
+void av_set_cpu_flags_mask(int mask);
+
+/**
+ * Parse CPU flags from a string.
+ *
+ * @return a combination of AV_CPU_* flags, negative on error.
+ */
+int av_parse_cpu_flags(const char *s);
+
+/**
+ * @return the number of logical CPU cores present.
+ */
+int av_cpu_count(void);
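+
+/*
+ * Illustrative sketch (sse2_routine() and c_routine() are placeholders):
+ * selecting an optimized code path at runtime.
+ *
+ * @code
+ * int flags = av_get_cpu_flags();
+ * if (flags & AV_CPU_FLAG_SSE2)
+ *     sse2_routine();
+ * else
+ *     c_routine();
+ * @endcode
+ */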
+
+#endif /* AVUTIL_CPU_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/crc.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/crc.h
new file mode 100644
index 000000000..0540619d2
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/crc.h
@@ -0,0 +1,74 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CRC_H
+#define AVUTIL_CRC_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include "attributes.h"
+
+typedef uint32_t AVCRC;
+
+typedef enum {
+ AV_CRC_8_ATM,
+ AV_CRC_16_ANSI,
+ AV_CRC_16_CCITT,
+ AV_CRC_32_IEEE,
+ AV_CRC_32_IEEE_LE, /*< reversed bitorder version of AV_CRC_32_IEEE */
+ AV_CRC_MAX, /*< Not part of public API! Do not use outside libavutil. */
+}AVCRCId;
+
+/**
+ * Initialize a CRC table.
+ * @param ctx must be an array of size sizeof(AVCRC)*257 or sizeof(AVCRC)*1024
+ * @param le If 1, the lowest bit represents the coefficient for the highest
+ * exponent of the corresponding polynomial (both for poly and
+ * actual CRC).
+ * If 0, you must swap the CRC parameter and the result of av_crc
+ * if you need the standard representation (can be simplified in
+ * most cases to e.g. bswap16):
+ * av_bswap32(crc << (32-bits))
+ * @param bits number of bits for the CRC
+ * @param poly generator polynomial without the x**bits coefficient, in the
+ * representation as specified by le
+ * @param ctx_size size of ctx in bytes
+ * @return <0 on failure
+ */
+int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size);
+
+/**
+ * Get an initialized standard CRC table.
+ * @param crc_id ID of a standard CRC
+ * @return a pointer to the CRC table or NULL on failure
+ */
+const AVCRC *av_crc_get_table(AVCRCId crc_id);
+
+/**
+ * Calculate the CRC of a block.
+ * @param crc CRC of previous blocks if any or initial value for CRC
+ * @return CRC updated with the data from the given block
+ *
+ * @see av_crc_init() "le" parameter
+ */
+uint32_t av_crc(const AVCRC *ctx, uint32_t crc,
+ const uint8_t *buffer, size_t length) av_pure;
+
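+/*
+ * Illustrative sketch (the input data is arbitrary, 0 is used as the initial
+ * CRC value): CRC of a buffer with the prebuilt IEEE CRC-32 table.
+ *
+ * @code
+ * const uint8_t data[] = "123456789";
+ * const AVCRC *table = av_crc_get_table(AV_CRC_32_IEEE);
+ * uint32_t crc = av_crc(table, 0, data, sizeof(data) - 1);
+ * @endcode
+ */
+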
+#endif /* AVUTIL_CRC_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/dict.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/dict.h
new file mode 100644
index 000000000..e0a91ae83
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/dict.h
@@ -0,0 +1,146 @@
+/*
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Public dictionary API.
+ */
+
+#ifndef AVUTIL_DICT_H
+#define AVUTIL_DICT_H
+
+/**
+ * @addtogroup lavu_dict AVDictionary
+ * @ingroup lavu_data
+ *
+ * @brief Simple key:value store
+ *
+ * @{
+ * Dictionaries are used for storing key:value pairs. To create
+ * an AVDictionary, simply pass an address of a NULL pointer to
+ * av_dict_set(). NULL can be used as an empty dictionary wherever
+ * a pointer to an AVDictionary is required.
+ * Use av_dict_get() to retrieve an entry or iterate over all
+ * entries and finally av_dict_free() to free the dictionary
+ * and all its contents.
+ *
+ @code
+ AVDictionary *d = NULL; // "create" an empty dictionary
+ AVDictionaryEntry *t = NULL;
+
+ av_dict_set(&d, "foo", "bar", 0); // add an entry
+
+ char *k = av_strdup("key"); // if your strings are already allocated,
+ char *v = av_strdup("value"); // you can avoid copying them like this
+ av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
+
+ while (t = av_dict_get(d, "", t, AV_DICT_IGNORE_SUFFIX)) {
+ <....> // iterate over all entries in d
+ }
+ av_dict_free(&d);
+ @endcode
+ *
+ */
+
+#define AV_DICT_MATCH_CASE 1
+#define AV_DICT_IGNORE_SUFFIX 2
+#define AV_DICT_DONT_STRDUP_KEY 4 /**< Take ownership of a key that's been
+ allocated with av_malloc() and children. */
+#define AV_DICT_DONT_STRDUP_VAL 8 /**< Take ownership of a value that's been
+ allocated with av_malloc() and children. */
+#define AV_DICT_DONT_OVERWRITE 16 ///< Don't overwrite existing entries.
+#define AV_DICT_APPEND 32 /**< If the entry already exists, append to it. Note that no
+ delimiter is added, the strings are simply concatenated. */
+
+typedef struct AVDictionaryEntry {
+ char *key;
+ char *value;
+} AVDictionaryEntry;
+
+typedef struct AVDictionary AVDictionary;
+
+/**
+ * Get a dictionary entry with matching key.
+ *
+ * @param prev Set to the previous matching element to find the next.
+ * If set to NULL the first matching element is returned.
+ * @param flags Allows case as well as suffix-insensitive comparisons.
+ * @return Found entry or NULL; changing the key or value leads to undefined behavior.
+ */
+AVDictionaryEntry *
+av_dict_get(AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags);
+
+/**
+ * Get number of entries in dictionary.
+ *
+ * @param m dictionary
+ * @return number of entries in dictionary
+ */
+int av_dict_count(const AVDictionary *m);
+
+/**
+ * Set the given entry in *pm, overwriting an existing entry.
+ *
+ * @param pm pointer to a pointer to a dictionary struct. If *pm is NULL
+ * a dictionary struct is allocated and put in *pm.
+ * @param key entry key to add to *pm (will be av_strduped depending on flags)
+ * @param value entry value to add to *pm (will be av_strduped depending on flags).
+ * Passing a NULL value will cause an existing entry to be deleted.
+ * @return >= 0 on success otherwise an error code <0
+ */
+int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags);
+
+/**
+ * Parse the key/value pairs list and add to a dictionary.
+ *
+ * @param key_val_sep a 0-terminated list of characters used to separate
+ * key from value
+ * @param pairs_sep a 0-terminated list of characters used to separate
+ * two pairs from each other
+ * @param flags flags to use when adding to dictionary.
+ * AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL
+ * are ignored since the key/value tokens will always
+ * be duplicated.
+ * @return 0 on success, negative AVERROR code on failure
+ */
+int av_dict_parse_string(AVDictionary **pm, const char *str,
+ const char *key_val_sep, const char *pairs_sep,
+ int flags);
+
+/**
+ * Copy entries from one AVDictionary struct into another.
+ * @param dst pointer to a pointer to an AVDictionary struct. If *dst is NULL,
+ * this function will allocate a struct for you and put it in *dst
+ * @param src pointer to source AVDictionary struct
+ * @param flags flags to use when setting entries in *dst
+ * @note metadata is read using the AV_DICT_IGNORE_SUFFIX flag
+ */
+void av_dict_copy(AVDictionary **dst, AVDictionary *src, int flags);
+
+/**
+ * Free all the memory allocated for an AVDictionary struct
+ * and all keys and values.
+ */
+void av_dict_free(AVDictionary **m);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_DICT_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/downmix_info.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/downmix_info.h
new file mode 100644
index 000000000..69969f6fb
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/downmix_info.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2014 Tim Walker <tdskywalker@gmail.com>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_DOWNMIX_INFO_H
+#define AVUTIL_DOWNMIX_INFO_H
+
+#include "frame.h"
+
+/**
+ * @file
+ * audio downmix metadata
+ */
+
+/**
+ * @addtogroup lavu_audio
+ * @{
+ */
+
+/**
+ * @defgroup downmix_info Audio downmix metadata
+ * @{
+ */
+
+/**
+ * Possible downmix types.
+ */
+enum AVDownmixType {
+ AV_DOWNMIX_TYPE_UNKNOWN, /**< Not indicated. */
+ AV_DOWNMIX_TYPE_LORO, /**< Lo/Ro 2-channel downmix (Stereo). */
+ AV_DOWNMIX_TYPE_LTRT, /**< Lt/Rt 2-channel downmix, Dolby Surround compatible. */
+ AV_DOWNMIX_TYPE_DPLII, /**< Lt/Rt 2-channel downmix, Dolby Pro Logic II compatible. */
+ AV_DOWNMIX_TYPE_NB /**< Number of downmix types. Not part of ABI. */
+};
+
+/**
+ * This structure describes optional metadata relevant to a downmix procedure.
+ *
+ * All fields are set by the decoder to the value indicated in the audio
+ * bitstream (if present), or to a "sane" default otherwise.
+ */
+typedef struct AVDownmixInfo {
+ /**
+ * Type of downmix preferred by the mastering engineer.
+ */
+ enum AVDownmixType preferred_downmix_type;
+
+ /**
+ * Absolute scale factor representing the nominal level of the center
+ * channel during a regular downmix.
+ */
+ double center_mix_level;
+
+ /**
+ * Absolute scale factor representing the nominal level of the center
+ * channel during an Lt/Rt compatible downmix.
+ */
+ double center_mix_level_ltrt;
+
+ /**
+ * Absolute scale factor representing the nominal level of the surround
+ * channels during a regular downmix.
+ */
+ double surround_mix_level;
+
+ /**
+ * Absolute scale factor representing the nominal level of the surround
+ * channels during an Lt/Rt compatible downmix.
+ */
+ double surround_mix_level_ltrt;
+
+ /**
+ * Absolute scale factor representing the level at which the LFE data is
+ * mixed into L/R channels during downmixing.
+ */
+ double lfe_mix_level;
+} AVDownmixInfo;
+
+/**
+ * Get a frame's AV_FRAME_DATA_DOWNMIX_INFO side data for editing.
+ *
+ * The side data is created and added to the frame if it's absent.
+ *
+ * @param frame the frame for which the side data is to be obtained.
+ *
+ * @return the AVDownmixInfo structure to be edited by the caller.
+ */
+AVDownmixInfo *av_downmix_info_update_side_data(AVFrame *frame);
+
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_DOWNMIX_INFO_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/error.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/error.h
new file mode 100644
index 000000000..268a0320a
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/error.h
@@ -0,0 +1,82 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * error code definitions
+ */
+
+#ifndef AVUTIL_ERROR_H
+#define AVUTIL_ERROR_H
+
+#include <errno.h>
+#include <stddef.h>
+
+/**
+ * @addtogroup lavu_error
+ *
+ * @{
+ */
+
+
+/* error handling */
+#if EDOM > 0
+#define AVERROR(e) (-(e)) ///< Returns a negative error code from a POSIX error code, to return from library functions.
+#define AVUNERROR(e) (-(e)) ///< Returns a POSIX error code from a library function error return value.
+#else
+/* Some platforms have E* and errno already negated. */
+#define AVERROR(e) (e)
+#define AVUNERROR(e) (e)
+#endif
+
+#define AVERROR_BSF_NOT_FOUND (-0x39acbd08) ///< Bitstream filter not found
+#define AVERROR_DECODER_NOT_FOUND (-0x3cbabb08) ///< Decoder not found
+#define AVERROR_DEMUXER_NOT_FOUND (-0x32babb08) ///< Demuxer not found
+#define AVERROR_ENCODER_NOT_FOUND (-0x3cb1ba08) ///< Encoder not found
+#define AVERROR_EOF (-0x5fb9b0bb) ///< End of file
+#define AVERROR_EXIT (-0x2bb6a7bb) ///< Immediate exit was requested; the called function should not be restarted
+#define AVERROR_FILTER_NOT_FOUND (-0x33b6b908) ///< Filter not found
+#define AVERROR_INVALIDDATA (-0x3ebbb1b7) ///< Invalid data found when processing input
+#define AVERROR_MUXER_NOT_FOUND (-0x27aab208) ///< Muxer not found
+#define AVERROR_OPTION_NOT_FOUND (-0x2bafb008) ///< Option not found
+#define AVERROR_PATCHWELCOME (-0x3aa8beb0) ///< Not yet implemented in Libav, patches welcome
+#define AVERROR_PROTOCOL_NOT_FOUND (-0x30adaf08) ///< Protocol not found
+#define AVERROR_STREAM_NOT_FOUND (-0x2dabac08) ///< Stream not found
+#define AVERROR_BUG (-0x5fb8aabe) ///< Bug detected, please report the issue
+#define AVERROR_UNKNOWN (-0x31b4b1ab) ///< Unknown error, typically from an external library
+#define AVERROR_EXPERIMENTAL (-0x2bb2afa8) ///< Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it.
+
+/**
+ * Put a description of the AVERROR code errnum in errbuf.
+ * In case of failure the global variable errno is set to indicate the
+ * error. Even in case of failure av_strerror() will print a generic
+ * error message indicating the errnum provided to errbuf.
+ *
+ * @param errnum error code to describe
+ * @param errbuf buffer to which description is written
+ * @param errbuf_size the size in bytes of errbuf
+ * @return 0 on success, a negative value if a description for errnum
+ * cannot be found
+ */
+int av_strerror(int errnum, char *errbuf, size_t errbuf_size);
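+
+/*
+ * Illustrative sketch (the error code is arbitrary): turning a negative
+ * AVERROR code into a readable message.
+ *
+ * @code
+ * char msg[128];
+ * int  err = AVERROR(EINVAL);
+ * if (av_strerror(err, msg, sizeof(msg)) < 0) {
+ *     // no specific description found; msg still holds a generic message
+ * }
+ * @endcode
+ */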
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_ERROR_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/eval.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/eval.h
new file mode 100644
index 000000000..ccb29e7a3
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/eval.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * simple arithmetic expression evaluator
+ */
+
+#ifndef AVUTIL_EVAL_H
+#define AVUTIL_EVAL_H
+
+#include "avutil.h"
+
+typedef struct AVExpr AVExpr;
+
+/**
+ * Parse and evaluate an expression.
+ * Note, this is significantly slower than av_expr_eval().
+ *
+ * @param res a pointer to a double where the result value of the
+ * expression is stored, or NAN in case of error
+ * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)"
+ * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0}
+ * @param const_values a zero terminated array of values for the identifiers from const_names
+ * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers
+ * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument
+ * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers
+ * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments
+ * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2
+ * @param log_ctx parent logging context
+ * @return 0 in case of success, a negative value corresponding to an
+ * AVERROR code otherwise
+ */
+int av_expr_parse_and_eval(double *res, const char *s,
+ const char * const *const_names, const double *const_values,
+ const char * const *func1_names, double (* const *funcs1)(void *, double),
+ const char * const *func2_names, double (* const *funcs2)(void *, double, double),
+ void *opaque, int log_offset, void *log_ctx);
+
+/**
+ * Parse an expression.
+ *
+ * @param expr a pointer where an AVExpr containing the parsed
+ * value is stored in case of successful parsing, or NULL otherwise.
+ * The pointed to AVExpr must be freed with av_expr_free() by the user
+ * when it is not needed anymore.
+ * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)"
+ * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0}
+ * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers
+ * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument
+ * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers
+ * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments
+ * @param log_ctx parent logging context
+ * @return 0 in case of success, a negative value corresponding to an
+ * AVERROR code otherwise
+ */
+int av_expr_parse(AVExpr **expr, const char *s,
+ const char * const *const_names,
+ const char * const *func1_names, double (* const *funcs1)(void *, double),
+ const char * const *func2_names, double (* const *funcs2)(void *, double, double),
+ int log_offset, void *log_ctx);
+
+/**
+ * Evaluate a previously parsed expression.
+ *
+ * @param const_values a zero terminated array of values for the identifiers from av_expr_parse() const_names
+ * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2
+ * @return the value of the expression
+ */
+double av_expr_eval(AVExpr *e, const double *const_values, void *opaque);
+
+/**
+ * Free a parsed expression previously created with av_expr_parse().
+ */
+void av_expr_free(AVExpr *e);
+
+/**
+ * Parse the string in numstr and return its value as a double. If
+ * the string is empty, contains only whitespace, or does not contain
+ * an initial substring that has the expected syntax for a
+ * floating-point number, no conversion is performed. In this case,
+ * returns a value of zero and the value returned in tail is the value
+ * of numstr.
+ *
+ * @param numstr a string representing a number, may contain one of
+ * the International System number postfixes, for example 'K', 'M',
+ * 'G'. If 'i' is appended after the postfix, powers of 2 are used
+ * instead of powers of 10. The 'B' postfix multiplies the value by
+ * 8, and can be appended after another postfix or used alone. This
+ * allows using for example 'KB', 'MiB', 'G' and 'B' as postfixes.
+ * @param tail if non-NULL puts here the pointer to the char next
+ * after the last parsed character
+ */
+double av_strtod(const char *numstr, char **tail);
+
+#endif /* AVUTIL_EVAL_H */
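
A minimal sketch of evaluating an expression with one named constant via av_expr_parse_and_eval(); the constant name "PI" and the expression string are illustrative:

    #include <stdio.h>
    #include "libavutil/eval.h"

    static int eval_example(void)
    {
        static const char * const names[]  = { "PI", NULL };
        static const double       values[] = { 3.141592653589793, 0 };
        double res = 0.0;
        int ret = av_expr_parse_and_eval(&res, "2*PI + sin(PI/2)",
                                         names, values,
                                         NULL, NULL, NULL, NULL, /* no custom functions */
                                         NULL, 0, NULL);         /* opaque, log_offset, log_ctx */
        if (ret < 0)
            return ret;
        printf("result = %f\n", res);
        return 0;
    }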
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/fifo.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/fifo.h
new file mode 100644
index 000000000..ea30f5d2b
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/fifo.h
@@ -0,0 +1,131 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * a very simple circular buffer FIFO implementation
+ */
+
+#ifndef AVUTIL_FIFO_H
+#define AVUTIL_FIFO_H
+
+#include <stdint.h>
+#include "avutil.h"
+#include "attributes.h"
+
+typedef struct AVFifoBuffer {
+ uint8_t *buffer;
+ uint8_t *rptr, *wptr, *end;
+ uint32_t rndx, wndx;
+} AVFifoBuffer;
+
+/**
+ * Initialize an AVFifoBuffer.
+ * @param size size of the FIFO in bytes
+ * @return AVFifoBuffer or NULL in case of memory allocation failure
+ */
+AVFifoBuffer *av_fifo_alloc(unsigned int size);
+
+/**
+ * Free an AVFifoBuffer.
+ * @param f AVFifoBuffer to free
+ */
+void av_fifo_free(AVFifoBuffer *f);
+
+/**
+ * Reset the AVFifoBuffer to the state right after av_fifo_alloc(); in particular, it is emptied.
+ * @param f AVFifoBuffer to reset
+ */
+void av_fifo_reset(AVFifoBuffer *f);
+
+/**
+ * Return the amount of data in bytes in the AVFifoBuffer, that is the
+ * amount of data you can read from it.
+ * @param f AVFifoBuffer to read from
+ * @return size
+ */
+int av_fifo_size(AVFifoBuffer *f);
+
+/**
+ * Return the amount of space in bytes in the AVFifoBuffer, that is the
+ * amount of data you can write into it.
+ * @param f AVFifoBuffer to write into
+ * @return size
+ */
+int av_fifo_space(AVFifoBuffer *f);
+
+/**
+ * Feed data from an AVFifoBuffer to a user-supplied callback.
+ * @param f AVFifoBuffer to read from
+ * @param buf_size number of bytes to read
+ * @param func generic read function
+ * @param dest data destination
+ */
+int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int));
+
+/**
+ * Feed data from a user-supplied callback to an AVFifoBuffer.
+ * @param f AVFifoBuffer to write to
+ * @param src data source; non-const since it may be used as a
+ * modifiable context by the function defined in func
+ * @param size number of bytes to write
+ * @param func generic write function; the first parameter is src,
+ * the second is dest_buf, the third is dest_buf_size.
+ * func must return the number of bytes written to dest_buf, or <= 0 to
+ * indicate no more data available to write.
+ * If func is NULL, src is interpreted as a simple byte array for source data.
+ * @return the number of bytes written to the FIFO
+ */
+int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int));
+
+/**
+ * Resize an AVFifoBuffer.
+ * @param f AVFifoBuffer to resize
+ * @param size new AVFifoBuffer size in bytes
+ * @return <0 for failure, >=0 otherwise
+ */
+int av_fifo_realloc2(AVFifoBuffer *f, unsigned int size);
+
+/**
+ * Read and discard the specified amount of data from an AVFifoBuffer.
+ * @param f AVFifoBuffer to read from
+ * @param size amount of data to read in bytes
+ */
+void av_fifo_drain(AVFifoBuffer *f, int size);
+
+/**
+ * Return a pointer to the data stored in a FIFO buffer at a certain offset.
+ * The FIFO buffer is not modified.
+ *
+ * @param f AVFifoBuffer to peek at, f must be non-NULL
+ * @param offs an offset in bytes; its absolute value must be less
+ * than the used buffer size or the returned pointer will
+ * point outside the buffer data.
+ * The used buffer size can be checked with av_fifo_size().
+ */
+static inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs)
+{
+ uint8_t *ptr = f->rptr + offs;
+ if (ptr >= f->end)
+ ptr = f->buffer + (ptr - f->end);
+ else if (ptr < f->buffer)
+ ptr = f->end - (f->buffer - ptr);
+ return ptr;
+}
+
+#endif /* AVUTIL_FIFO_H */
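
A minimal sketch that buffers a few bytes in an AVFifoBuffer and inspects them with the calls documented above; sizes and values are illustrative:

    #include <stdio.h>
    #include "libavutil/fifo.h"

    static int fifo_example(void)
    {
        uint8_t payload[4] = { 1, 2, 3, 4 };
        AVFifoBuffer *f = av_fifo_alloc(16);
        if (!f)
            return -1;

        /* A NULL callback makes av_fifo_generic_write() treat src as a byte array. */
        av_fifo_generic_write(f, payload, sizeof(payload), NULL);
        printf("buffered %d bytes, %d bytes free\n", av_fifo_size(f), av_fifo_space(f));

        /* Peek at the second byte without consuming it, then discard everything. */
        printf("payload[1] = %u\n", (unsigned)*av_fifo_peek2(f, 1));
        av_fifo_drain(f, av_fifo_size(f));

        av_fifo_free(f);
        return 0;
    }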
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/file.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/file.h
new file mode 100644
index 000000000..e3f02a830
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/file.h
@@ -0,0 +1,54 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_FILE_H
+#define AVUTIL_FILE_H
+
+#include <stdint.h>
+
+#include "avutil.h"
+
+/**
+ * @file
+ * Misc file utilities.
+ */
+
+/**
+ * Read the file with name filename, and put its content in a newly
+ * allocated buffer or map it with mmap() when available.
+ * In case of success set *bufptr to the read or mmapped buffer, and
+ * *size to the size in bytes of the buffer in *bufptr.
+ * The returned buffer must be released with av_file_unmap().
+ *
+ * @param log_offset loglevel offset used for logging
+ * @param log_ctx context used for logging
+ * @return a non negative number in case of success, a negative value
+ * corresponding to an AVERROR error code in case of failure
+ */
+int av_file_map(const char *filename, uint8_t **bufptr, size_t *size,
+ int log_offset, void *log_ctx);
+
+/**
+ * Unmap or free the buffer bufptr created by av_file_map().
+ *
+ * @param size size in bytes of bufptr, must be the same as returned
+ * by av_file_map()
+ */
+void av_file_unmap(uint8_t *bufptr, size_t size);
+
+#endif /* AVUTIL_FILE_H */
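
A minimal sketch of mapping a file and releasing it again with av_file_map()/av_file_unmap(); the path is whatever the caller supplies and the helper name is illustrative:

    #include <stdio.h>
    #include "libavutil/file.h"

    static int dump_file_size(const char *path)
    {
        uint8_t *buf  = NULL;
        size_t   size = 0;
        int ret = av_file_map(path, &buf, &size, 0, NULL);
        if (ret < 0)
            return ret;                    /* negative AVERROR on failure */
        printf("%s is %zu bytes\n", path, size);
        av_file_unmap(buf, size);          /* size must match what av_file_map() set */
        return 0;
    }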
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/frame.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/frame.h
new file mode 100644
index 000000000..63ed219f4
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/frame.h
@@ -0,0 +1,552 @@
+/*
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_frame
+ * reference-counted frame API
+ */
+
+#ifndef AVUTIL_FRAME_H
+#define AVUTIL_FRAME_H
+
+#include <stdint.h>
+
+#include "avutil.h"
+#include "buffer.h"
+#include "dict.h"
+#include "rational.h"
+#include "samplefmt.h"
+#include "version.h"
+
+
+/**
+ * @defgroup lavu_frame AVFrame
+ * @ingroup lavu_data
+ *
+ * @{
+ * AVFrame is an abstraction for reference-counted raw multimedia data.
+ */
+
+enum AVFrameSideDataType {
+ /**
+ * The data is the AVPanScan struct defined in libavcodec.
+ */
+ AV_FRAME_DATA_PANSCAN,
+ /**
+ * ATSC A53 Part 4 Closed Captions.
+ * A53 CC bitstream is stored as uint8_t in AVFrameSideData.data.
+ * The number of bytes of CC data is AVFrameSideData.size.
+ */
+ AV_FRAME_DATA_A53_CC,
+ /**
+ * Stereoscopic 3d metadata.
+ * The data is the AVStereo3D struct defined in libavutil/stereo3d.h.
+ */
+ AV_FRAME_DATA_STEREO3D,
+ /**
+ * The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h.
+ */
+ AV_FRAME_DATA_MATRIXENCODING,
+ /**
+ * Metadata relevant to a downmix procedure.
+ * The data is the AVDownmixInfo struct defined in libavutil/downmix_info.h.
+ */
+ AV_FRAME_DATA_DOWNMIX_INFO,
+};
+
+typedef struct AVFrameSideData {
+ enum AVFrameSideDataType type;
+ uint8_t *data;
+ int size;
+ AVDictionary *metadata;
+} AVFrameSideData;
+
+/**
+ * This structure describes decoded (raw) audio or video data.
+ *
+ * AVFrame must be allocated using av_frame_alloc(). Note that this only
+ * allocates the AVFrame itself, the buffers for the data must be managed
+ * through other means (see below).
+ * AVFrame must be freed with av_frame_free().
+ *
+ * AVFrame is typically allocated once and then reused multiple times to hold
+ * different data (e.g. a single AVFrame to hold frames received from a
+ * decoder). In such a case, av_frame_unref() will free any references held by
+ * the frame and reset it to its original clean state before it
+ * is reused again.
+ *
+ * The data described by an AVFrame is usually reference counted through the
+ * AVBuffer API. The underlying buffer references are stored in AVFrame.buf /
+ * AVFrame.extended_buf. An AVFrame is considered to be reference counted if at
+ * least one reference is set, i.e. if AVFrame.buf[0] != NULL. In such a case,
+ * every single data plane must be contained in one of the buffers in
+ * AVFrame.buf or AVFrame.extended_buf.
+ * There may be a single buffer for all the data, or one separate buffer for
+ * each plane, or anything in between.
+ *
+ * sizeof(AVFrame) is not a part of the public ABI, so new fields may be added
+ * to the end with a minor bump.
+ */
+typedef struct AVFrame {
+#define AV_NUM_DATA_POINTERS 8
+ /**
+ * pointer to the picture/channel planes.
+ * This might be different from the first allocated byte
+ */
+ uint8_t *data[AV_NUM_DATA_POINTERS];
+
+ /**
+ * For video, size in bytes of each picture line.
+ * For audio, size in bytes of each plane.
+ *
+ * For audio, only linesize[0] may be set. For planar audio, each channel
+ * plane must be the same size.
+ *
+ * @note The linesize may be larger than the size of usable data -- there
+ * may be extra padding present for performance reasons.
+ */
+ int linesize[AV_NUM_DATA_POINTERS];
+
+ /**
+ * pointers to the data planes/channels.
+ *
+ * For video, this should simply point to data[].
+ *
+ * For planar audio, each channel has a separate data pointer, and
+ * linesize[0] contains the size of each channel buffer.
+ * For packed audio, there is just one data pointer, and linesize[0]
+ * contains the total size of the buffer for all channels.
+ *
+ * Note: Both data and extended_data should always be set in a valid frame,
+ * but for planar audio with more channels than can fit in data,
+ * extended_data must be used in order to access all channels.
+ */
+ uint8_t **extended_data;
+
+ /**
+ * width and height of the video frame
+ */
+ int width, height;
+
+ /**
+ * number of audio samples (per channel) described by this frame
+ */
+ int nb_samples;
+
+ /**
+ * format of the frame, -1 if unknown or unset.
+ * Values correspond to enum AVPixelFormat for video frames,
+ * enum AVSampleFormat for audio.
+ */
+ int format;
+
+ /**
+ * 1 -> keyframe, 0-> not
+ */
+ int key_frame;
+
+ /**
+ * Picture type of the frame.
+ */
+ enum AVPictureType pict_type;
+
+#if FF_API_AVFRAME_LAVC
+ attribute_deprecated
+ uint8_t *base[AV_NUM_DATA_POINTERS];
+#endif
+
+ /**
+ * Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
+ */
+ AVRational sample_aspect_ratio;
+
+ /**
+ * Presentation timestamp in time_base units (time when frame should be shown to user).
+ */
+ int64_t pts;
+
+ /**
+ * PTS copied from the AVPacket that was decoded to produce this frame.
+ */
+ int64_t pkt_pts;
+
+ /**
+ * DTS copied from the AVPacket that triggered returning this frame.
+ */
+ int64_t pkt_dts;
+
+ /**
+ * picture number in bitstream order
+ */
+ int coded_picture_number;
+ /**
+ * picture number in display order
+ */
+ int display_picture_number;
+
+ /**
+ * quality (between 1 (good) and FF_LAMBDA_MAX (bad))
+ */
+ int quality;
+
+#if FF_API_AVFRAME_LAVC
+ attribute_deprecated
+ int reference;
+
+ /**
+ * QP table
+ */
+ attribute_deprecated
+ int8_t *qscale_table;
+ /**
+ * QP store stride
+ */
+ attribute_deprecated
+ int qstride;
+
+ attribute_deprecated
+ int qscale_type;
+
+ /**
+ * mbskip_table[mb]>=1 if MB didn't change
+ * stride= mb_width = (width+15)>>4
+ */
+ attribute_deprecated
+ uint8_t *mbskip_table;
+
+ /**
+ * motion vector table
+ * @code
+ * example:
+ * int mv_sample_log2= 4 - motion_subsample_log2;
+ * int mb_width= (width+15)>>4;
+ * int mv_stride= (mb_width << mv_sample_log2) + 1;
+ * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y];
+ * @endcode
+ */
+ attribute_deprecated
+ int16_t (*motion_val[2])[2];
+
+ /**
+ * macroblock type table
+ * mb_type_base + mb_width + 2
+ */
+ attribute_deprecated
+ uint32_t *mb_type;
+
+ /**
+ * DCT coefficients
+ */
+ attribute_deprecated
+ short *dct_coeff;
+
+ /**
+ * motion reference frame index
+ * the order in which these are stored can depend on the codec.
+ */
+ attribute_deprecated
+ int8_t *ref_index[2];
+#endif
+
+ /**
+ * for some private data of the user
+ */
+ void *opaque;
+
+ /**
+ * error
+ */
+ uint64_t error[AV_NUM_DATA_POINTERS];
+
+#if FF_API_AVFRAME_LAVC
+ attribute_deprecated
+ int type;
+#endif
+
+ /**
+ * When decoding, this signals how much the picture must be delayed.
+ * extra_delay = repeat_pict / (2*fps)
+ */
+ int repeat_pict;
+
+ /**
+ * The content of the picture is interlaced.
+ */
+ int interlaced_frame;
+
+ /**
+ * If the content is interlaced, is top field displayed first.
+ */
+ int top_field_first;
+
+ /**
+ * Tell user application that palette has changed from previous frame.
+ */
+ int palette_has_changed;
+
+#if FF_API_AVFRAME_LAVC
+ attribute_deprecated
+ int buffer_hints;
+
+ /**
+ * Pan scan.
+ */
+ attribute_deprecated
+ struct AVPanScan *pan_scan;
+#endif
+
+ /**
+ * reordered opaque 64bit (generally an integer or a double precision float
+ * PTS but can be anything).
+ * The user sets AVCodecContext.reordered_opaque to represent the input at
+ * that time; the decoder reorders values as needed and sets
+ * AVFrame.reordered_opaque to exactly one of the values provided by the
+ * user through AVCodecContext.reordered_opaque.
+ * @deprecated in favor of pkt_pts
+ */
+ int64_t reordered_opaque;
+
+#if FF_API_AVFRAME_LAVC
+ /**
+ * @deprecated this field is unused
+ */
+ attribute_deprecated void *hwaccel_picture_private;
+
+ attribute_deprecated
+ struct AVCodecContext *owner;
+ attribute_deprecated
+ void *thread_opaque;
+
+ /**
+ * log2 of the size of the block which a single vector in motion_val represents:
+ * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)
+ */
+ attribute_deprecated
+ uint8_t motion_subsample_log2;
+#endif
+
+ /**
+ * Sample rate of the audio data.
+ */
+ int sample_rate;
+
+ /**
+ * Channel layout of the audio data.
+ */
+ uint64_t channel_layout;
+
+ /**
+ * AVBuffer references backing the data for this frame. If all elements of
+ * this array are NULL, then this frame is not reference counted.
+ *
+ * There may be at most one AVBuffer per data plane, so for video this array
+ * always contains all the references. For planar audio with more than
+ * AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in
+ * this array. Then the extra AVBufferRef pointers are stored in the
+ * extended_buf array.
+ */
+ AVBufferRef *buf[AV_NUM_DATA_POINTERS];
+
+ /**
+ * For planar audio which requires more than AV_NUM_DATA_POINTERS
+ * AVBufferRef pointers, this array will hold all the references which
+ * cannot fit into AVFrame.buf.
+ *
+ * Note that this is different from AVFrame.extended_data, which always
+ * contains all the pointers. This array only contains the extra pointers,
+ * which cannot fit into AVFrame.buf.
+ *
+ * This array is always allocated using av_malloc() by whoever constructs
+ * the frame. It is freed in av_frame_unref().
+ */
+ AVBufferRef **extended_buf;
+ /**
+ * Number of elements in extended_buf.
+ */
+ int nb_extended_buf;
+
+ AVFrameSideData **side_data;
+ int nb_side_data;
+
+/**
+ * @defgroup lavu_frame_flags AV_FRAME_FLAGS
+ * Flags describing additional frame properties.
+ *
+ * @{
+ */
+
+/**
+ * The frame data may be corrupted, e.g. due to decoding errors.
+ */
+#define AV_FRAME_FLAG_CORRUPT (1 << 0)
+/**
+ * @}
+ */
+
+ /**
+ * Frame flags, a combination of @ref lavu_frame_flags
+ */
+ int flags;
+} AVFrame;
+
+/**
+ * Allocate an AVFrame and set its fields to default values. The resulting
+ * struct must be freed using av_frame_free().
+ *
+ * @return An AVFrame filled with default values or NULL on failure.
+ *
+ * @note this only allocates the AVFrame itself, not the data buffers. Those
+ * must be allocated through other means, e.g. with av_frame_get_buffer() or
+ * manually.
+ */
+AVFrame *av_frame_alloc(void);
+
+/**
+ * Free the frame and any dynamically allocated objects in it,
+ * e.g. extended_data. If the frame is reference counted, it will be
+ * unreferenced first.
+ *
+ * @param frame frame to be freed. The pointer will be set to NULL.
+ */
+void av_frame_free(AVFrame **frame);
+
+/**
+ * Set up a new reference to the data described by the source frame.
+ *
+ * Copy frame properties from src to dst and create a new reference for each
+ * AVBufferRef from src.
+ *
+ * If src is not reference counted, new buffers are allocated and the data is
+ * copied.
+ *
+ * @return 0 on success, a negative AVERROR on error
+ */
+int av_frame_ref(AVFrame *dst, const AVFrame *src);
+
+/**
+ * Create a new frame that references the same data as src.
+ *
+ * This is a shortcut for av_frame_alloc()+av_frame_ref().
+ *
+ * @return newly created AVFrame on success, NULL on error.
+ */
+AVFrame *av_frame_clone(const AVFrame *src);
+
+/**
+ * Unreference all the buffers referenced by frame and reset the frame fields.
+ */
+void av_frame_unref(AVFrame *frame);
+
+/**
+ * Move everything contained in src to dst and reset src.
+ */
+void av_frame_move_ref(AVFrame *dst, AVFrame *src);
+
+/**
+ * Allocate new buffer(s) for audio or video data.
+ *
+ * The following fields must be set on frame before calling this function:
+ * - format (pixel format for video, sample format for audio)
+ * - width and height for video
+ * - nb_samples and channel_layout for audio
+ *
+ * This function will fill AVFrame.data and AVFrame.buf arrays and, if
+ * necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf.
+ * For planar formats, one buffer will be allocated for each plane.
+ *
+ * @param frame frame in which to store the new buffers.
+ * @param align required buffer size alignment
+ *
+ * @return 0 on success, a negative AVERROR on error.
+ */
+int av_frame_get_buffer(AVFrame *frame, int align);
+
+/**
+ * Check if the frame data is writable.
+ *
+ * @return A positive value if the frame data is writable (which is true if and
+ * only if each of the underlying buffers has only one reference, namely the one
+ * stored in this frame). Return 0 otherwise.
+ *
+ * If 1 is returned the answer is valid until av_buffer_ref() is called on any
+ * of the underlying AVBufferRefs (e.g. through av_frame_ref() or directly).
+ *
+ * @see av_frame_make_writable(), av_buffer_is_writable()
+ */
+int av_frame_is_writable(AVFrame *frame);
+
+/**
+ * Ensure that the frame data is writable, avoiding data copy if possible.
+ *
+ * Do nothing if the frame is writable, allocate new buffers and copy the data
+ * if it is not.
+ *
+ * @return 0 on success, a negative AVERROR on error.
+ *
+ * @see av_frame_is_writable(), av_buffer_is_writable(),
+ * av_buffer_make_writable()
+ */
+int av_frame_make_writable(AVFrame *frame);
+
+/**
+ * Copy only "metadata" fields from src to dst.
+ *
+ * Metadata for the purpose of this function are those fields that do not affect
+ * the data layout in the buffers. E.g. pts, sample rate (for audio) or sample
+ * aspect ratio (for video), but not width/height or channel layout.
+ * Side data is also copied.
+ */
+int av_frame_copy_props(AVFrame *dst, const AVFrame *src);
+
+/**
+ * Get the buffer reference a given data plane is stored in.
+ *
+ * @param plane index of the data plane of interest in frame->extended_data.
+ *
+ * @return the buffer reference that contains the plane or NULL if the input
+ * frame is not valid.
+ */
+AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane);
+
+/**
+ * Add a new side data to a frame.
+ *
+ * @param frame a frame to which the side data should be added
+ * @param type type of the added side data
+ * @param size size of the side data
+ *
+ * @return newly added side data on success, NULL on error
+ */
+AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
+ enum AVFrameSideDataType type,
+ int size);
+
+/**
+ * @return a pointer to the side data of a given type on success, NULL if there
+ * is no side data with such type in this frame.
+ */
+AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
+ enum AVFrameSideDataType type);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_FRAME_H */
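
A minimal sketch of allocating a video AVFrame and its buffers in the order the documentation above requires (format, width and height set before av_frame_get_buffer()); AV_PIX_FMT_YUV420P and the 32-byte alignment are illustrative choices, not requirements:

    #include "libavutil/frame.h"
    #include "libavutil/pixfmt.h"

    static AVFrame *alloc_video_frame(int width, int height)
    {
        AVFrame *frame = av_frame_alloc();
        if (!frame)
            return NULL;
        frame->format = AV_PIX_FMT_YUV420P;   /* must be set before av_frame_get_buffer() */
        frame->width  = width;
        frame->height = height;
        if (av_frame_get_buffer(frame, 32) < 0) {  /* 32-byte alignment */
            av_frame_free(&frame);
            return NULL;
        }
        return frame;                              /* caller frees with av_frame_free() */
    }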
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/hmac.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/hmac.h
new file mode 100644
index 000000000..28c2062b1
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/hmac.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2012 Martin Storsjo
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_HMAC_H
+#define AVUTIL_HMAC_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_hmac HMAC
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+enum AVHMACType {
+ AV_HMAC_MD5,
+ AV_HMAC_SHA1,
+};
+
+typedef struct AVHMAC AVHMAC;
+
+/**
+ * Allocate an AVHMAC context.
+ * @param type The hash function used for the HMAC.
+ */
+AVHMAC *av_hmac_alloc(enum AVHMACType type);
+
+/**
+ * Free an AVHMAC context.
+ * @param ctx The context to free, may be NULL
+ */
+void av_hmac_free(AVHMAC *ctx);
+
+/**
+ * Initialize an AVHMAC context with an authentication key.
+ * @param ctx The HMAC context
+ * @param key The authentication key
+ * @param keylen The length of the key, in bytes
+ */
+void av_hmac_init(AVHMAC *ctx, const uint8_t *key, unsigned int keylen);
+
+/**
+ * Hash data with the HMAC.
+ * @param ctx The HMAC context
+ * @param data The data to hash
+ * @param len The length of the data, in bytes
+ */
+void av_hmac_update(AVHMAC *ctx, const uint8_t *data, unsigned int len);
+
+/**
+ * Finish hashing and output the HMAC digest.
+ * @param ctx The HMAC context
+ * @param out The output buffer to write the digest into
+ * @param outlen The length of the out buffer, in bytes
+ * @return The number of bytes written to out, or a negative error code.
+ */
+int av_hmac_final(AVHMAC *ctx, uint8_t *out, unsigned int outlen);
+
+/**
+ * Hash an array of data with a key.
+ * @param ctx The HMAC context
+ * @param data The data to hash
+ * @param len The length of the data, in bytes
+ * @param key The authentication key
+ * @param keylen The length of the key, in bytes
+ * @param out The output buffer to write the digest into
+ * @param outlen The length of the out buffer, in bytes
+ * @return The number of bytes written to out, or a negative error code.
+ */
+int av_hmac_calc(AVHMAC *ctx, const uint8_t *data, unsigned int len,
+ const uint8_t *key, unsigned int keylen,
+ uint8_t *out, unsigned int outlen);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_HMAC_H */
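
A minimal sketch computing an HMAC-SHA1 digest in one call with av_hmac_calc(); the key and message bytes are illustrative:

    #include "libavutil/hmac.h"

    static int hmac_example(void)
    {
        static const uint8_t key[] = "secret-key";
        static const uint8_t msg[] = "message to authenticate";
        uint8_t digest[20];                       /* an SHA-1 digest is 20 bytes */
        int written;

        AVHMAC *ctx = av_hmac_alloc(AV_HMAC_SHA1);
        if (!ctx)
            return -1;
        written = av_hmac_calc(ctx, msg, sizeof(msg) - 1,
                               key, sizeof(key) - 1,
                               digest, sizeof(digest));
        av_hmac_free(ctx);
        return written;                           /* bytes written or a negative error */
    }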
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/imgutils.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/imgutils.h
new file mode 100644
index 000000000..71510132a
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/imgutils.h
@@ -0,0 +1,138 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_IMGUTILS_H
+#define AVUTIL_IMGUTILS_H
+
+/**
+ * @file
+ * misc image utilities
+ *
+ * @addtogroup lavu_picture
+ * @{
+ */
+
+#include "avutil.h"
+#include "pixdesc.h"
+
+/**
+ * Compute the max pixel step for each plane of an image with a
+ * format described by pixdesc.
+ *
+ * The pixel step is the distance in bytes between the first byte of
+ * the group of bytes which describe a pixel component and the first
+ * byte of the successive group in the same plane for the same
+ * component.
+ *
+ * @param max_pixsteps an array which is filled with the max pixel step
+ * for each plane. Since a plane may contain different pixel
+ * components, the computed max_pixsteps[plane] is relative to the
+ * component in the plane with the max pixel step.
+ * @param max_pixstep_comps an array which is filled with the component
+ * for each plane which has the max pixel step. May be NULL.
+ */
+void av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4],
+ const AVPixFmtDescriptor *pixdesc);
+
+/**
+ * Compute the size of an image line with format pix_fmt and width
+ * width for the plane plane.
+ *
+ * @return the computed size in bytes
+ */
+int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane);
+
+/**
+ * Fill plane linesizes for an image with pixel format pix_fmt and
+ * width width.
+ *
+ * @param linesizes array to be filled with the linesize for each plane
+ * @return >= 0 in case of success, a negative error code otherwise
+ */
+int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width);
+
+/**
+ * Fill plane data pointers for an image with pixel format pix_fmt and
+ * height height.
+ *
+ * @param data pointers array to be filled with the pointer for each image plane
+ * @param ptr the pointer to a buffer which will contain the image
+ * @param linesizes the array containing the linesize for each
+ * plane, should be filled by av_image_fill_linesizes()
+ * @return the size in bytes required for the image buffer, a negative
+ * error code in case of failure
+ */
+int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height,
+ uint8_t *ptr, const int linesizes[4]);
+
+/**
+ * Allocate an image with size w and h and pixel format pix_fmt, and
+ * fill pointers and linesizes accordingly.
+ * The allocated image buffer has to be freed by using
+ * av_freep(&pointers[0]).
+ *
+ * @param align the value to use for buffer size alignment
+ * @return the size in bytes required for the image buffer, a negative
+ * error code in case of failure
+ */
+int av_image_alloc(uint8_t *pointers[4], int linesizes[4],
+ int w, int h, enum AVPixelFormat pix_fmt, int align);
+
+/**
+ * Copy image plane from src to dst.
+ * That is, copy "height" number of lines of "bytewidth" bytes each.
+ * The first byte of each successive line is separated by *_linesize
+ * bytes.
+ *
+ * @param dst_linesize linesize for the image plane in dst
+ * @param src_linesize linesize for the image plane in src
+ */
+void av_image_copy_plane(uint8_t *dst, int dst_linesize,
+ const uint8_t *src, int src_linesize,
+ int bytewidth, int height);
+
+/**
+ * Copy image in src_data to dst_data.
+ *
+ * @param dst_linesizes linesizes for the image in dst_data
+ * @param src_linesizes linesizes for the image in src_data
+ */
+void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4],
+ const uint8_t *src_data[4], const int src_linesizes[4],
+ enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * Check if the given dimension of an image is valid, meaning that all
+ * bytes of the image can be addressed with a signed int.
+ *
+ * @param w the width of the picture
+ * @param h the height of the picture
+ * @param log_offset the offset to sum to the log level for logging with log_ctx
+ * @param log_ctx the parent logging context, it may be NULL
+ * @return >= 0 if valid, a negative error code otherwise
+ */
+int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx);
+
+int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt);
+
+/**
+ * @}
+ */
+
+
+#endif /* AVUTIL_IMGUTILS_H */
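
A minimal sketch allocating an image buffer with av_image_alloc() and freeing it as documented; AV_PIX_FMT_YUV420P, the 16-byte alignment, and av_freep() from mem.h are assumptions about the surrounding libavutil:

    #include <stdio.h>
    #include "libavutil/imgutils.h"
    #include "libavutil/mem.h"            /* av_freep(), assumed available */

    static int image_alloc_example(int w, int h)
    {
        uint8_t *data[4];
        int      linesize[4];
        int ret = av_image_alloc(data, linesize, w, h, AV_PIX_FMT_YUV420P, 16);
        if (ret < 0)
            return ret;                   /* negative error code */
        printf("buffer: %d bytes, luma linesize: %d\n", ret, linesize[0]);
        av_freep(&data[0]);               /* documented way to release the buffer */
        return 0;
    }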
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/intfloat.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/intfloat.h
new file mode 100644
index 000000000..38d26ad87
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/intfloat.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2011 Mans Rullgard
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_INTFLOAT_H
+#define AVUTIL_INTFLOAT_H
+
+#include <stdint.h>
+#include "attributes.h"
+
+union av_intfloat32 {
+ uint32_t i;
+ float f;
+};
+
+union av_intfloat64 {
+ uint64_t i;
+ double f;
+};
+
+/**
+ * Reinterpret a 32-bit integer as a float.
+ */
+static av_always_inline float av_int2float(uint32_t i)
+{
+ union av_intfloat32 v;
+ v.i = i;
+ return v.f;
+}
+
+/**
+ * Reinterpret a float as a 32-bit integer.
+ */
+static av_always_inline uint32_t av_float2int(float f)
+{
+ union av_intfloat32 v;
+ v.f = f;
+ return v.i;
+}
+
+/**
+ * Reinterpret a 64-bit integer as a double.
+ */
+static av_always_inline double av_int2double(uint64_t i)
+{
+ union av_intfloat64 v;
+ v.i = i;
+ return v.f;
+}
+
+/**
+ * Reinterpret a double as a 64-bit integer.
+ */
+static av_always_inline uint64_t av_double2int(double f)
+{
+ union av_intfloat64 v;
+ v.f = f;
+ return v.i;
+}
+
+#endif /* AVUTIL_INTFLOAT_H */
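
A minimal sketch round-tripping a float through its IEEE-754 bit pattern with the helpers above:

    #include <stdio.h>
    #include "libavutil/intfloat.h"

    static void intfloat_example(void)
    {
        uint32_t bits = av_float2int(1.0f);       /* 1.0f is 0x3f800000 */
        printf("bit pattern: 0x%08x, back to float: %f\n", bits, av_int2float(bits));
    }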
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/intreadwrite.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/intreadwrite.h
new file mode 100644
index 000000000..f77fd60f3
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/intreadwrite.h
@@ -0,0 +1,549 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_INTREADWRITE_H
+#define AVUTIL_INTREADWRITE_H
+
+#include <stdint.h>
+#include "libavutil/avconfig.h"
+#include "attributes.h"
+#include "bswap.h"
+
+typedef union {
+ uint64_t u64;
+ uint32_t u32[2];
+ uint16_t u16[4];
+ uint8_t u8 [8];
+ double f64;
+ float f32[2];
+} av_alias av_alias64;
+
+typedef union {
+ uint32_t u32;
+ uint16_t u16[2];
+ uint8_t u8 [4];
+ float f32;
+} av_alias av_alias32;
+
+typedef union {
+ uint16_t u16;
+ uint8_t u8 [2];
+} av_alias av_alias16;
+
+/*
+ * Arch-specific headers can provide any combination of
+ * AV_[RW][BLN](16|24|32|64) and AV_(COPY|SWAP|ZERO)(64|128) macros.
+ * Preprocessor symbols must be defined, even if these are implemented
+ * as inline functions.
+ */
+
+#ifdef HAVE_AV_CONFIG_H
+
+#include "config.h"
+
+#if ARCH_ARM
+# include "arm/intreadwrite.h"
+#elif ARCH_AVR32
+# include "avr32/intreadwrite.h"
+#elif ARCH_MIPS
+# include "mips/intreadwrite.h"
+#elif ARCH_PPC
+# include "ppc/intreadwrite.h"
+#elif ARCH_TOMI
+# include "tomi/intreadwrite.h"
+#elif ARCH_X86
+# include "x86/intreadwrite.h"
+#endif
+
+#endif /* HAVE_AV_CONFIG_H */
+
+/*
+ * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers.
+ */
+
+#if AV_HAVE_BIGENDIAN
+
+# if defined(AV_RN16) && !defined(AV_RB16)
+# define AV_RB16(p) AV_RN16(p)
+# elif !defined(AV_RN16) && defined(AV_RB16)
+# define AV_RN16(p) AV_RB16(p)
+# endif
+
+# if defined(AV_WN16) && !defined(AV_WB16)
+# define AV_WB16(p, v) AV_WN16(p, v)
+# elif !defined(AV_WN16) && defined(AV_WB16)
+# define AV_WN16(p, v) AV_WB16(p, v)
+# endif
+
+# if defined(AV_RN24) && !defined(AV_RB24)
+# define AV_RB24(p) AV_RN24(p)
+# elif !defined(AV_RN24) && defined(AV_RB24)
+# define AV_RN24(p) AV_RB24(p)
+# endif
+
+# if defined(AV_WN24) && !defined(AV_WB24)
+# define AV_WB24(p, v) AV_WN24(p, v)
+# elif !defined(AV_WN24) && defined(AV_WB24)
+# define AV_WN24(p, v) AV_WB24(p, v)
+# endif
+
+# if defined(AV_RN32) && !defined(AV_RB32)
+# define AV_RB32(p) AV_RN32(p)
+# elif !defined(AV_RN32) && defined(AV_RB32)
+# define AV_RN32(p) AV_RB32(p)
+# endif
+
+# if defined(AV_WN32) && !defined(AV_WB32)
+# define AV_WB32(p, v) AV_WN32(p, v)
+# elif !defined(AV_WN32) && defined(AV_WB32)
+# define AV_WN32(p, v) AV_WB32(p, v)
+# endif
+
+# if defined(AV_RN64) && !defined(AV_RB64)
+# define AV_RB64(p) AV_RN64(p)
+# elif !defined(AV_RN64) && defined(AV_RB64)
+# define AV_RN64(p) AV_RB64(p)
+# endif
+
+# if defined(AV_WN64) && !defined(AV_WB64)
+# define AV_WB64(p, v) AV_WN64(p, v)
+# elif !defined(AV_WN64) && defined(AV_WB64)
+# define AV_WN64(p, v) AV_WB64(p, v)
+# endif
+
+#else /* AV_HAVE_BIGENDIAN */
+
+# if defined(AV_RN16) && !defined(AV_RL16)
+# define AV_RL16(p) AV_RN16(p)
+# elif !defined(AV_RN16) && defined(AV_RL16)
+# define AV_RN16(p) AV_RL16(p)
+# endif
+
+# if defined(AV_WN16) && !defined(AV_WL16)
+# define AV_WL16(p, v) AV_WN16(p, v)
+# elif !defined(AV_WN16) && defined(AV_WL16)
+# define AV_WN16(p, v) AV_WL16(p, v)
+# endif
+
+# if defined(AV_RN24) && !defined(AV_RL24)
+# define AV_RL24(p) AV_RN24(p)
+# elif !defined(AV_RN24) && defined(AV_RL24)
+# define AV_RN24(p) AV_RL24(p)
+# endif
+
+# if defined(AV_WN24) && !defined(AV_WL24)
+# define AV_WL24(p, v) AV_WN24(p, v)
+# elif !defined(AV_WN24) && defined(AV_WL24)
+# define AV_WN24(p, v) AV_WL24(p, v)
+# endif
+
+# if defined(AV_RN32) && !defined(AV_RL32)
+# define AV_RL32(p) AV_RN32(p)
+# elif !defined(AV_RN32) && defined(AV_RL32)
+# define AV_RN32(p) AV_RL32(p)
+# endif
+
+# if defined(AV_WN32) && !defined(AV_WL32)
+# define AV_WL32(p, v) AV_WN32(p, v)
+# elif !defined(AV_WN32) && defined(AV_WL32)
+# define AV_WN32(p, v) AV_WL32(p, v)
+# endif
+
+# if defined(AV_RN64) && !defined(AV_RL64)
+# define AV_RL64(p) AV_RN64(p)
+# elif !defined(AV_RN64) && defined(AV_RL64)
+# define AV_RN64(p) AV_RL64(p)
+# endif
+
+# if defined(AV_WN64) && !defined(AV_WL64)
+# define AV_WL64(p, v) AV_WN64(p, v)
+# elif !defined(AV_WN64) && defined(AV_WL64)
+# define AV_WN64(p, v) AV_WL64(p, v)
+# endif
+
+#endif /* !AV_HAVE_BIGENDIAN */
+
+/*
+ * Define AV_[RW]N helper macros to simplify definitions not provided
+ * by per-arch headers.
+ */
+
+#if defined(__GNUC__) && !defined(__TI_COMPILER_VERSION__)
+
+union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias;
+union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias;
+union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias;
+
+# define AV_RN(s, p) (((const union unaligned_##s *) (p))->l)
+# define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v))
+
+#elif defined(__DECC)
+
+# define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p)))
+# define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v))
+
+#elif AV_HAVE_FAST_UNALIGNED
+
+# define AV_RN(s, p) (((const av_alias##s*)(p))->u##s)
+# define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v))
+
+#else
+
+#ifndef AV_RB16
+# define AV_RB16(x) \
+ ((((const uint8_t*)(x))[0] << 8) | \
+ ((const uint8_t*)(x))[1])
+#endif
+#ifndef AV_WB16
+# define AV_WB16(p, d) do { \
+ ((uint8_t*)(p))[1] = (d); \
+ ((uint8_t*)(p))[0] = (d)>>8; \
+ } while(0)
+#endif
+
+#ifndef AV_RL16
+# define AV_RL16(x) \
+ ((((const uint8_t*)(x))[1] << 8) | \
+ ((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL16
+# define AV_WL16(p, d) do { \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ } while(0)
+#endif
+
+#ifndef AV_RB32
+# define AV_RB32(x) \
+ (((uint32_t)((const uint8_t*)(x))[0] << 24) | \
+ (((const uint8_t*)(x))[1] << 16) | \
+ (((const uint8_t*)(x))[2] << 8) | \
+ ((const uint8_t*)(x))[3])
+#endif
+#ifndef AV_WB32
+# define AV_WB32(p, d) do { \
+ ((uint8_t*)(p))[3] = (d); \
+ ((uint8_t*)(p))[2] = (d)>>8; \
+ ((uint8_t*)(p))[1] = (d)>>16; \
+ ((uint8_t*)(p))[0] = (d)>>24; \
+ } while(0)
+#endif
+
+#ifndef AV_RL32
+# define AV_RL32(x) \
+ (((uint32_t)((const uint8_t*)(x))[3] << 24) | \
+ (((const uint8_t*)(x))[2] << 16) | \
+ (((const uint8_t*)(x))[1] << 8) | \
+ ((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL32
+# define AV_WL32(p, d) do { \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[2] = (d)>>16; \
+ ((uint8_t*)(p))[3] = (d)>>24; \
+ } while(0)
+#endif
+
+#ifndef AV_RB64
+# define AV_RB64(x) \
+ (((uint64_t)((const uint8_t*)(x))[0] << 56) | \
+ ((uint64_t)((const uint8_t*)(x))[1] << 48) | \
+ ((uint64_t)((const uint8_t*)(x))[2] << 40) | \
+ ((uint64_t)((const uint8_t*)(x))[3] << 32) | \
+ ((uint64_t)((const uint8_t*)(x))[4] << 24) | \
+ ((uint64_t)((const uint8_t*)(x))[5] << 16) | \
+ ((uint64_t)((const uint8_t*)(x))[6] << 8) | \
+ (uint64_t)((const uint8_t*)(x))[7])
+#endif
+#ifndef AV_WB64
+# define AV_WB64(p, d) do { \
+ ((uint8_t*)(p))[7] = (d); \
+ ((uint8_t*)(p))[6] = (d)>>8; \
+ ((uint8_t*)(p))[5] = (d)>>16; \
+ ((uint8_t*)(p))[4] = (d)>>24; \
+ ((uint8_t*)(p))[3] = (d)>>32; \
+ ((uint8_t*)(p))[2] = (d)>>40; \
+ ((uint8_t*)(p))[1] = (d)>>48; \
+ ((uint8_t*)(p))[0] = (d)>>56; \
+ } while(0)
+#endif
+
+#ifndef AV_RL64
+# define AV_RL64(x) \
+ (((uint64_t)((const uint8_t*)(x))[7] << 56) | \
+ ((uint64_t)((const uint8_t*)(x))[6] << 48) | \
+ ((uint64_t)((const uint8_t*)(x))[5] << 40) | \
+ ((uint64_t)((const uint8_t*)(x))[4] << 32) | \
+ ((uint64_t)((const uint8_t*)(x))[3] << 24) | \
+ ((uint64_t)((const uint8_t*)(x))[2] << 16) | \
+ ((uint64_t)((const uint8_t*)(x))[1] << 8) | \
+ (uint64_t)((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL64
+# define AV_WL64(p, d) do { \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[2] = (d)>>16; \
+ ((uint8_t*)(p))[3] = (d)>>24; \
+ ((uint8_t*)(p))[4] = (d)>>32; \
+ ((uint8_t*)(p))[5] = (d)>>40; \
+ ((uint8_t*)(p))[6] = (d)>>48; \
+ ((uint8_t*)(p))[7] = (d)>>56; \
+ } while(0)
+#endif
+
+#if AV_HAVE_BIGENDIAN
+# define AV_RN(s, p) AV_RB##s(p)
+# define AV_WN(s, p, v) AV_WB##s(p, v)
+#else
+# define AV_RN(s, p) AV_RL##s(p)
+# define AV_WN(s, p, v) AV_WL##s(p, v)
+#endif
+
+#endif /* HAVE_FAST_UNALIGNED */
+
+#ifndef AV_RN16
+# define AV_RN16(p) AV_RN(16, p)
+#endif
+
+#ifndef AV_RN32
+# define AV_RN32(p) AV_RN(32, p)
+#endif
+
+#ifndef AV_RN64
+# define AV_RN64(p) AV_RN(64, p)
+#endif
+
+#ifndef AV_WN16
+# define AV_WN16(p, v) AV_WN(16, p, v)
+#endif
+
+#ifndef AV_WN32
+# define AV_WN32(p, v) AV_WN(32, p, v)
+#endif
+
+#ifndef AV_WN64
+# define AV_WN64(p, v) AV_WN(64, p, v)
+#endif
+
+#if AV_HAVE_BIGENDIAN
+# define AV_RB(s, p) AV_RN##s(p)
+# define AV_WB(s, p, v) AV_WN##s(p, v)
+# define AV_RL(s, p) av_bswap##s(AV_RN##s(p))
+# define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v))
+#else
+# define AV_RB(s, p) av_bswap##s(AV_RN##s(p))
+# define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v))
+# define AV_RL(s, p) AV_RN##s(p)
+# define AV_WL(s, p, v) AV_WN##s(p, v)
+#endif
+
+#define AV_RB8(x) (((const uint8_t*)(x))[0])
+#define AV_WB8(p, d) do { ((uint8_t*)(p))[0] = (d); } while(0)
+
+#define AV_RL8(x) AV_RB8(x)
+#define AV_WL8(p, d) AV_WB8(p, d)
+
+#ifndef AV_RB16
+# define AV_RB16(p) AV_RB(16, p)
+#endif
+#ifndef AV_WB16
+# define AV_WB16(p, v) AV_WB(16, p, v)
+#endif
+
+#ifndef AV_RL16
+# define AV_RL16(p) AV_RL(16, p)
+#endif
+#ifndef AV_WL16
+# define AV_WL16(p, v) AV_WL(16, p, v)
+#endif
+
+#ifndef AV_RB32
+# define AV_RB32(p) AV_RB(32, p)
+#endif
+#ifndef AV_WB32
+# define AV_WB32(p, v) AV_WB(32, p, v)
+#endif
+
+#ifndef AV_RL32
+# define AV_RL32(p) AV_RL(32, p)
+#endif
+#ifndef AV_WL32
+# define AV_WL32(p, v) AV_WL(32, p, v)
+#endif
+
+#ifndef AV_RB64
+# define AV_RB64(p) AV_RB(64, p)
+#endif
+#ifndef AV_WB64
+# define AV_WB64(p, v) AV_WB(64, p, v)
+#endif
+
+#ifndef AV_RL64
+# define AV_RL64(p) AV_RL(64, p)
+#endif
+#ifndef AV_WL64
+# define AV_WL64(p, v) AV_WL(64, p, v)
+#endif
+
+#ifndef AV_RB24
+# define AV_RB24(x) \
+ ((((const uint8_t*)(x))[0] << 16) | \
+ (((const uint8_t*)(x))[1] << 8) | \
+ ((const uint8_t*)(x))[2])
+#endif
+#ifndef AV_WB24
+# define AV_WB24(p, d) do { \
+ ((uint8_t*)(p))[2] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[0] = (d)>>16; \
+ } while(0)
+#endif
+
+#ifndef AV_RL24
+# define AV_RL24(x) \
+ ((((const uint8_t*)(x))[2] << 16) | \
+ (((const uint8_t*)(x))[1] << 8) | \
+ ((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL24
+# define AV_WL24(p, d) do { \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[2] = (d)>>16; \
+ } while(0)
+#endif
+
+/*
+ * The AV_[RW]NA macros access naturally aligned data
+ * in a type-safe way.
+ */
+
+#define AV_RNA(s, p) (((const av_alias##s*)(p))->u##s)
+#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v))
+
+#ifndef AV_RN16A
+# define AV_RN16A(p) AV_RNA(16, p)
+#endif
+
+#ifndef AV_RN32A
+# define AV_RN32A(p) AV_RNA(32, p)
+#endif
+
+#ifndef AV_RN64A
+# define AV_RN64A(p) AV_RNA(64, p)
+#endif
+
+#ifndef AV_WN16A
+# define AV_WN16A(p, v) AV_WNA(16, p, v)
+#endif
+
+#ifndef AV_WN32A
+# define AV_WN32A(p, v) AV_WNA(32, p, v)
+#endif
+
+#ifndef AV_WN64A
+# define AV_WN64A(p, v) AV_WNA(64, p, v)
+#endif
+
+/*
+ * The AV_COPYxxU macros are suitable for copying data to/from unaligned
+ * memory locations.
+ */
+
+#define AV_COPYU(n, d, s) AV_WN##n(d, AV_RN##n(s));
+
+#ifndef AV_COPY16U
+# define AV_COPY16U(d, s) AV_COPYU(16, d, s)
+#endif
+
+#ifndef AV_COPY32U
+# define AV_COPY32U(d, s) AV_COPYU(32, d, s)
+#endif
+
+#ifndef AV_COPY64U
+# define AV_COPY64U(d, s) AV_COPYU(64, d, s)
+#endif
+
+#ifndef AV_COPY128U
+# define AV_COPY128U(d, s) \
+ do { \
+ AV_COPY64U(d, s); \
+ AV_COPY64U((char *)(d) + 8, (const char *)(s) + 8); \
+ } while(0)
+#endif
+
+/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be
+ * naturally aligned. They may be implemented using MMX,
+ * so emms_c() must be called before using any float code
+ * afterwards.
+ */
+
+#define AV_COPY(n, d, s) \
+ (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n)
+
+#ifndef AV_COPY16
+# define AV_COPY16(d, s) AV_COPY(16, d, s)
+#endif
+
+#ifndef AV_COPY32
+# define AV_COPY32(d, s) AV_COPY(32, d, s)
+#endif
+
+#ifndef AV_COPY64
+# define AV_COPY64(d, s) AV_COPY(64, d, s)
+#endif
+
+#ifndef AV_COPY128
+# define AV_COPY128(d, s) \
+ do { \
+ AV_COPY64(d, s); \
+ AV_COPY64((char*)(d)+8, (char*)(s)+8); \
+ } while(0)
+#endif
+
+#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b))
+
+#ifndef AV_SWAP64
+# define AV_SWAP64(a, b) AV_SWAP(64, a, b)
+#endif
+
+#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0)
+
+#ifndef AV_ZERO16
+# define AV_ZERO16(d) AV_ZERO(16, d)
+#endif
+
+#ifndef AV_ZERO32
+# define AV_ZERO32(d) AV_ZERO(32, d)
+#endif
+
+#ifndef AV_ZERO64
+# define AV_ZERO64(d) AV_ZERO(64, d)
+#endif
+
+#ifndef AV_ZERO128
+# define AV_ZERO128(d) \
+ do { \
+ AV_ZERO64(d); \
+ AV_ZERO64((char*)(d)+8); \
+ } while(0)
+#endif
+
+#endif /* AVUTIL_INTREADWRITE_H */
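
A minimal sketch writing and reading fixed-endian 32-bit values with the AV_W/AV_R macros above, independent of host endianness:

    #include <stdio.h>
    #include "libavutil/intreadwrite.h"

    static void intreadwrite_example(void)
    {
        uint8_t buf[8] = { 0 };

        AV_WB32(buf, 0x11223344);       /* big-endian store: buf[0] == 0x11 */
        AV_WL32(buf + 4, 0x11223344);   /* little-endian store: buf[4] == 0x44 */

        printf("read back: 0x%08x (BE) / 0x%08x (LE)\n",
               AV_RB32(buf), AV_RL32(buf + 4));
    }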
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/lfg.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/lfg.h
new file mode 100644
index 000000000..5e526c1da
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/lfg.h
@@ -0,0 +1,62 @@
+/*
+ * Lagged Fibonacci PRNG
+ * Copyright (c) 2008 Michael Niedermayer
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_LFG_H
+#define AVUTIL_LFG_H
+
+typedef struct AVLFG {
+ unsigned int state[64];
+ int index;
+} AVLFG;
+
+void av_lfg_init(AVLFG *c, unsigned int seed);
+
+/**
+ * Get the next random unsigned 32-bit number using an ALFG.
+ *
+ * Please also consider a simple LCG like state = state*1664525 + 1013904223;
+ * it may be good enough and faster for your specific use case.
+ */
+static inline unsigned int av_lfg_get(AVLFG *c){
+ c->state[c->index & 63] = c->state[(c->index-24) & 63] + c->state[(c->index-55) & 63];
+ return c->state[c->index++ & 63];
+}
+
+/**
+ * Get the next random unsigned 32-bit number using a MLFG.
+ *
+ * Please also consider av_lfg_get() above; it is faster.
+ */
+static inline unsigned int av_mlfg_get(AVLFG *c){
+ unsigned int a= c->state[(c->index-55) & 63];
+ unsigned int b= c->state[(c->index-24) & 63];
+ return c->state[c->index++ & 63] = 2*a*b+a+b;
+}
+
+/**
+ * Get the next two numbers generated by a Box-Muller Gaussian
+ * generator using the random numbers issued by lfg.
+ *
+ * @param out array where the two generated numbers are placed
+ */
+void av_bmg_get(AVLFG *lfg, double out[2]);
+
+#endif /* AVUTIL_LFG_H */
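
A minimal sketch seeding the lagged Fibonacci generator and drawing a few values; the seed is illustrative:

    #include <stdio.h>
    #include "libavutil/lfg.h"

    static void lfg_example(void)
    {
        AVLFG rng;
        int i;

        av_lfg_init(&rng, 0xdeadbeef);            /* any 32-bit seed works */
        for (i = 0; i < 3; i++)
            printf("random[%d] = %u\n", i, av_lfg_get(&rng));
    }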
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/log.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/log.h
new file mode 100644
index 000000000..6d26b67db
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/log.h
@@ -0,0 +1,262 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_LOG_H
+#define AVUTIL_LOG_H
+
+#include <stdarg.h>
+#include "avutil.h"
+#include "attributes.h"
+
+/**
+ * Describe the class of an AVClass context structure. That is an
+ * arbitrary struct of which the first field is a pointer to an
+ * AVClass struct (e.g. AVCodecContext, AVFormatContext etc.).
+ */
+typedef struct AVClass {
+ /**
+ * The name of the class; usually it is the same name as the
+ * context structure type to which the AVClass is associated.
+ */
+ const char* class_name;
+
+ /**
+ * A pointer to a function which returns the name of a context
+ * instance ctx associated with the class.
+ */
+ const char* (*item_name)(void* ctx);
+
+ /**
+ * a pointer to the first option specified in the class if any or NULL
+ *
+ * @see av_set_default_options()
+ */
+ const struct AVOption *option;
+
+ /**
+ * LIBAVUTIL_VERSION with which this structure was created.
+ * This is used to allow fields to be added without requiring major
+ * version bumps everywhere.
+ */
+
+ int version;
+
+ /**
+ * Offset in the structure where log_level_offset is stored.
+ * 0 means there is no such variable
+ */
+ int log_level_offset_offset;
+
+ /**
+ * Offset in the structure where a pointer to the parent context for
+ * logging is stored. For example a decoder could pass its AVCodecContext
+ * to eval as such a parent context, which an av_log() implementation
+ * could then leverage to display the parent context.
+ * The offset may be 0, in which case no parent context is available.
+ */
+ int parent_log_context_offset;
+
+ /**
+ * Return next AVOptions-enabled child or NULL
+ */
+ void* (*child_next)(void *obj, void *prev);
+
+ /**
+ * Return an AVClass corresponding to the next potential
+ * AVOptions-enabled child.
+ *
+ * The difference between child_next and this is that
+ * child_next iterates over _already existing_ objects, while
+ * child_class_next iterates over _all possible_ children.
+ */
+ const struct AVClass* (*child_class_next)(const struct AVClass *prev);
+} AVClass;
+
+/**
+ * @addtogroup lavu_log
+ *
+ * @{
+ *
+ * @defgroup lavu_log_constants Logging Constants
+ *
+ * @{
+ */
+
+/**
+ * Print no output.
+ */
+#define AV_LOG_QUIET -8
+
+/**
+ * Something went really wrong and we will crash now.
+ */
+#define AV_LOG_PANIC 0
+
+/**
+ * Something went wrong and recovery is not possible.
+ * For example, no header was found for a format which depends
+ * on headers or an illegal combination of parameters is used.
+ */
+#define AV_LOG_FATAL 8
+
+/**
+ * Something went wrong and cannot losslessly be recovered.
+ * However, not all future data is affected.
+ */
+#define AV_LOG_ERROR 16
+
+/**
+ * Something somehow does not look correct. This may or may not
+ * lead to problems. An example would be the use of '-vstrict -2'.
+ */
+#define AV_LOG_WARNING 24
+
+/**
+ * Standard information.
+ */
+#define AV_LOG_INFO 32
+
+/**
+ * Detailed information.
+ */
+#define AV_LOG_VERBOSE 40
+
+/**
+ * Stuff which is only useful for libav* developers.
+ */
+#define AV_LOG_DEBUG 48
+
+/**
+ * @}
+ */
+
+/**
+ * Send the specified message to the log if the level is less than or equal
+ * to the current av_log_level. By default, all logging messages are sent to
+ * stderr. This behavior can be altered by setting a different logging callback
+ * function.
+ * @see av_log_set_callback
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct.
+ * @param level The importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant".
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ */
+void av_log(void *avcl, int level, const char *fmt, ...) av_printf_format(3, 4);
+
+
+/**
+ * Send the specified message to the log if the level is less than or equal
+ * to the current av_log_level. By default, all logging messages are sent to
+ * stderr. This behavior can be altered by setting a different logging callback
+ * function.
+ * @see av_log_set_callback
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct.
+ * @param level The importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant".
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ * @param vl The arguments referenced by the format string.
+ */
+void av_vlog(void *avcl, int level, const char *fmt, va_list vl);
+
+/**
+ * Get the current log level
+ *
+ * @see lavu_log_constants
+ *
+ * @return Current log level
+ */
+int av_log_get_level(void);
+
+/**
+ * Set the log level
+ *
+ * @see lavu_log_constants
+ *
+ * @param level Logging level
+ */
+void av_log_set_level(int level);
+
+/**
+ * Set the logging callback
+ *
+ * @see av_log_default_callback
+ *
+ * @param callback A logging function with a compatible signature.
+ */
+void av_log_set_callback(void (*callback)(void*, int, const char*, va_list));
+
+/**
+ * Default logging callback
+ *
+ * It prints the message to stderr, optionally colorizing it.
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct.
+ * @param level The importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant".
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ * @param vl The arguments referenced by the format string.
+ */
+void av_log_default_callback(void *avcl, int level, const char *fmt,
+ va_list vl);
+
+/**
+ * Return the context name
+ *
+ * @param ctx The AVClass context
+ *
+ * @return The AVClass class_name
+ */
+const char* av_default_item_name(void* ctx);
+
+/**
+ * av_dlog macros
+ * Useful for printing debug messages that should not be compiled into
+ * normal (non-DEBUG) builds.
+ */
+
+#ifdef DEBUG
+# define av_dlog(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__)
+#else
+# define av_dlog(pctx, ...)
+#endif
+
+/**
+ * Skip repeated messages. This requires the application to use av_log()
+ * instead of (f)printf, as the two would otherwise interfere and, with some
+ * bad luck, produce "Last message repeated x times" lines below (f)printf
+ * messages.
+ * Also, to receive the final "last repeated" line, if any, the application
+ * must call av_log(NULL, AV_LOG_QUIET, ""); at the end.
+ */
+#define AV_LOG_SKIP_REPEATED 1
+void av_log_set_flags(int arg);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_LOG_H */
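
Editorial note, not part of the imported header above: a minimal usage sketch for this logging API, assuming the bundled libavutil headers and library are available. The MyContext struct, my_class and my_log_cb names are illustrative only, not anything defined by the patch.

#include <stdarg.h>
#include <libavutil/log.h>

/* Hypothetical context: the first field must be a pointer to an AVClass. */
typedef struct MyContext {
    const AVClass *av_class;
} MyContext;

static const AVClass my_class = {
    .class_name = "MyContext",
    .item_name  = av_default_item_name,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* Optional custom callback: forward everything to the default handler. */
static void my_log_cb(void *avcl, int level, const char *fmt, va_list vl)
{
    av_log_default_callback(avcl, level, fmt, vl);
}

int main(void)
{
    MyContext ctx = { &my_class };

    av_log_set_level(AV_LOG_VERBOSE);   /* discard anything more verbose than AV_LOG_VERBOSE */
    av_log_set_callback(my_log_cb);

    av_log(&ctx, AV_LOG_INFO,    "opened with %d streams\n", 2);
    av_log(NULL, AV_LOG_WARNING, "message with no attached context\n");
    return 0;
}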
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/lzo.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/lzo.h
new file mode 100644
index 000000000..9d7e8f1dc
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/lzo.h
@@ -0,0 +1,66 @@
+/*
+ * LZO 1x decompression
+ * copyright (c) 2006 Reimar Doeffinger
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_LZO_H
+#define AVUTIL_LZO_H
+
+/**
+ * @defgroup lavu_lzo LZO
+ * @ingroup lavu_crypto
+ *
+ * @{
+ */
+
+#include <stdint.h>
+
+/** @name Error flags returned by av_lzo1x_decode
+ * @{ */
+/// end of the input buffer reached before decoding finished
+#define AV_LZO_INPUT_DEPLETED 1
+/// decoded data did not fit into output buffer
+#define AV_LZO_OUTPUT_FULL 2
+/// a reference to previously decoded data was wrong
+#define AV_LZO_INVALID_BACKPTR 4
+/// a non-specific error in the compressed bitstream
+#define AV_LZO_ERROR 8
+/** @} */
+
+#define AV_LZO_INPUT_PADDING 8
+#define AV_LZO_OUTPUT_PADDING 12
+
+/**
+ * @brief Decodes LZO 1x compressed data.
+ * @param out output buffer
+ * @param outlen size of the output buffer; the number of bytes left is returned here
+ * @param in input buffer
+ * @param inlen size of the input buffer; the number of bytes left is returned here
+ * @return 0 on success, otherwise a combination of the error flags above
+ *
+ * Make sure all buffers are appropriately padded: in must provide
+ * AV_LZO_INPUT_PADDING and out must provide AV_LZO_OUTPUT_PADDING additional bytes.
+ */
+int av_lzo1x_decode(void *out, int *outlen, const void *in, int *inlen);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_LZO_H */
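
Editorial note, not part of the imported header above: a sketch of calling av_lzo1x_decode() with the required padding, assuming the compressed input comes from elsewhere (e.g. a demuxer). decode_block and its parameters are illustrative names only; dst must additionally provide AV_LZO_OUTPUT_PADDING bytes beyond dst_size.

#include <string.h>
#include <libavutil/lzo.h>
#include <libavutil/mem.h>

/* Returns the number of bytes produced, or -1 on error. */
int decode_block(const uint8_t *in, int in_len, uint8_t *dst, int dst_size)
{
    /* Copy the input into a buffer that carries the required zero padding. */
    uint8_t *padded_in = av_mallocz(in_len + AV_LZO_INPUT_PADDING);
    if (!padded_in)
        return -1;
    memcpy(padded_in, in, in_len);

    int outlen = dst_size;   /* on return: bytes left in the output buffer */
    int inlen  = in_len;     /* on return: bytes left in the input buffer  */
    int ret    = av_lzo1x_decode(dst, &outlen, padded_in, &inlen);

    av_free(padded_in);
    if (ret)                 /* any combination of the AV_LZO_* error flags */
        return -1;
    return dst_size - outlen;
}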
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/macros.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/macros.h
new file mode 100644
index 000000000..bf3eb9b9a
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/macros.h
@@ -0,0 +1,48 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu
+ * Utility Preprocessor macros
+ */
+
+#ifndef AVUTIL_MACROS_H
+#define AVUTIL_MACROS_H
+
+/**
+ * @addtogroup preproc_misc Preprocessor String Macros
+ *
+ * String manipulation macros
+ *
+ * @{
+ */
+
+#define AV_STRINGIFY(s) AV_TOSTRING(s)
+#define AV_TOSTRING(s) #s
+
+#define AV_GLUE(a, b) a ## b
+#define AV_JOIN(a, b) AV_GLUE(a, b)
+
+/**
+ * @}
+ */
+
+#define AV_PRAGMA(s) _Pragma(#s)
+
+#endif /* AVUTIL_MACROS_H */
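
Editorial note, not part of the imported header above: a small sketch of the stringification and token-pasting macros; MY_MAJOR and MY_MINOR are hypothetical version macros used only for illustration.

#include <stdio.h>
#include <libavutil/macros.h>

#define MY_MAJOR 55
#define MY_MINOR 3

int main(void)
{
    /* AV_STRINGIFY expands its argument first, then turns it into a string. */
    printf("version: %s.%s\n", AV_STRINGIFY(MY_MAJOR), AV_STRINGIFY(MY_MINOR));

    /* AV_JOIN pastes tokens after expansion: this declares the identifier value_55. */
    int AV_JOIN(value_, MY_MAJOR) = 1;
    printf("%d\n", value_55);
    return 0;
}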
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/mathematics.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/mathematics.h
new file mode 100644
index 000000000..043dd0faf
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/mathematics.h
@@ -0,0 +1,111 @@
+/*
+ * copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_MATHEMATICS_H
+#define AVUTIL_MATHEMATICS_H
+
+#include <stdint.h>
+#include <math.h>
+#include "attributes.h"
+#include "rational.h"
+#include "intfloat.h"
+
+#ifndef M_LOG2_10
+#define M_LOG2_10 3.32192809488736234787 /* log_2 10 */
+#endif
+#ifndef M_PHI
+#define M_PHI 1.61803398874989484820 /* phi / golden ratio */
+#endif
+#ifndef NAN
+#define NAN av_int2float(0x7fc00000)
+#endif
+#ifndef INFINITY
+#define INFINITY av_int2float(0x7f800000)
+#endif
+
+/**
+ * @addtogroup lavu_math
+ * @{
+ */
+
+
+enum AVRounding {
+ AV_ROUND_ZERO = 0, ///< Round toward zero.
+ AV_ROUND_INF = 1, ///< Round away from zero.
+ AV_ROUND_DOWN = 2, ///< Round toward -infinity.
+ AV_ROUND_UP = 3, ///< Round toward +infinity.
+ AV_ROUND_NEAR_INF = 5, ///< Round to nearest and halfway cases away from zero.
+};
+
+/**
+ * Return the greatest common divisor of a and b.
+ * If both a and b are 0, or if either of them is negative, the behavior is
+ * undefined.
+ */
+int64_t av_const av_gcd(int64_t a, int64_t b);
+
+/**
+ * Rescale a 64-bit integer with rounding to nearest.
+ * A simple a*b/c isn't possible as it can overflow.
+ */
+int64_t av_rescale(int64_t a, int64_t b, int64_t c) av_const;
+
+/**
+ * Rescale a 64-bit integer with specified rounding.
+ * A simple a*b/c isn't possible as it can overflow.
+ */
+int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding) av_const;
+
+/**
+ * Rescale a 64-bit integer by 2 rational numbers.
+ */
+int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const;
+
+/**
+ * Rescale a 64-bit integer by 2 rational numbers with specified rounding.
+ */
+int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq,
+ enum AVRounding) av_const;
+
+/**
+ * Compare two timestamps, each expressed in its own time base.
+ * The result of the function is undefined if one of the timestamps
+ * is outside the int64_t range when represented in the other's time base.
+ * @return -1 if ts_a is before ts_b, 1 if ts_a is after ts_b, or 0 if they represent the same position
+ */
+int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b);
+
+/**
+ * Compare 2 integers modulo mod.
+ * That is, we compare integers a and b for which only the least
+ * significant log2(mod) bits are known.
+ *
+ * @param mod must be a power of 2
+ * @return a negative value if a is smaller than b
+ * a positive value if a is greater than b
+ * 0 if a equals b
+ */
+int64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_MATHEMATICS_H */
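
Editorial note, not part of the imported header above: a sketch of the rescaling helpers as they are commonly used for timestamp conversion; the 90 kHz and millisecond time bases are assumptions picked for illustration.

#include <stdio.h>
#include <libavutil/mathematics.h>

int main(void)
{
    AVRational tb_90k = { 1, 90000 };   /* e.g. an MPEG-TS clock */
    AVRational tb_ms  = { 1, 1000 };    /* milliseconds */

    int64_t pts_90k = 180000;           /* 2 seconds in the 90 kHz base */

    /* a * bq / cq with rounding to nearest, without intermediate overflow. */
    int64_t pts_ms = av_rescale_q(pts_90k, tb_90k, tb_ms);
    printf("%lld ms\n", (long long)pts_ms);                        /* 2000 */

    /* Same conversion as a*b/c with explicit rounding toward -infinity. */
    printf("%lld\n", (long long)av_rescale_rnd(pts_90k, 1000, 90000, AV_ROUND_DOWN));

    /* Order two timestamps that live in different time bases. */
    printf("%d\n", av_compare_ts(pts_90k, tb_90k, pts_ms, tb_ms)); /* 0: same position */
    return 0;
}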
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/md5.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/md5.h
new file mode 100644
index 000000000..29e4e7c2b
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/md5.h
@@ -0,0 +1,51 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_MD5_H
+#define AVUTIL_MD5_H
+
+#include <stdint.h>
+
+#include "attributes.h"
+#include "version.h"
+
+/**
+ * @defgroup lavu_md5 MD5
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+#if FF_API_CONTEXT_SIZE
+extern attribute_deprecated const int av_md5_size;
+#endif
+
+struct AVMD5;
+
+struct AVMD5 *av_md5_alloc(void);
+void av_md5_init(struct AVMD5 *ctx);
+void av_md5_update(struct AVMD5 *ctx, const uint8_t *src, const int len);
+void av_md5_final(struct AVMD5 *ctx, uint8_t *dst);
+void av_md5_sum(uint8_t *dst, const uint8_t *src, const int len);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_MD5_H */
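
Editorial note, not part of the imported header above: a sketch showing both the one-shot and the incremental MD5 entry points producing the same 16-byte digest.

#include <stdio.h>
#include <string.h>
#include <libavutil/md5.h>
#include <libavutil/mem.h>

int main(void)
{
    const uint8_t data[] = "hello world";
    int len = (int)strlen((const char *)data);
    uint8_t one_shot[16], incremental[16];

    /* One-shot hashing. */
    av_md5_sum(one_shot, data, len);

    /* Incremental hashing of the same bytes in two chunks. */
    struct AVMD5 *ctx = av_md5_alloc();
    if (!ctx)
        return 1;
    av_md5_init(ctx);
    av_md5_update(ctx, data, 5);
    av_md5_update(ctx, data + 5, len - 5);
    av_md5_final(ctx, incremental);
    av_free(ctx);

    for (int i = 0; i < 16; i++)
        printf("%02x", one_shot[i]);
    printf("\n%s\n", memcmp(one_shot, incremental, 16) == 0 ? "match" : "mismatch");
    return 0;
}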
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/mem.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/mem.h
new file mode 100644
index 000000000..4a5e362ce
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/mem.h
@@ -0,0 +1,265 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * memory handling functions
+ */
+
+#ifndef AVUTIL_MEM_H
+#define AVUTIL_MEM_H
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "attributes.h"
+#include "avutil.h"
+
+/**
+ * @addtogroup lavu_mem
+ * @{
+ */
+
+
+#if defined(__ICC) && __ICC < 1200 || defined(__SUNPRO_C)
+ #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_CONST(n,t,v) const t __attribute__ ((aligned (n))) v
+#elif defined(__TI_COMPILER_VERSION__)
+ #define DECLARE_ALIGNED(n,t,v) \
+ AV_PRAGMA(DATA_ALIGN(v,n)) \
+ t __attribute__((aligned(n))) v
+ #define DECLARE_ASM_CONST(n,t,v) \
+ AV_PRAGMA(DATA_ALIGN(v,n)) \
+ static const t __attribute__((aligned(n))) v
+#elif defined(__GNUC__)
+ #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (n))) v
+#elif defined(_MSC_VER)
+ #define DECLARE_ALIGNED(n,t,v) __declspec(align(n)) t v
+ #define DECLARE_ASM_CONST(n,t,v) __declspec(align(n)) static const t v
+#else
+ #define DECLARE_ALIGNED(n,t,v) t v
+ #define DECLARE_ASM_CONST(n,t,v) static const t v
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+ #define av_malloc_attrib __attribute__((__malloc__))
+#else
+ #define av_malloc_attrib
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4,3)
+ #define av_alloc_size(...) __attribute__((alloc_size(__VA_ARGS__)))
+#else
+ #define av_alloc_size(...)
+#endif
+
+/**
+ * Allocate a block of size bytes with alignment suitable for all
+ * memory accesses (including vectors if available on the CPU).
+ * @param size Size in bytes for the memory block to be allocated.
+ * @return Pointer to the allocated block, NULL if the block cannot
+ * be allocated.
+ * @see av_mallocz()
+ */
+void *av_malloc(size_t size) av_malloc_attrib av_alloc_size(1);
+
+/**
+ * Allocate a block of size * nmemb bytes with av_malloc().
+ * @param nmemb Number of elements
+ * @param size Size of the single element
+ * @return Pointer to the allocated block, NULL if the block cannot
+ * be allocated.
+ * @see av_malloc()
+ */
+av_alloc_size(1, 2) static inline void *av_malloc_array(size_t nmemb, size_t size)
+{
+ if (!size || nmemb >= INT_MAX / size)
+ return NULL;
+ return av_malloc(nmemb * size);
+}
+
+/**
+ * Allocate or reallocate a block of memory.
+ * If ptr is NULL and size > 0, allocate a new block. If
+ * size is zero, free the memory block pointed to by ptr.
+ * @param ptr Pointer to a memory block already allocated with
+ * av_realloc() or NULL.
+ * @param size Size in bytes of the memory block to be allocated or
+ * reallocated.
+ * @return Pointer to a newly-reallocated block or NULL if the block
+ * cannot be reallocated or the function is used to free the memory block.
+ * @warning Pointers originating from the av_malloc() family of functions must
+ * not be passed to av_realloc(). The former can be implemented using
+ * memalign() (or other functions), and there is no guarantee that
+ * pointers from such functions can be passed to realloc() at all.
+ * The situation is undefined according to POSIX and may crash with
+ * some libc implementations.
+ * @see av_fast_realloc()
+ */
+void *av_realloc(void *ptr, size_t size) av_alloc_size(2);
+
+/**
+ * Allocate or reallocate a block of memory.
+ * If *ptr is NULL and size > 0, allocate a new block. If
+ * size is zero, free the memory block pointed to by ptr.
+ * @param ptr Pointer to a pointer to a memory block already allocated
+ * with av_realloc(), or pointer to a pointer to NULL.
+ * The pointer is updated on success, or freed on failure.
+ * @param size Size in bytes for the memory block to be allocated or
+ * reallocated
+ * @return Zero on success, an AVERROR error code on failure.
+ * @warning Pointers originating from the av_malloc() family of functions must
+ * not be passed to av_reallocp(). The former can be implemented using
+ * memalign() (or other functions), and there is no guarantee that
+ * pointers from such functions can be passed to realloc() at all.
+ * The situation is undefined according to POSIX and may crash with
+ * some libc implementations.
+ */
+int av_reallocp(void *ptr, size_t size);
+
+/**
+ * Allocate or reallocate an array.
+ * If ptr is NULL and nmemb > 0, allocate a new block. If
+ * nmemb is zero, free the memory block pointed to by ptr.
+ * @param ptr Pointer to a memory block already allocated with
+ * av_realloc() or NULL.
+ * @param nmemb Number of elements
+ * @param size Size of the single element
+ * @return Pointer to a newly-reallocated block or NULL if the block
+ * cannot be reallocated or the function is used to free the memory block.
+ * @warning Pointers originating from the av_malloc() family of functions must
+ * not be passed to av_realloc(). The former can be implemented using
+ * memalign() (or other functions), and there is no guarantee that
+ * pointers from such functions can be passed to realloc() at all.
+ * The situation is undefined according to POSIX and may crash with
+ * some libc implementations.
+ */
+av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size);
+
+/**
+ * Allocate or reallocate an array through a pointer to a pointer.
+ * If *ptr is NULL and nmemb > 0, allocate a new block. If
+ * nmemb is zero, free the memory block pointed to by ptr.
+ * @param ptr Pointer to a pointer to a memory block already allocated
+ * with av_realloc(), or pointer to a pointer to NULL.
+ * The pointer is updated on success, or freed on failure.
+ * @param nmemb Number of elements
+ * @param size Size of the single element
+ * @return Zero on success, an AVERROR error code on failure.
+ * @warning Pointers originating from the av_malloc() family of functions must
+ * not be passed to av_realloc(). The former can be implemented using
+ * memalign() (or other functions), and there is no guarantee that
+ * pointers from such functions can be passed to realloc() at all.
+ * The situation is undefined according to POSIX and may crash with
+ * some libc implementations.
+ */
+av_alloc_size(2, 3) int av_reallocp_array(void *ptr, size_t nmemb, size_t size);
+
+/**
+ * Free a memory block which has been allocated with av_malloc(z)() or
+ * av_realloc().
+ * @param ptr Pointer to the memory block which should be freed.
+ * @note ptr = NULL is explicitly allowed.
+ * @note It is recommended that you use av_freep() instead.
+ * @see av_freep()
+ */
+void av_free(void *ptr);
+
+/**
+ * Allocate a block of size bytes with alignment suitable for all
+ * memory accesses (including vectors if available on the CPU) and
+ * zero all the bytes of the block.
+ * @param size Size in bytes for the memory block to be allocated.
+ * @return Pointer to the allocated block, NULL if it cannot be allocated.
+ * @see av_malloc()
+ */
+void *av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1);
+
+/**
+ * Allocate a block of size * nmemb bytes with av_mallocz().
+ * @param nmemb Number of elements
+ * @param size Size of the single element
+ * @return Pointer to the allocated block, NULL if the block cannot
+ * be allocated.
+ * @see av_mallocz()
+ * @see av_malloc_array()
+ */
+av_alloc_size(1, 2) static inline void *av_mallocz_array(size_t nmemb, size_t size)
+{
+ if (!size || nmemb >= INT_MAX / size)
+ return NULL;
+ return av_mallocz(nmemb * size);
+}
+
+/**
+ * Duplicate the string s.
+ * @param s string to be duplicated
+ * @return Pointer to a newly-allocated string containing a
+ * copy of s or NULL if the string cannot be allocated.
+ */
+char *av_strdup(const char *s) av_malloc_attrib;
+
+/**
+ * Free a memory block which has been allocated with av_malloc(z)() or
+ * av_realloc() and set the pointer pointing to it to NULL.
+ * @param ptr Pointer to the pointer to the memory block which should
+ * be freed.
+ * @see av_free()
+ */
+void av_freep(void *ptr);
+
+/**
+ * Deliberately overlapping memcpy implementation.
+ * @param dst destination buffer
+ * @param back how many bytes back we start (the initial size of the overlapping window)
+ * @param cnt number of bytes to copy, must be >= 0
+ *
+ * cnt > back is valid; this will copy the bytes we just copied,
+ * thus creating a repeating pattern with a period length of back.
+ */
+void av_memcpy_backptr(uint8_t *dst, int back, int cnt);
+
+/**
+ * Reallocate the given block if it is not large enough, otherwise do nothing.
+ *
+ * @see av_realloc
+ */
+void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Allocate a buffer, reusing the given one if large enough.
+ *
+ * Unlike av_fast_realloc(), the current buffer contents might not be
+ * preserved, and on error the old buffer is freed, so no special
+ * handling to avoid memory leaks is necessary.
+ *
+ * @param ptr pointer to a pointer to an already allocated buffer; overwritten with a pointer to the new buffer
+ * @param size size of the buffer *ptr points to
+ * @param min_size minimum size of the *ptr buffer after returning; *ptr will be NULL and
+ * *size 0 if an error occurred.
+ */
+void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_MEM_H */
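
Editorial note, not part of the imported header above: a sketch of the allocation helpers declared here; it deliberately keeps av_malloc()-family pointers away from the realloc family, per the warnings above.

#include <stdio.h>
#include <libavutil/mem.h>

int main(void)
{
    /* Zero-initialized array allocation with overflow checking. */
    int *values = av_mallocz_array(16, sizeof(*values));
    if (!values)
        return 1;
    values[0] = 42;

    /* Growable array: start from NULL so only realloc-family pointers get reallocated. */
    int *table = NULL;
    int *grown = av_realloc_array(table, 32, sizeof(*table));
    if (grown)
        table = grown;

    char *name = av_strdup("demo");
    printf("%s %d\n", name ? name : "(null)", values[0]);

    /* av_freep() frees the block and sets the pointer to NULL. */
    av_freep(&name);
    av_freep(&table);
    av_freep(&values);
    return 0;
}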
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/old_pix_fmts.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/old_pix_fmts.h
new file mode 100644
index 000000000..d3e1e5b24
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/old_pix_fmts.h
@@ -0,0 +1,134 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_OLD_PIX_FMTS_H
+#define AVUTIL_OLD_PIX_FMTS_H
+
+/*
+ * This header exists to prevent new pixel formats from being accidentally added
+ * to the deprecated list.
+ * Do not include it directly. It will be removed on the next major bump.
+ *
+ * Do not add new items to this list. Use the AVPixelFormat enum instead.
+ */
+ PIX_FMT_NONE = AV_PIX_FMT_NONE,
+ PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
+ PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
+ PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB...
+ PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR...
+ PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+ PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
+ PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
+ PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
+ PIX_FMT_GRAY8, ///< Y , 8bpp
+ PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
+ PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
+ PIX_FMT_PAL8, ///< 8 bit with PIX_FMT_RGB32 palette
+ PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range
+ PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range
+ PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range
+#if FF_API_XVMC
+ PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing
+ PIX_FMT_XVMC_MPEG2_IDCT,
+#endif /* FF_API_XVMC */
+ PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
+ PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
+ PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
+ PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
+ PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
+ PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
+ PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
+ PIX_FMT_NV21, ///< as above, but U and V bytes are swapped
+
+ PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
+ PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
+ PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
+ PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
+
+ PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian
+ PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian
+ PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
+ PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range
+ PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
+#if FF_API_VDPAU
+ PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+#endif
+ PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
+ PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian
+
+ PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
+ PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
+ PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0
+ PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0
+
+ PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
+ PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
+ PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1
+ PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1
+
+ PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
+ PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
+ PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+
+ PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+#if FF_API_VDPAU
+ PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+#endif
+ PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer
+
+ PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0
+ PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0
+ PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1
+ PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1
+ PIX_FMT_Y400A, ///< 8bit gray, 8bit alpha
+ PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
+ PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian
+ PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ PIX_FMT_VDA_VLD, ///< hardware decoding through VDA
+ PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp
+ PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big endian
+ PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little endian
+ PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big endian
+ PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little endian
+ PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big endian
+ PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little endian
+ PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
+
+#endif /* AVUTIL_OLD_PIX_FMTS_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/opt.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/opt.h
new file mode 100644
index 000000000..0181379b7
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/opt.h
@@ -0,0 +1,516 @@
+/*
+ * AVOptions
+ * copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_OPT_H
+#define AVUTIL_OPT_H
+
+/**
+ * @file
+ * AVOptions
+ */
+
+#include "rational.h"
+#include "avutil.h"
+#include "dict.h"
+#include "log.h"
+
+/**
+ * @defgroup avoptions AVOptions
+ * @ingroup lavu_data
+ * @{
+ * AVOptions provide a generic system to declare options on arbitrary structs
+ * ("objects"). An option can have a help text, a type and a range of possible
+ * values. Options may then be enumerated, read and written to.
+ *
+ * @section avoptions_implement Implementing AVOptions
+ * This section describes how to add AVOptions capabilities to a struct.
+ *
+ * All AVOptions-related information is stored in an AVClass. Therefore
+ * the first member of the struct must be a pointer to an AVClass describing it.
+ * The option field of the AVClass must be set to a NULL-terminated static array
+ * of AVOptions. Each AVOption must have a non-empty name, a type, a default
+ * value and for number-type AVOptions also a range of allowed values. It must
+ * also declare an offset in bytes from the start of the struct, where the field
+ * associated with this AVOption is located. Other fields in the AVOption struct
+ * should also be set when applicable, but are not required.
+ *
+ * The following example illustrates an AVOptions-enabled struct:
+ * @code
+ * typedef struct test_struct {
+ * AVClass *class;
+ * int int_opt;
+ * char *str_opt;
+ * uint8_t *bin_opt;
+ * int bin_len;
+ * } test_struct;
+ *
+ * static const AVOption test_options[] = {
+ * { "test_int", "This is a test option of int type.", offsetof(test_struct, int_opt),
+ * AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX },
+ * { "test_str", "This is a test option of string type.", offsetof(test_struct, str_opt),
+ * AV_OPT_TYPE_STRING },
+ * { "test_bin", "This is a test option of binary type.", offsetof(test_struct, bin_opt),
+ * AV_OPT_TYPE_BINARY },
+ * { NULL },
+ * };
+ *
+ * static const AVClass test_class = {
+ * .class_name = "test class",
+ * .item_name = av_default_item_name,
+ * .option = test_options,
+ * .version = LIBAVUTIL_VERSION_INT,
+ * };
+ * @endcode
+ *
+ * Next, when allocating your struct, you must ensure that the AVClass pointer
+ * is set to the correct value. Then, av_opt_set_defaults() must be called to
+ * initialize defaults. After that the struct is ready to be used with the
+ * AVOptions API.
+ *
+ * When cleaning up, you may use the av_opt_free() function to automatically
+ * free all the allocated string and binary options.
+ *
+ * Continuing with the above example:
+ *
+ * @code
+ * test_struct *alloc_test_struct(void)
+ * {
+ * test_struct *ret = av_malloc(sizeof(*ret));
+ * ret->class = &test_class;
+ * av_opt_set_defaults(ret);
+ * return ret;
+ * }
+ * void free_test_struct(test_struct **foo)
+ * {
+ * av_opt_free(*foo);
+ * av_freep(foo);
+ * }
+ * @endcode
+ *
+ * @subsection avoptions_implement_nesting Nesting
+ * It may happen that an AVOptions-enabled struct contains another
+ * AVOptions-enabled struct as a member (e.g. AVCodecContext in
+ * libavcodec exports generic options, while its priv_data field exports
+ * codec-specific options). In such a case, it is possible to set up the
+ * parent struct to export a child's options. To do that, simply
+ * implement AVClass.child_next() and AVClass.child_class_next() in the
+ * parent struct's AVClass.
+ * Assuming that the test_struct from above now also contains a
+ * child_struct field:
+ *
+ * @code
+ * typedef struct child_struct {
+ * AVClass *class;
+ * int flags_opt;
+ * } child_struct;
+ * static const AVOption child_opts[] = {
+ * { "test_flags", "This is a test option of flags type.",
+ * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX },
+ * { NULL },
+ * };
+ * static const AVClass child_class = {
+ * .class_name = "child class",
+ * .item_name = av_default_item_name,
+ * .option = child_opts,
+ * .version = LIBAVUTIL_VERSION_INT,
+ * };
+ *
+ * void *child_next(void *obj, void *prev)
+ * {
+ * test_struct *t = obj;
+ * if (!prev && t->child_struct)
+ * return t->child_struct;
+ *         return NULL;
+ *     }
+ *     const AVClass *child_class_next(const AVClass *prev)
+ * {
+ * return prev ? NULL : &child_class;
+ * }
+ * @endcode
+ * Putting child_next() and child_class_next() as defined above into
+ * test_class will now make child_struct's options accessible through
+ * test_struct (again, proper setup as described above needs to be done on
+ * child_struct right after it is created).
+ *
+ * From the above example it might not be clear why both child_next()
+ * and child_class_next() are needed. The distinction is that child_next()
+ * iterates over actually existing objects, while child_class_next()
+ * iterates over all possible child classes. E.g. if an AVCodecContext
+ * was initialized to use a codec which has private options, then its
+ * child_next() will return AVCodecContext.priv_data and finish
+ * iterating. OTOH child_class_next() on AVCodecContext.av_class will
+ * iterate over all available codecs with private options.
+ *
+ * @subsection avoptions_implement_named_constants Named constants
+ * It is possible to create named constants for options. Simply set the unit
+ * field of the option to which the constants should apply to a string, and
+ * create the constants themselves as options of type AV_OPT_TYPE_CONST
+ * with their unit field set to the same string.
+ * Their default_val field should contain the value of the named
+ * constant.
+ * For example, to add some named constants for the test_flags option
+ * above, put the following into the child_opts array:
+ * @code
+ * { "test_flags", "This is a test option of flags type.",
+ * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX, "test_unit" },
+ * { "flag1", "This is a flag with value 16", 0, AV_OPT_TYPE_CONST, { .i64 = 16 }, 0, 0, "test_unit" },
+ * @endcode
+ *
+ * @section avoptions_use Using AVOptions
+ * This section deals with accessing options in an AVOptions-enabled struct.
+ * Such structs in Libav are e.g. AVCodecContext in libavcodec or
+ * AVFormatContext in libavformat.
+ *
+ * @subsection avoptions_use_examine Examining AVOptions
+ * The basic functions for examining options are av_opt_next(), which iterates
+ * over all options defined for one object, and av_opt_find(), which searches
+ * for an option with the given name.
+ *
+ * The situation is more complicated with nesting. An AVOptions-enabled struct
+ * may have AVOptions-enabled children. Passing the AV_OPT_SEARCH_CHILDREN flag
+ * to av_opt_find() will make the function search children recursively.
+ *
+ * For enumerating there are basically two cases. The first is when you want to
+ * get all options that may potentially exist on the struct and its children
+ * (e.g. when constructing documentation). In that case you should call
+ * av_opt_child_class_next() recursively on the parent struct's AVClass. The
+ * second case is when you have an already initialized struct with all its
+ * children and you want to get all options that can be actually written or read
+ * from it. In that case you should call av_opt_child_next() recursively (and
+ * av_opt_next() on each result).
+ *
+ * @subsection avoptions_use_get_set Reading and writing AVOptions
+ * When setting options, you often have a string read directly from the
+ * user. In such a case, simply passing it to av_opt_set() is enough. For
+ * non-string type options, av_opt_set() will parse the string according to the
+ * option type.
+ *
+ * Similarly av_opt_get() will read any option type and convert it to a string
+ * which will be returned. Do not forget that the string is allocated, so you
+ * have to free it with av_free().
+ *
+ * In some cases it may be more convenient to put all options into an
+ * AVDictionary and call av_opt_set_dict() on it. A specific case of this
+ * is the format/codec open functions in lavf/lavc, which take a dictionary
+ * filled with options as a parameter. This allows setting some options
+ * that cannot be set otherwise, since e.g. the input file format is not known
+ * before the file is actually opened.
+ */
+
+enum AVOptionType{
+ AV_OPT_TYPE_FLAGS,
+ AV_OPT_TYPE_INT,
+ AV_OPT_TYPE_INT64,
+ AV_OPT_TYPE_DOUBLE,
+ AV_OPT_TYPE_FLOAT,
+ AV_OPT_TYPE_STRING,
+ AV_OPT_TYPE_RATIONAL,
+ AV_OPT_TYPE_BINARY, ///< offset must point to a pointer immediately followed by an int for the length
+ AV_OPT_TYPE_CONST = 128,
+};
+
+/**
+ * AVOption
+ */
+typedef struct AVOption {
+ const char *name;
+
+ /**
+ * short English help text
+ * @todo What about other languages?
+ */
+ const char *help;
+
+ /**
+ * The offset relative to the context structure where the option
+ * value is stored. It should be 0 for named constants.
+ */
+ int offset;
+ enum AVOptionType type;
+
+ /**
+ * the default value for scalar options
+ */
+ union {
+ int64_t i64;
+ double dbl;
+ const char *str;
+ /* TODO those are unused now */
+ AVRational q;
+ } default_val;
+ double min; ///< minimum valid value for the option
+ double max; ///< maximum valid value for the option
+
+ int flags;
+#define AV_OPT_FLAG_ENCODING_PARAM 1 ///< a generic parameter which can be set by the user for muxing or encoding
+#define AV_OPT_FLAG_DECODING_PARAM 2 ///< a generic parameter which can be set by the user for demuxing or decoding
+#define AV_OPT_FLAG_METADATA 4 ///< some data extracted or inserted into the file like title, comment, ...
+#define AV_OPT_FLAG_AUDIO_PARAM 8
+#define AV_OPT_FLAG_VIDEO_PARAM 16
+#define AV_OPT_FLAG_SUBTITLE_PARAM 32
+//FIXME think about enc-audio, ... style flags
+
+ /**
+ * The logical unit to which the option belongs. Non-constant
+ * options and corresponding named constants share the same
+ * unit. May be NULL.
+ */
+ const char *unit;
+} AVOption;
+
+/**
+ * Show the obj options.
+ *
+ * @param req_flags requested flags for the options to show. Show only the
+ * options for which (opt->flags & req_flags) is set.
+ * @param rej_flags rejected flags for the options to show. Show only the
+ * options for which !(opt->flags & rej_flags) holds.
+ * @param av_log_obj log context to use for showing the options
+ */
+int av_opt_show2(void *obj, void *av_log_obj, int req_flags, int rej_flags);
+
+/**
+ * Set the values of all AVOption fields to their default values.
+ *
+ * @param s an AVOption-enabled struct (its first member must be a pointer to AVClass)
+ */
+void av_opt_set_defaults(void *s);
+
+/**
+ * Parse the key/value pairs list in opts. For each key/value pair
+ * found, stores the value in the field in ctx that is named like the
+ * key. ctx must be an AVClass context, storing is done using
+ * AVOptions.
+ *
+ * @param key_val_sep a 0-terminated list of characters used to
+ * separate key from value
+ * @param pairs_sep a 0-terminated list of characters used to separate
+ * two pairs from each other
+ * @return the number of successfully set key/value pairs, or a negative
+ * value corresponding to an AVERROR code in case of error:
+ * AVERROR(EINVAL) if opts cannot be parsed,
+ * the error code issued by av_set_string3() if a key/value pair
+ * cannot be set
+ */
+int av_set_options_string(void *ctx, const char *opts,
+ const char *key_val_sep, const char *pairs_sep);
+
+/**
+ * Free all string and binary options in obj.
+ */
+void av_opt_free(void *obj);
+
+/**
+ * Check whether a particular flag is set in a flags field.
+ *
+ * @param field_name the name of the flag field option
+ * @param flag_name the name of the flag to check
+ * @return non-zero if the flag is set, zero if the flag isn't set,
+ * isn't of the right type, or the flags field doesn't exist.
+ */
+int av_opt_flag_is_set(void *obj, const char *field_name, const char *flag_name);
+
+/*
+ * Set all the options from a given dictionary on an object.
+ *
+ * @param obj a struct whose first element is a pointer to AVClass
+ * @param options options to process. This dictionary will be freed and replaced
+ * by a new one containing all options not found in obj.
+ * Of course this new dictionary needs to be freed by caller
+ * with av_dict_free().
+ *
+ * @return 0 on success, a negative AVERROR if some option was found in obj,
+ * but could not be set.
+ *
+ * @see av_dict_copy()
+ */
+int av_opt_set_dict(void *obj, struct AVDictionary **options);
+
+/**
+ * @defgroup opt_eval_funcs Evaluating option strings
+ * @{
+ * This group of functions can be used to evaluate option strings
+ * and get numbers out of them. They do the same thing as av_opt_set(),
+ * except the result is written into the caller-supplied pointer.
+ *
+ * @param obj a struct whose first element is a pointer to AVClass.
+ * @param o an option for which the string is to be evaluated.
+ * @param val string to be evaluated.
+ * @param *_out value of the string will be written here.
+ *
+ * @return 0 on success, a negative number on failure.
+ */
+int av_opt_eval_flags (void *obj, const AVOption *o, const char *val, int *flags_out);
+int av_opt_eval_int (void *obj, const AVOption *o, const char *val, int *int_out);
+int av_opt_eval_int64 (void *obj, const AVOption *o, const char *val, int64_t *int64_out);
+int av_opt_eval_float (void *obj, const AVOption *o, const char *val, float *float_out);
+int av_opt_eval_double(void *obj, const AVOption *o, const char *val, double *double_out);
+int av_opt_eval_q (void *obj, const AVOption *o, const char *val, AVRational *q_out);
+/**
+ * @}
+ */
+
+#define AV_OPT_SEARCH_CHILDREN 0x0001 /**< Search in possible children of the
+ given object first. */
+/**
+ * The obj passed to av_opt_find() is fake -- only a double pointer to AVClass
+ * instead of a required pointer to a struct containing AVClass. This is
+ * useful for searching for options without needing to allocate the corresponding
+ * object.
+ */
+#define AV_OPT_SEARCH_FAKE_OBJ 0x0002
+
+/**
+ * Look for an option in an object. Consider only options which
+ * have all the specified flags set.
+ *
+ * @param[in] obj A pointer to a struct whose first element is a
+ * pointer to an AVClass.
+ * Alternatively a double pointer to an AVClass, if
+ * AV_OPT_SEARCH_FAKE_OBJ search flag is set.
+ * @param[in] name The name of the option to look for.
+ * @param[in] unit When searching for named constants, name of the unit
+ * it belongs to.
+ * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG).
+ * @param search_flags A combination of AV_OPT_SEARCH_*.
+ *
+ * @return A pointer to the option found, or NULL if no option
+ * was found.
+ *
+ * @note Options found with AV_OPT_SEARCH_CHILDREN flag may not be settable
+ * directly with av_set_string3(). Use special calls which take an options
+ * AVDictionary (e.g. avformat_open_input()) to set options found with this
+ * flag.
+ */
+const AVOption *av_opt_find(void *obj, const char *name, const char *unit,
+ int opt_flags, int search_flags);
+
+/**
+ * Look for an option in an object. Consider only options which
+ * have all the specified flags set.
+ *
+ * @param[in] obj A pointer to a struct whose first element is a
+ * pointer to an AVClass.
+ * Alternatively a double pointer to an AVClass, if
+ * AV_OPT_SEARCH_FAKE_OBJ search flag is set.
+ * @param[in] name The name of the option to look for.
+ * @param[in] unit When searching for named constants, name of the unit
+ * it belongs to.
+ * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG).
+ * @param search_flags A combination of AV_OPT_SEARCH_*.
+ * @param[out] target_obj if non-NULL, an object to which the option belongs will be
+ * written here. It may be different from obj if AV_OPT_SEARCH_CHILDREN is present
+ * in search_flags. This parameter is ignored if search_flags contain
+ * AV_OPT_SEARCH_FAKE_OBJ.
+ *
+ * @return A pointer to the option found, or NULL if no option
+ * was found.
+ */
+const AVOption *av_opt_find2(void *obj, const char *name, const char *unit,
+ int opt_flags, int search_flags, void **target_obj);
+
+/**
+ * Iterate over all AVOptions belonging to obj.
+ *
+ * @param obj an AVOptions-enabled struct or a double pointer to an
+ * AVClass describing it.
+ * @param prev result of the previous call to av_opt_next() on this object
+ * or NULL
+ * @return next AVOption or NULL
+ */
+const AVOption *av_opt_next(void *obj, const AVOption *prev);
+
+/**
+ * Iterate over AVOptions-enabled children of obj.
+ *
+ * @param prev result of a previous call to this function or NULL
+ * @return next AVOptions-enabled child or NULL
+ */
+void *av_opt_child_next(void *obj, void *prev);
+
+/**
+ * Iterate over potential AVOptions-enabled children of parent.
+ *
+ * @param prev result of a previous call to this function or NULL
+ * @return AVClass corresponding to next potential child or NULL
+ */
+const AVClass *av_opt_child_class_next(const AVClass *parent, const AVClass *prev);
+
+/**
+ * @defgroup opt_set_funcs Option setting functions
+ * @{
+ * Those functions set the field of obj with the given name to value.
+ *
+ * @param[in] obj A struct whose first element is a pointer to an AVClass.
+ * @param[in] name the name of the field to set
+ * @param[in] val The value to set. In case of av_opt_set() if the field is not
+ * of a string type, then the given string is parsed.
+ * SI postfixes and some named scalars are supported.
+ * If the field is of a numeric type, it has to be a numeric or named
+ * scalar. Behavior with more than one scalar and +- infix operators
+ * is undefined.
+ * If the field is of a flags type, it has to be a sequence of numeric
+ * scalars or named flags separated by '+' or '-'. Prefixing a flag
+ * with '+' causes it to be set without affecting the other flags;
+ * similarly, '-' unsets a flag.
+ * @param search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN
+ * is passed here, then the option may be set on a child of obj.
+ *
+ * @return 0 if the value has been set, or an AVERROR code in case of
+ * error:
+ * AVERROR_OPTION_NOT_FOUND if no matching option exists
+ * AVERROR(ERANGE) if the value is out of range
+ * AVERROR(EINVAL) if the value is not valid
+ */
+int av_opt_set (void *obj, const char *name, const char *val, int search_flags);
+int av_opt_set_int (void *obj, const char *name, int64_t val, int search_flags);
+int av_opt_set_double(void *obj, const char *name, double val, int search_flags);
+int av_opt_set_q (void *obj, const char *name, AVRational val, int search_flags);
+int av_opt_set_bin (void *obj, const char *name, const uint8_t *val, int size, int search_flags);
+/**
+ * @}
+ */
+
+/**
+ * @defgroup opt_get_funcs Option getting functions
+ * @{
+ * Those functions get a value of the option with the given name from an object.
+ *
+ * @param[in] obj a struct whose first element is a pointer to an AVClass.
+ * @param[in] name name of the option to get.
+ * @param[in] search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN
+ * is passed here, then the option may be found in a child of obj.
+ * @param[out] out_val value of the option will be written here
+ * @return 0 on success, a negative error code otherwise
+ */
+/**
+ * @note the returned string will be av_malloc()ed and must be av_free()ed by the caller
+ */
+int av_opt_get (void *obj, const char *name, int search_flags, uint8_t **out_val);
+int av_opt_get_int (void *obj, const char *name, int search_flags, int64_t *out_val);
+int av_opt_get_double(void *obj, const char *name, int search_flags, double *out_val);
+int av_opt_get_q (void *obj, const char *name, int search_flags, AVRational *out_val);
+/**
+ * @}
+ * @}
+ */
+
+#endif /* AVUTIL_OPT_H */
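
Editorial note, not part of the imported header above: a sketch of the consumer side of AVOptions, reusing the test_struct, test_class and alloc_test_struct() definitions from the documentation example earlier in this header; those names exist only in that example.

#include <stdio.h>
#include <libavutil/opt.h>
#include <libavutil/mem.h>

void demo(void)
{
    test_struct *obj = alloc_test_struct();

    /* Set by name; av_opt_set() parses the string according to the option type. */
    if (av_opt_set(obj, "test_int", "21", 0) < 0)
        printf("could not set test_int\n");
    av_opt_set_int(obj, "test_int", 42, 0);

    /* Read back as a string; the result is av_malloc()ed and must be freed. */
    uint8_t *val = NULL;
    if (av_opt_get(obj, "test_int", 0, &val) >= 0) {
        printf("test_int = %s\n", (char *)val);
        av_free(val);
    }

    /* Enumerate every option declared on the object. */
    const AVOption *o = NULL;
    while ((o = av_opt_next(obj, o)))
        printf("option: %s (%s)\n", o->name, o->help ? o->help : "");

    av_opt_free(obj);   /* frees string/binary option storage */
    av_free(obj);
}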
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/parseutils.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/parseutils.h
new file mode 100644
index 000000000..0844abb2f
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/parseutils.h
@@ -0,0 +1,124 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PARSEUTILS_H
+#define AVUTIL_PARSEUTILS_H
+
+#include <time.h>
+
+#include "rational.h"
+
+/**
+ * @file
+ * misc parsing utilities
+ */
+
+/**
+ * Parse str and put in width_ptr and height_ptr the detected values.
+ *
+ * @param[in,out] width_ptr pointer to the variable which will contain the detected
+ * width value
+ * @param[in,out] height_ptr pointer to the variable which will contain the detected
+ * height value
+ * @param[in] str the string to parse: it has to be a string in the format
+ * width x height or a valid video size abbreviation.
+ * @return >= 0 on success, a negative error code otherwise
+ */
+int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str);
+
+/**
+ * Parse str and store the detected values in *rate.
+ *
+ * @param[in,out] rate pointer to the AVRational which will contain the detected
+ * frame rate
+ * @param[in] str the string to parse: it has to be a string in the format
+ * rate_num / rate_den, a float number or a valid video rate abbreviation
+ * @return >= 0 on success, a negative error code otherwise
+ */
+int av_parse_video_rate(AVRational *rate, const char *str);
+
+/**
+ * Put the RGBA values that correspond to color_string in rgba_color.
+ *
+ * @param color_string a string specifying a color. It can be the name of
+ * a color (case insensitive match) or a [0x|#]RRGGBB[AA] sequence,
+ * possibly followed by "@" and a string representing the alpha
+ * component.
+ * The alpha component may be a string composed by "0x" followed by an
+ * hexadecimal number or a decimal number between 0.0 and 1.0, which
+ * represents the opacity value (0x00/0.0 means completely transparent,
+ * 0xff/1.0 completely opaque).
+ * If the alpha component is not specified then 0xff is assumed.
+ * The string "random" will result in a random color.
+ * @param slen length of the initial part of color_string containing the
+ * color. It can be set to -1 if color_string is a null terminated string
+ * containing nothing else than the color.
+ * @return >= 0 in case of success, a negative value in case of
+ * failure (for example if color_string cannot be parsed).
+ */
+int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen,
+ void *log_ctx);
+
+/**
+ * Parse timestr and return in *time a corresponding number of
+ * microseconds.
+ *
+ * @param timeval the number of microseconds corresponding to the string in
+ * timestr is written here. If the string represents a duration, it
+ * is the number of microseconds contained in the time interval. If
+ * the string is a date, it is the number of microseconds elapsed since
+ * January 1st, 1970 up to the time of the parsed date. If timestr cannot
+ * be successfully parsed, *timeval is set to INT64_MIN.
+ *
+ * @param timestr a string representing a date or a duration.
+ * - If a date the syntax is:
+ * @code
+ * [{YYYY-MM-DD|YYYYMMDD}[T|t| ]]{{HH[:MM[:SS[.m...]]]}|{HH[MM[SS[.m...]]]}}[Z]
+ * now
+ * @endcode
+ * If the value is "now" it takes the current time.
+ * Time is local time unless Z is appended, in which case it is
+ * interpreted as UTC.
+ * If the year-month-day part is not specified it takes the current
+ * year-month-day.
+ * - If a duration the syntax is:
+ * @code
+ * [-]HH[:MM[:SS[.m...]]]
+ * [-]S+[.m...]
+ * @endcode
+ * @param duration flag which tells how to interpret timestr, if not
+ * zero timestr is interpreted as a duration, otherwise as a date
+ * @return 0 in case of success, a negative value corresponding to an
+ * AVERROR code otherwise
+ */
+int av_parse_time(int64_t *timeval, const char *timestr, int duration);
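+
+/*
+ * For example, parsing a duration of one and a half minutes (a sketch;
+ * the string is arbitrary):
+ *
+ *     int64_t t = 0;
+ *     if (av_parse_time(&t, "00:01:30", 1) >= 0) {
+ *         // t == 90000000 (90 seconds expressed in microseconds)
+ *     }
+ */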
+
+/**
+ * Attempt to find a specific tag in a URL-style string.
+ *
+ * The syntax of info is '?tag1=val1&tag2=val2...'. Little URL decoding is
+ * done. If tag1 is found, its value is copied into arg, a buffer of
+ * arg_size bytes.
+ * Return 1 if the tag was found, 0 otherwise.
+ */
+int av_find_info_tag(char *arg, int arg_size, const char *tag1, const char *info);
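+
+/*
+ * For example, extracting one tag from a query string (a sketch; the
+ * query string and tag name are made up for illustration):
+ *
+ *     char value[64];
+ *     if (av_find_info_tag(value, sizeof(value), "codec", "?codec=h264&rate=25")) {
+ *         // value now holds "h264"
+ *     }
+ */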
+
+/**
+ * Convert the decomposed UTC time in tm to a time_t value.
+ */
+time_t av_timegm(struct tm *tm);
+
+#endif /* AVUTIL_PARSEUTILS_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/pixdesc.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/pixdesc.h
new file mode 100644
index 000000000..e5a16f418
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/pixdesc.h
@@ -0,0 +1,276 @@
+/*
+ * pixel format descriptor
+ * Copyright (c) 2009 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PIXDESC_H
+#define AVUTIL_PIXDESC_H
+
+#include <inttypes.h>
+
+#include "attributes.h"
+#include "pixfmt.h"
+
+typedef struct AVComponentDescriptor{
+ uint16_t plane :2; ///< which of the 4 planes contains the component
+
+ /**
+ * Number of elements between 2 horizontally consecutive pixels minus 1.
+ * Elements are bits for bitstream formats, bytes otherwise.
+ */
+ uint16_t step_minus1 :3;
+
+ /**
+ * Number of elements before the component of the first pixel plus 1.
+ * Elements are bits for bitstream formats, bytes otherwise.
+ */
+ uint16_t offset_plus1 :3;
+ uint16_t shift :3; ///< number of least significant bits that must be shifted away to get the value
+ uint16_t depth_minus1 :4; ///< number of bits in the component minus 1
+}AVComponentDescriptor;
+
+/**
+ * Descriptor that unambiguously describes how the bits of a pixel are
+ * stored in the up to 4 data planes of an image. It also stores the
+ * subsampling factors and number of components.
+ *
+ * @note This is separate from the colorspace (RGB, YCbCr, YPbPr, JPEG-style
+ * YUV and all the YUV variants): AVPixFmtDescriptor just stores how values
+ * are stored, not what these values represent.
+ */
+typedef struct AVPixFmtDescriptor{
+ const char *name;
+ uint8_t nb_components; ///< The number of components each pixel has, (1-4)
+
+ /**
+ * Amount to shift the luma width right to find the chroma width.
+ * For example, for YV12 this is 1.
+ * chroma_width = -((-luma_width) >> log2_chroma_w)
+ * The negations in the formula above ensure that the result rounds up.
+ * This value only refers to the chroma components.
+ */
+ uint8_t log2_chroma_w; ///< chroma_width = -((-luma_width )>>log2_chroma_w)
+
+ /**
+ * Amount to shift the luma height right to find the chroma height.
+ * For example, for YV12 this is 1.
+ * chroma_height = -((-luma_height) >> log2_chroma_h)
+ * The negations in the formula above ensure that the result rounds up.
+ * This value only refers to the chroma components.
+ */
+ uint8_t log2_chroma_h;
+ uint8_t flags;
+
+ /**
+ * Parameters that describe how pixels are packed. If the format
+ * has chroma components, they must be stored in comp[1] and
+ * comp[2].
+ */
+ AVComponentDescriptor comp[4];
+}AVPixFmtDescriptor;
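+
+/*
+ * For example, for a 4:2:0 format such as yuv420p (log2_chroma_w ==
+ * log2_chroma_h == 1), the chroma plane dimensions follow from the luma
+ * dimensions via the rounding-up formula documented above (a sketch;
+ * desc is assumed to have been obtained with av_pix_fmt_desc_get(),
+ * declared later in this header):
+ *
+ *     int luma_w = 1920, luma_h = 1080;
+ *     int chroma_w = -((-luma_w) >> desc->log2_chroma_w); // 960
+ *     int chroma_h = -((-luma_h) >> desc->log2_chroma_h); // 540
+ */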
+
+/**
+ * Pixel format is big-endian.
+ */
+#define AV_PIX_FMT_FLAG_BE (1 << 0)
+/**
+ * Pixel format has a palette in data[1], values are indexes in this palette.
+ */
+#define AV_PIX_FMT_FLAG_PAL (1 << 1)
+/**
+ * All values of a component are bit-wise packed end to end.
+ */
+#define AV_PIX_FMT_FLAG_BITSTREAM (1 << 2)
+/**
+ * Pixel format is an HW accelerated format.
+ */
+#define AV_PIX_FMT_FLAG_HWACCEL (1 << 3)
+/**
+ * At least one pixel component is not in the first data plane.
+ */
+#define AV_PIX_FMT_FLAG_PLANAR (1 << 4)
+/**
+ * The pixel format contains RGB-like data (as opposed to YUV/grayscale).
+ */
+#define AV_PIX_FMT_FLAG_RGB (1 << 5)
+/**
+ * The pixel format is "pseudo-paletted". This means that Libav treats it as
+ * paletted internally, but the palette is generated by the decoder and is not
+ * stored in the file.
+ */
+#define AV_PIX_FMT_FLAG_PSEUDOPAL (1 << 6)
+/**
+ * The pixel format has an alpha channel.
+ */
+#define AV_PIX_FMT_FLAG_ALPHA (1 << 7)
+
+#if FF_API_PIX_FMT
+/**
+ * @deprecated use the AV_PIX_FMT_FLAG_* flags
+ */
+#define PIX_FMT_BE AV_PIX_FMT_FLAG_BE
+#define PIX_FMT_PAL AV_PIX_FMT_FLAG_PAL
+#define PIX_FMT_BITSTREAM AV_PIX_FMT_FLAG_BITSTREAM
+#define PIX_FMT_HWACCEL AV_PIX_FMT_FLAG_HWACCEL
+#define PIX_FMT_PLANAR AV_PIX_FMT_FLAG_PLANAR
+#define PIX_FMT_RGB AV_PIX_FMT_FLAG_RGB
+#define PIX_FMT_PSEUDOPAL AV_PIX_FMT_FLAG_PSEUDOPAL
+#define PIX_FMT_ALPHA AV_PIX_FMT_FLAG_ALPHA
+#endif
+
+#if FF_API_PIX_FMT_DESC
+/**
+ * The array of all the pixel format descriptors.
+ */
+extern attribute_deprecated const AVPixFmtDescriptor av_pix_fmt_descriptors[];
+#endif
+
+/**
+ * Read a line from an image, and write the values of the
+ * pixel format component c to dst.
+ *
+ * @param data the array containing the pointers to the planes of the image
+ * @param linesize the array containing the linesizes of the image
+ * @param desc the pixel format descriptor for the image
+ * @param x the horizontal coordinate of the first pixel to read
+ * @param y the vertical coordinate of the first pixel to read
+ * @param c the pixel format component to read
+ * @param w the width of the line to read, that is the number of
+ * values to write to dst
+ * @param read_pal_component if not zero and the format is a paletted
+ * format, write the values corresponding to the palette
+ * component c in data[1] to dst, rather than the palette indexes in
+ * data[0]. The behavior is undefined if the format is not paletted.
+ */
+void av_read_image_line(uint16_t *dst, const uint8_t *data[4], const int linesize[4],
+ const AVPixFmtDescriptor *desc, int x, int y, int c, int w, int read_pal_component);
+
+/**
+ * Write the values from src to the pixel format component c of an
+ * image line.
+ *
+ * @param src array containing the values to write
+ * @param data the array containing the pointers to the planes of the
+ * image to write into. It is supposed to be zeroed.
+ * @param linesize the array containing the linesizes of the image
+ * @param desc the pixel format descriptor for the image
+ * @param x the horizontal coordinate of the first pixel to write
+ * @param y the vertical coordinate of the first pixel to write
+ * @param c the pixel format component to write
+ * @param w the width of the line to write, that is the number of
+ * values to write to the image line
+ */
+void av_write_image_line(const uint16_t *src, uint8_t *data[4], const int linesize[4],
+ const AVPixFmtDescriptor *desc, int x, int y, int c, int w);
+
+/**
+ * Return the pixel format corresponding to name.
+ *
+ * If there is no pixel format with the exact name, then look for a
+ * pixel format whose name corresponds to the native-endian
+ * variant of name.
+ * For example, on a little-endian system this first looks for "gray16",
+ * then for "gray16le".
+ *
+ * Finally, if no pixel format has been found, return AV_PIX_FMT_NONE.
+ */
+enum AVPixelFormat av_get_pix_fmt(const char *name);
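+
+/*
+ * For example (a sketch; the name is arbitrary):
+ *
+ *     enum AVPixelFormat fmt = av_get_pix_fmt("yuv420p");
+ *     if (fmt == AV_PIX_FMT_NONE) {
+ *         // no pixel format with that name is known
+ *     }
+ */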
+
+/**
+ * Return the short name for a pixel format, NULL in case pix_fmt is
+ * unknown.
+ *
+ * @see av_get_pix_fmt(), av_get_pix_fmt_string()
+ */
+const char *av_get_pix_fmt_name(enum AVPixelFormat pix_fmt);
+
+/**
+ * Print in buf the string corresponding to the pixel format with
+ * number pix_fmt, or a header if pix_fmt is negative.
+ *
+ * @param buf the buffer in which to write the string
+ * @param buf_size the size of buf
+ * @param pix_fmt the number of the pixel format whose info string is to
+ * be printed, or a negative value to print the corresponding header.
+ */
+char *av_get_pix_fmt_string (char *buf, int buf_size, enum AVPixelFormat pix_fmt);
+
+/**
+ * Return the number of bits per pixel used by the pixel format
+ * described by pixdesc. Note that this is not the same as the number
+ * of bits per sample.
+ *
+ * The returned number of bits refers to the number of bits actually
+ * used for storing the pixel information, that is, padding bits are
+ * not counted.
+ */
+int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc);
+
+/**
+ * @return a pixel format descriptor for provided pixel format or NULL if
+ * this pixel format is unknown.
+ */
+const AVPixFmtDescriptor *av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt);
+
+/**
+ * Iterate over all pixel format descriptors known to libavutil.
+ *
+ * @param prev previous descriptor. NULL to get the first descriptor.
+ *
+ * @return next descriptor or NULL after the last descriptor
+ */
+const AVPixFmtDescriptor *av_pix_fmt_desc_next(const AVPixFmtDescriptor *prev);
+
+/**
+ * @return an AVPixelFormat id described by desc, or AV_PIX_FMT_NONE if desc
+ * is not a valid pointer to a pixel format descriptor.
+ */
+enum AVPixelFormat av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc);
+
+/**
+ * Utility function to access log2_chroma_w and log2_chroma_h from
+ * the pixel format's AVPixFmtDescriptor.
+ *
+ * @param[in] pix_fmt the pixel format
+ * @param[out] h_shift store log2_chroma_w (horizontal/width shift)
+ * @param[out] v_shift store log2_chroma_h (vertical/height shift)
+ *
+ * @return 0 on success, AVERROR(ENOSYS) on invalid or unknown pixel format
+ */
+int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt,
+ int *h_shift, int *v_shift);
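+
+/*
+ * For example, querying the 4:2:0 subsampling of yuv420p (a sketch):
+ *
+ *     int h_shift = 0, v_shift = 0;
+ *     if (av_pix_fmt_get_chroma_sub_sample(AV_PIX_FMT_YUV420P,
+ *                                          &h_shift, &v_shift) == 0) {
+ *         // h_shift == 1 and v_shift == 1
+ *     }
+ */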
+
+/**
+ * @return number of planes in pix_fmt, a negative AVERROR if pix_fmt is not a
+ * valid pixel format.
+ */
+int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt);
+
+
+/**
+ * Utility function to swap the endianness of a pixel format.
+ *
+ * @param[in] pix_fmt the pixel format
+ *
+ * @return pixel format with swapped endianness if it exists,
+ * otherwise AV_PIX_FMT_NONE
+ */
+enum AVPixelFormat av_pix_fmt_swap_endianness(enum AVPixelFormat pix_fmt);
+
+
+#endif /* AVUTIL_PIXDESC_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/pixfmt.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/pixfmt.h
new file mode 100644
index 000000000..0d6e0a300
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/pixfmt.h
@@ -0,0 +1,283 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PIXFMT_H
+#define AVUTIL_PIXFMT_H
+
+/**
+ * @file
+ * pixel format definitions
+ *
+ */
+
+#include "libavutil/avconfig.h"
+#include "version.h"
+
+/**
+ * Pixel format.
+ *
+ * @note
+ * PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA
+ * color is put together as:
+ * (A << 24) | (R << 16) | (G << 8) | B
+ * This is stored as BGRA on little-endian CPU architectures and ARGB on
+ * big-endian CPUs.
+ *
+ * @par
+ * When the pixel format is palettized RGB (PIX_FMT_PAL8), the palettized
+ * image data is stored in AVFrame.data[0]. The palette is transported in
+ * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is
+ * formatted the same as in PIX_FMT_RGB32 described above (i.e., it is
+ * also endian-specific). Note also that the individual RGB palette
+ * components stored in AVFrame.data[1] should be in the range 0..255.
+ * This is important as many custom PAL8 video codecs that were designed
+ * to run on the IBM VGA graphics adapter use 6-bit palette components.
+ *
+ * @par
+ * For all the 8bit per pixel formats, an RGB32 palette is in data[1] like
+ * for pal8. This palette is filled in automatically by the function
+ * allocating the picture.
+ *
+ * @note
+ * Make sure that all newly added big-endian formats have pix_fmt & 1 == 1
+ * and that all newly added little-endian formats have pix_fmt & 1 == 0.
+ * This allows simpler detection of big vs little-endian.
+ */
+enum AVPixelFormat {
+ AV_PIX_FMT_NONE = -1,
+ AV_PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
+ AV_PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
+ AV_PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB...
+ AV_PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR...
+ AV_PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+ AV_PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
+ AV_PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
+ AV_PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
+ AV_PIX_FMT_GRAY8, ///< Y , 8bpp
+ AV_PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
+ AV_PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
+ AV_PIX_FMT_PAL8, ///< 8 bit with PIX_FMT_RGB32 palette
+ AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range
+ AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range
+ AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range
+#if FF_API_XVMC
+ AV_PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing
+ AV_PIX_FMT_XVMC_MPEG2_IDCT,
+#endif /* FF_API_XVMC */
+ AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
+ AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
+ AV_PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
+ AV_PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
+ AV_PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
+ AV_PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
+ AV_PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
+ AV_PIX_FMT_NV21, ///< as above, but U and V bytes are swapped
+
+ AV_PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
+ AV_PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
+ AV_PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
+ AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
+
+ AV_PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian
+ AV_PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian
+ AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
+ AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range
+ AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
+#if FF_API_VDPAU
+ AV_PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ AV_PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ AV_PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ AV_PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ AV_PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+#endif
+ AV_PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
+ AV_PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian
+
+ AV_PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
+ AV_PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
+ AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0
+ AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0
+
+ AV_PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
+ AV_PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
+ AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1
+ AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1
+
+ AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
+ AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
+ AV_PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+
+ AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+#if FF_API_VDPAU
+ AV_PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+#endif
+ AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer
+
+ AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0
+ AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0
+ AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1
+ AV_PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1
+ AV_PIX_FMT_Y400A, ///< 8bit gray, 8bit alpha
+ AV_PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
+ AV_PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian
+ AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_VDA_VLD, ///< hardware decoding through VDA
+ AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp
+ AV_PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big-endian
+ AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian
+ AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian
+ AV_PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little-endian
+ AV_PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big-endian
+ AV_PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little-endian
+ AV_PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
+ AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
+ AV_PIX_FMT_YUVA420P9BE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA420P9LE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA422P9BE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA422P9LE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA444P9BE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA444P9LE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
+ AV_PIX_FMT_VDPAU, ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface
+ AV_PIX_FMT_XYZ12LE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0
+ AV_PIX_FMT_XYZ12BE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0
+ AV_PIX_FMT_NV16, ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+ AV_PIX_FMT_NV20LE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_NV20BE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
+
+#if FF_API_PIX_FMT
+#include "old_pix_fmts.h"
+#endif
+};
+
+#if AV_HAVE_BIGENDIAN
+# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be
+#else
+# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le
+#endif
+
+#define AV_PIX_FMT_RGB32 AV_PIX_FMT_NE(ARGB, BGRA)
+#define AV_PIX_FMT_RGB32_1 AV_PIX_FMT_NE(RGBA, ABGR)
+#define AV_PIX_FMT_BGR32 AV_PIX_FMT_NE(ABGR, RGBA)
+#define AV_PIX_FMT_BGR32_1 AV_PIX_FMT_NE(BGRA, ARGB)
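+
+/*
+ * For example, AV_PIX_FMT_RGB32 always denotes a pixel that reads as
+ * (A << 24) | (R << 16) | (G << 8) | B when loaded as a native 32-bit
+ * integer, as described in the note at the top of this header; the macro
+ * simply picks the matching byte-order variant (a sketch):
+ *
+ *     enum AVPixelFormat fmt = AV_PIX_FMT_RGB32;
+ *     // little-endian hosts: fmt == AV_PIX_FMT_BGRA
+ *     // big-endian hosts:    fmt == AV_PIX_FMT_ARGB
+ */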
+
+#define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE)
+#define AV_PIX_FMT_RGB48 AV_PIX_FMT_NE(RGB48BE, RGB48LE)
+#define AV_PIX_FMT_RGB565 AV_PIX_FMT_NE(RGB565BE, RGB565LE)
+#define AV_PIX_FMT_RGB555 AV_PIX_FMT_NE(RGB555BE, RGB555LE)
+#define AV_PIX_FMT_RGB444 AV_PIX_FMT_NE(RGB444BE, RGB444LE)
+#define AV_PIX_FMT_BGR48 AV_PIX_FMT_NE(BGR48BE, BGR48LE)
+#define AV_PIX_FMT_BGR565 AV_PIX_FMT_NE(BGR565BE, BGR565LE)
+#define AV_PIX_FMT_BGR555 AV_PIX_FMT_NE(BGR555BE, BGR555LE)
+#define AV_PIX_FMT_BGR444 AV_PIX_FMT_NE(BGR444BE, BGR444LE)
+
+#define AV_PIX_FMT_YUV420P9 AV_PIX_FMT_NE(YUV420P9BE , YUV420P9LE)
+#define AV_PIX_FMT_YUV422P9 AV_PIX_FMT_NE(YUV422P9BE , YUV422P9LE)
+#define AV_PIX_FMT_YUV444P9 AV_PIX_FMT_NE(YUV444P9BE , YUV444P9LE)
+#define AV_PIX_FMT_YUV420P10 AV_PIX_FMT_NE(YUV420P10BE, YUV420P10LE)
+#define AV_PIX_FMT_YUV422P10 AV_PIX_FMT_NE(YUV422P10BE, YUV422P10LE)
+#define AV_PIX_FMT_YUV444P10 AV_PIX_FMT_NE(YUV444P10BE, YUV444P10LE)
+#define AV_PIX_FMT_YUV420P16 AV_PIX_FMT_NE(YUV420P16BE, YUV420P16LE)
+#define AV_PIX_FMT_YUV422P16 AV_PIX_FMT_NE(YUV422P16BE, YUV422P16LE)
+#define AV_PIX_FMT_YUV444P16 AV_PIX_FMT_NE(YUV444P16BE, YUV444P16LE)
+
+#define AV_PIX_FMT_GBRP9 AV_PIX_FMT_NE(GBRP9BE , GBRP9LE)
+#define AV_PIX_FMT_GBRP10 AV_PIX_FMT_NE(GBRP10BE, GBRP10LE)
+#define AV_PIX_FMT_GBRP16 AV_PIX_FMT_NE(GBRP16BE, GBRP16LE)
+
+#define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE)
+#define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE)
+#define AV_PIX_FMT_YUVA444P9 AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE)
+#define AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE)
+#define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE)
+#define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE)
+#define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE)
+#define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE)
+#define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE)
+
+#define AV_PIX_FMT_XYZ12 AV_PIX_FMT_NE(XYZ12BE, XYZ12LE)
+#define AV_PIX_FMT_NV20 AV_PIX_FMT_NE(NV20BE, NV20LE)
+
+#if FF_API_PIX_FMT
+#define PixelFormat AVPixelFormat
+
+#define PIX_FMT_NE(be, le) AV_PIX_FMT_NE(be, le)
+
+#define PIX_FMT_RGB32 AV_PIX_FMT_RGB32
+#define PIX_FMT_RGB32_1 AV_PIX_FMT_RGB32_1
+#define PIX_FMT_BGR32 AV_PIX_FMT_BGR32
+#define PIX_FMT_BGR32_1 AV_PIX_FMT_BGR32_1
+
+#define PIX_FMT_GRAY16 AV_PIX_FMT_GRAY16
+#define PIX_FMT_RGB48 AV_PIX_FMT_RGB48
+#define PIX_FMT_RGB565 AV_PIX_FMT_RGB565
+#define PIX_FMT_RGB555 AV_PIX_FMT_RGB555
+#define PIX_FMT_RGB444 AV_PIX_FMT_RGB444
+#define PIX_FMT_BGR48 AV_PIX_FMT_BGR48
+#define PIX_FMT_BGR565 AV_PIX_FMT_BGR565
+#define PIX_FMT_BGR555 AV_PIX_FMT_BGR555
+#define PIX_FMT_BGR444 AV_PIX_FMT_BGR444
+
+#define PIX_FMT_YUV420P9 AV_PIX_FMT_YUV420P9
+#define PIX_FMT_YUV422P9 AV_PIX_FMT_YUV422P9
+#define PIX_FMT_YUV444P9 AV_PIX_FMT_YUV444P9
+#define PIX_FMT_YUV420P10 AV_PIX_FMT_YUV420P10
+#define PIX_FMT_YUV422P10 AV_PIX_FMT_YUV422P10
+#define PIX_FMT_YUV444P10 AV_PIX_FMT_YUV444P10
+#define PIX_FMT_YUV420P16 AV_PIX_FMT_YUV420P16
+#define PIX_FMT_YUV422P16 AV_PIX_FMT_YUV422P16
+#define PIX_FMT_YUV444P16 AV_PIX_FMT_YUV444P16
+
+#define PIX_FMT_GBRP9 AV_PIX_FMT_GBRP9
+#define PIX_FMT_GBRP10 AV_PIX_FMT_GBRP10
+#define PIX_FMT_GBRP16 AV_PIX_FMT_GBRP16
+#endif
+
+#endif /* AVUTIL_PIXFMT_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/random_seed.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/random_seed.h
new file mode 100644
index 000000000..b1fad13d0
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/random_seed.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2009 Baptiste Coudurier <baptiste.coudurier@gmail.com>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_RANDOM_SEED_H
+#define AVUTIL_RANDOM_SEED_H
+
+#include <stdint.h>
+/**
+ * @addtogroup lavu_crypto
+ * @{
+ */
+
+/**
+ * Get random data.
+ *
+ * This function can be called repeatedly to generate more random bits
+ * as needed. It is generally quite slow, and usually used to seed a
+ * PRNG. As it uses /dev/urandom and /dev/random, the quality of the
+ * returned random data depends on the platform.
+ */
+uint32_t av_get_random_seed(void);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_RANDOM_SEED_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/rational.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/rational.h
new file mode 100644
index 000000000..5d7dab7fd
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/rational.h
@@ -0,0 +1,155 @@
+/*
+ * rational numbers
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * rational numbers
+ * @author Michael Niedermayer <michaelni@gmx.at>
+ */
+
+#ifndef AVUTIL_RATIONAL_H
+#define AVUTIL_RATIONAL_H
+
+#include <stdint.h>
+#include <limits.h>
+#include "attributes.h"
+
+/**
+ * @addtogroup lavu_math
+ * @{
+ */
+
+/**
+ * rational number numerator/denominator
+ */
+typedef struct AVRational{
+ int num; ///< numerator
+ int den; ///< denominator
+} AVRational;
+
+/**
+ * Compare two rationals.
+ * @param a first rational
+ * @param b second rational
+ * @return 0 if a==b, 1 if a>b, -1 if a<b, and INT_MIN if one of the
+ * values is of the form 0/0
+ */
+static inline int av_cmp_q(AVRational a, AVRational b){
+ const int64_t tmp= a.num * (int64_t)b.den - b.num * (int64_t)a.den;
+
+ if(tmp) return ((tmp ^ a.den ^ b.den)>>63)|1;
+ else if(b.den && a.den) return 0;
+ else if(a.num && b.num) return (a.num>>31) - (b.num>>31);
+ else return INT_MIN;
+}
+
+/**
+ * Convert rational to double.
+ * @param a rational to convert
+ * @return (double) a
+ */
+static inline double av_q2d(AVRational a){
+ return a.num / (double) a.den;
+}
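+
+/*
+ * For example, converting a timestamp expressed in a 90 kHz time base to
+ * seconds (a sketch; the tick count is arbitrary):
+ *
+ *     AVRational tb = { 1, 90000 };
+ *     double seconds = 3003 * av_q2d(tb); // ~0.0334 s, one NTSC frame
+ */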
+
+/**
+ * Reduce a fraction.
+ * This is useful for framerate calculations.
+ * @param dst_num destination numerator
+ * @param dst_den destination denominator
+ * @param num source numerator
+ * @param den source denominator
+ * @param max the maximum allowed for dst_num & dst_den
+ * @return 1 if exact, 0 otherwise
+ */
+int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max);
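+
+/*
+ * For example, reducing a 1920x1080 picture size to its aspect ratio
+ * (a sketch):
+ *
+ *     int num, den;
+ *     int exact = av_reduce(&num, &den, 1920, 1080, INT_MAX);
+ *     // num == 16, den == 9, exact == 1
+ */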
+
+/**
+ * Multiply two rationals.
+ * @param b first rational
+ * @param c second rational
+ * @return b*c
+ */
+AVRational av_mul_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Divide one rational by another.
+ * @param b first rational
+ * @param c second rational
+ * @return b/c
+ */
+AVRational av_div_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Add two rationals.
+ * @param b first rational
+ * @param c second rational
+ * @return b+c
+ */
+AVRational av_add_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Subtract one rational from another.
+ * @param b first rational
+ * @param c second rational
+ * @return b-c
+ */
+AVRational av_sub_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Invert a rational.
+ * @param q value
+ * @return 1 / q
+ */
+static av_always_inline AVRational av_inv_q(AVRational q)
+{
+ AVRational r = { q.den, q.num };
+ return r;
+}
+
+/**
+ * Convert a double precision floating point number to a rational.
+ * inf is expressed as {1,0} or {-1,0} depending on the sign.
+ *
+ * @param d double to convert
+ * @param max the maximum allowed numerator and denominator
+ * @return (AVRational) d
+ */
+AVRational av_d2q(double d, int max) av_const;
+
+/**
+ * @return 1 if q1 is nearer to q than q2, -1 if q2 is nearer to q
+ * than q1, 0 if they have the same distance.
+ */
+int av_nearer_q(AVRational q, AVRational q1, AVRational q2);
+
+/**
+ * Find the nearest value in q_list to q.
+ * @param q_list an array of rationals terminated by {0, 0}
+ * @return the index of the nearest value found in the array
+ */
+int av_find_nearest_q_idx(AVRational q, const AVRational* q_list);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_RATIONAL_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/samplefmt.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/samplefmt.h
new file mode 100644
index 000000000..33cbdedf5
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/samplefmt.h
@@ -0,0 +1,220 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_SAMPLEFMT_H
+#define AVUTIL_SAMPLEFMT_H
+
+#include <stdint.h>
+
+#include "avutil.h"
+#include "attributes.h"
+
+/**
+ * Audio Sample Formats
+ *
+ * @par
+ * The data described by the sample format is always in native-endian order.
+ * Sample values can be expressed by native C types, hence the lack of a signed
+ * 24-bit sample format even though it is a common raw audio data format.
+ *
+ * @par
+ * The floating-point formats are based on full volume being in the range
+ * [-1.0, 1.0]. Any values outside this range are beyond full volume level.
+ *
+ * @par
+ * The data layout as used in av_samples_fill_arrays() and elsewhere in Libav
+ * (such as AVFrame in libavcodec) is as follows:
+ *
+ * For planar sample formats, each audio channel is in a separate data plane,
+ * and linesize is the buffer size, in bytes, for a single plane. All data
+ * planes must be the same size. For packed sample formats, only the first data
+ * plane is used, and samples for each channel are interleaved. In this case,
+ * linesize is the buffer size, in bytes, for that single plane.
+ */
+enum AVSampleFormat {
+ AV_SAMPLE_FMT_NONE = -1,
+ AV_SAMPLE_FMT_U8, ///< unsigned 8 bits
+ AV_SAMPLE_FMT_S16, ///< signed 16 bits
+ AV_SAMPLE_FMT_S32, ///< signed 32 bits
+ AV_SAMPLE_FMT_FLT, ///< float
+ AV_SAMPLE_FMT_DBL, ///< double
+
+ AV_SAMPLE_FMT_U8P, ///< unsigned 8 bits, planar
+ AV_SAMPLE_FMT_S16P, ///< signed 16 bits, planar
+ AV_SAMPLE_FMT_S32P, ///< signed 32 bits, planar
+ AV_SAMPLE_FMT_FLTP, ///< float, planar
+ AV_SAMPLE_FMT_DBLP, ///< double, planar
+
+ AV_SAMPLE_FMT_NB ///< Number of sample formats. DO NOT USE if linking dynamically
+};
+
+/**
+ * Return the name of sample_fmt, or NULL if sample_fmt is not
+ * recognized.
+ */
+const char *av_get_sample_fmt_name(enum AVSampleFormat sample_fmt);
+
+/**
+ * Return a sample format corresponding to name, or AV_SAMPLE_FMT_NONE
+ * on error.
+ */
+enum AVSampleFormat av_get_sample_fmt(const char *name);
+
+/**
+ * Get the packed alternative form of the given sample format.
+ *
+ * If the passed sample_fmt is already in packed format, the format returned is
+ * the same as the input.
+ *
+ * @return the packed alternative form of the given sample format or
+ * AV_SAMPLE_FMT_NONE on error.
+ */
+enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt);
+
+/**
+ * Get the planar alternative form of the given sample format.
+ *
+ * If the passed sample_fmt is already in planar format, the format returned is
+ * the same as the input.
+ *
+ * @return the planar alternative form of the given sample format or
+ * AV_SAMPLE_FMT_NONE on error.
+ */
+enum AVSampleFormat av_get_planar_sample_fmt(enum AVSampleFormat sample_fmt);
+
+/**
+ * Generate a string corresponding to the sample format sample_fmt,
+ * or a header if sample_fmt is negative.
+ *
+ * @param buf the buffer in which to write the string
+ * @param buf_size the size of buf
+ * @param sample_fmt the number of the sample format whose info string is
+ * to be printed, or a negative value to print the corresponding header.
+ * @return the pointer to the filled buffer or NULL if sample_fmt is
+ * unknown or in case of other errors
+ */
+char *av_get_sample_fmt_string(char *buf, int buf_size, enum AVSampleFormat sample_fmt);
+
+/**
+ * Return number of bytes per sample.
+ *
+ * @param sample_fmt the sample format
+ * @return number of bytes per sample or zero if unknown for the given
+ * sample format
+ */
+int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt);
+
+/**
+ * Check if the sample format is planar.
+ *
+ * @param sample_fmt the sample format to inspect
+ * @return 1 if the sample format is planar, 0 if it is interleaved
+ */
+int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt);
+
+/**
+ * Get the required buffer size for the given audio parameters.
+ *
+ * @param[out] linesize calculated linesize, may be NULL
+ * @param nb_channels the number of channels
+ * @param nb_samples the number of samples in a single channel
+ * @param sample_fmt the sample format
+ * @param align buffer size alignment (0 = default, 1 = no alignment)
+ * @return required buffer size, or negative error code on failure
+ */
+int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,
+ enum AVSampleFormat sample_fmt, int align);
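+
+/*
+ * For example, computing the size of an unaligned buffer holding 1024
+ * stereo S16 samples (a sketch):
+ *
+ *     int size = av_samples_get_buffer_size(NULL, 2, 1024,
+ *                                           AV_SAMPLE_FMT_S16, 1);
+ *     // size == 2 * 1024 * 2 == 4096 bytes (align == 1: no padding)
+ */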
+
+/**
+ * Fill channel data pointers and linesize for samples with sample
+ * format sample_fmt.
+ *
+ * The pointers array is filled with the pointers to the samples data.
+ * For planar formats, each entry is set to the start of the corresponding
+ * channel's data within the buffer; for packed formats, only the first
+ * entry is set, to the start of the entire buffer.
+ *
+ * The linesize array is filled with the aligned size of each channel's data
+ * buffer for planar layout, or the aligned size of the buffer for all channels
+ * for packed layout.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param[out] audio_data array to be filled with the pointer for each channel
+ * @param[out] linesize calculated linesize, may be NULL
+ * @param buf the pointer to a buffer containing the samples
+ * @param nb_channels the number of channels
+ * @param nb_samples the number of samples in a single channel
+ * @param sample_fmt the sample format
+ * @param align buffer size alignment (0 = default, 1 = no alignment)
+ * @return 0 on success or a negative error code on failure
+ */
+int av_samples_fill_arrays(uint8_t **audio_data, int *linesize,
+ const uint8_t *buf,
+ int nb_channels, int nb_samples,
+ enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * Allocate a samples buffer for nb_samples samples, and fill data pointers and
+ * linesize accordingly.
+ * The allocated samples buffer can be freed by using av_freep(&audio_data[0]).
+ * The allocated data will be initialized to silence.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param[out] audio_data array to be filled with the pointer for each channel
+ * @param[out] linesize aligned size for audio buffer(s), may be NULL
+ * @param nb_channels number of audio channels
+ * @param nb_samples number of samples per channel
+ * @param align buffer size alignment (0 = default, 1 = no alignment)
+ * @return 0 on success or a negative error code on failure
+ * @see av_samples_fill_arrays()
+ */
+int av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels,
+ int nb_samples, enum AVSampleFormat sample_fmt, int align);
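+
+/*
+ * For example, allocating a planar-float stereo buffer of 1024 samples
+ * per channel (a sketch; av_freep() is declared in mem.h):
+ *
+ *     uint8_t *data[2] = { NULL };
+ *     int linesize = 0;
+ *     if (av_samples_alloc(data, &linesize, 2, 1024,
+ *                          AV_SAMPLE_FMT_FLTP, 0) >= 0) {
+ *         // data[0] and data[1] each point to one channel's plane of
+ *         // linesize bytes, initialized to silence
+ *         av_freep(&data[0]);
+ *     }
+ */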
+
+/**
+ * Copy samples from src to dst.
+ *
+ * @param dst destination array of pointers to data planes
+ * @param src source array of pointers to data planes
+ * @param dst_offset offset in samples at which the data will be written to dst
+ * @param src_offset offset in samples at which the data will be read from src
+ * @param nb_samples number of samples to be copied
+ * @param nb_channels number of audio channels
+ * @param sample_fmt audio sample format
+ */
+int av_samples_copy(uint8_t **dst, uint8_t * const *src, int dst_offset,
+ int src_offset, int nb_samples, int nb_channels,
+ enum AVSampleFormat sample_fmt);
+
+/**
+ * Fill an audio buffer with silence.
+ *
+ * @param audio_data array of pointers to data planes
+ * @param offset offset in samples at which to start filling
+ * @param nb_samples number of samples to fill
+ * @param nb_channels number of audio channels
+ * @param sample_fmt audio sample format
+ */
+int av_samples_set_silence(uint8_t **audio_data, int offset, int nb_samples,
+ int nb_channels, enum AVSampleFormat sample_fmt);
+
+#endif /* AVUTIL_SAMPLEFMT_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/sha.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/sha.h
new file mode 100644
index 000000000..4c9a0c909
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/sha.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_SHA_H
+#define AVUTIL_SHA_H
+
+#include <stdint.h>
+
+#include "attributes.h"
+#include "version.h"
+
+/**
+ * @defgroup lavu_sha SHA
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+#if FF_API_CONTEXT_SIZE
+extern attribute_deprecated const int av_sha_size;
+#endif
+
+struct AVSHA;
+
+/**
+ * Allocate an AVSHA context.
+ */
+struct AVSHA *av_sha_alloc(void);
+
+/**
+ * Initialize SHA-1 or SHA-2 hashing.
+ *
+ * @param context pointer to the function context (of size av_sha_size)
+ * @param bits number of bits in the digest (160 for SHA-1; 224 or 256 for SHA-2)
+ * @return zero if initialization succeeded, -1 otherwise
+ */
+int av_sha_init(struct AVSHA* context, int bits);
+
+/**
+ * Update hash value.
+ *
+ * @param context hash function context
+ * @param data input data to update hash with
+ * @param len input data length
+ */
+void av_sha_update(struct AVSHA* context, const uint8_t* data, unsigned int len);
+
+/**
+ * Finish hashing and output digest value.
+ *
+ * @param context hash function context
+ * @param digest buffer where output digest value is stored
+ */
+void av_sha_final(struct AVSHA* context, uint8_t *digest);
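+
+/*
+ * For example, computing the SHA-1 digest of a short buffer (a sketch;
+ * av_free() is declared in mem.h, and the input bytes are arbitrary):
+ *
+ *     struct AVSHA *sha = av_sha_alloc();
+ *     uint8_t digest[20]; // 160-bit SHA-1 digest
+ *     if (sha && av_sha_init(sha, 160) == 0) {
+ *         av_sha_update(sha, (const uint8_t *)"abc", 3);
+ *         av_sha_final(sha, digest);
+ *     }
+ *     av_free(sha);
+ */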
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_SHA_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/stereo3d.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/stereo3d.h
new file mode 100644
index 000000000..695d6f1ba
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/stereo3d.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2013 Vittorio Giovara <vittorio.giovara@gmail.com>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "frame.h"
+
+/**
+ * List of possible 3D Types
+ */
+enum AVStereo3DType {
+ /**
+     * Video is not stereoscopic (the metadata is nevertheless present and
+     * explicitly marks the video as 2D).
+ */
+ AV_STEREO3D_2D,
+
+ /**
+ * Views are next to each other.
+ *
+ * LLLLRRRR
+ * LLLLRRRR
+ * LLLLRRRR
+ * ...
+ */
+ AV_STEREO3D_SIDEBYSIDE,
+
+ /**
+ * Views are on top of each other.
+ *
+ * LLLLLLLL
+ * LLLLLLLL
+ * RRRRRRRR
+ * RRRRRRRR
+ */
+ AV_STEREO3D_TOPBOTTOM,
+
+ /**
+ * Views are alternated temporally.
+ *
+ * frame0 frame1 frame2 ...
+ * LLLLLLLL RRRRRRRR LLLLLLLL
+ * LLLLLLLL RRRRRRRR LLLLLLLL
+ * LLLLLLLL RRRRRRRR LLLLLLLL
+ * ... ... ...
+ */
+ AV_STEREO3D_FRAMESEQUENCE,
+
+ /**
+ * Views are packed in a checkerboard-like structure per pixel.
+ *
+ * LRLRLRLR
+ * RLRLRLRL
+ * LRLRLRLR
+ * ...
+ */
+ AV_STEREO3D_CHECKERBOARD,
+
+ /**
+     * Views are next to each other, but a checkerboard pattern must be
+     * applied when upscaling.
+ *
+ * LLLLRRRR L L L L R R R R
+ * LLLLRRRR => L L L L R R R R
+ * LLLLRRRR L L L L R R R R
+ * LLLLRRRR L L L L R R R R
+ */
+ AV_STEREO3D_SIDEBYSIDE_QUINCUNX,
+
+ /**
+ * Views are packed per line, as if interlaced.
+ *
+ * LLLLLLLL
+ * RRRRRRRR
+ * LLLLLLLL
+ * ...
+ */
+ AV_STEREO3D_LINES,
+
+ /**
+ * Views are packed per column.
+ *
+ * LRLRLRLR
+ * LRLRLRLR
+ * LRLRLRLR
+ * ...
+ */
+ AV_STEREO3D_COLUMNS,
+};
+
+
+/**
+ * Inverted views, Right/Bottom represents the left view.
+ */
+#define AV_STEREO3D_FLAG_INVERT (1 << 0)
+
+/**
+ * Stereo 3D type: this structure describes how two videos are packed
+ * within a single video surface, with additional information as needed.
+ *
+ * @note The struct must be allocated with av_stereo3d_alloc() and
+ * its size is not a part of the public ABI.
+ */
+typedef struct AVStereo3D {
+ /**
+ * How views are packed within the video.
+ */
+ enum AVStereo3DType type;
+
+ /**
+ * Additional information about the frame packing.
+ */
+ int flags;
+} AVStereo3D;
+
+/**
+ * Allocate an AVStereo3D structure and set its fields to default values.
+ * The resulting struct can be freed using av_freep().
+ *
+ * @return An AVStereo3D filled with default values or NULL on failure.
+ */
+AVStereo3D *av_stereo3d_alloc(void);
+
+/**
+ * Allocate a complete AVFrameSideData and add it to the frame.
+ *
+ * @param frame The frame to which the side data is added.
+ *
+ * @return The AVStereo3D structure to be filled by caller.
+ */
+AVStereo3D *av_stereo3d_create_side_data(AVFrame *frame);
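+
+/*
+ * For example, tagging a frame as side-by-side stereo (a sketch; frame is
+ * assumed to be a valid AVFrame pointer):
+ *
+ *     AVStereo3D *s3d = av_stereo3d_create_side_data(frame);
+ *     if (s3d) {
+ *         s3d->type  = AV_STEREO3D_SIDEBYSIDE;
+ *         s3d->flags = 0;
+ *     }
+ */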
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/time.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/time.h
new file mode 100644
index 000000000..b01a97d77
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/time.h
@@ -0,0 +1,39 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_TIME_H
+#define AVUTIL_TIME_H
+
+#include <stdint.h>
+
+/**
+ * Get the current time in microseconds.
+ */
+int64_t av_gettime(void);
+
+/**
+ * Sleep for a period of time. Although the duration is expressed in
+ * microseconds, the actual delay may be rounded to the precision of the
+ * system timer.
+ *
+ * @param usec Number of microseconds to sleep.
+ * @return zero on success or (negative) error code.
+ */
+int av_usleep(unsigned usec);
+
+#endif /* AVUTIL_TIME_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/version.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/version.h
new file mode 100644
index 000000000..5196a674d
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/version.h
@@ -0,0 +1,116 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_VERSION_H
+#define AVUTIL_VERSION_H
+
+#include "macros.h"
+
+/**
+ * @defgroup version_utils Library Version Macros
+ *
+ * Useful to check and match the library version in order to maintain
+ * backward compatibility.
+ *
+ * @{
+ */
+
+#define AV_VERSION_INT(a, b, c) (a<<16 | b<<8 | c)
+#define AV_VERSION_DOT(a, b, c) a ##.## b ##.## c
+#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c)
+
+/**
+ * @}
+ */
+
+/**
+ * @file
+ * @ingroup lavu
+ * Libavutil version macros
+ */
+
+/**
+ * @defgroup lavu_ver Version and Build diagnostics
+ *
+ * Macros and functions useful to check at compile time and at runtime
+ * which version of libavutil is in use.
+ *
+ * @{
+ */
+
+#define LIBAVUTIL_VERSION_MAJOR 53
+#define LIBAVUTIL_VERSION_MINOR 3
+#define LIBAVUTIL_VERSION_MICRO 0
+
+#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
+ LIBAVUTIL_VERSION_MINOR, \
+ LIBAVUTIL_VERSION_MICRO)
+#define LIBAVUTIL_VERSION AV_VERSION(LIBAVUTIL_VERSION_MAJOR, \
+ LIBAVUTIL_VERSION_MINOR, \
+ LIBAVUTIL_VERSION_MICRO)
+#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT
+
+#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION)
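+
+/*
+ * For example, code can be guarded at compile time against older libavutil
+ * releases (a sketch):
+ *
+ *     #if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(53, 3, 0)
+ *     // code that relies on libavutil 53.3.0 or newer
+ *     #endif
+ */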
+
+/**
+ * @}
+ *
+ * @defgroup depr_guards Deprecation guards
+ * FF_API_* defines may be placed below to indicate public API that will be
+ * dropped at a future version bump. The defines themselves are not part of
+ * the public API and may change, break or disappear at any time.
+ *
+ * @{
+ */
+
+#ifndef FF_API_PIX_FMT
+#define FF_API_PIX_FMT (LIBAVUTIL_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_CONTEXT_SIZE
+#define FF_API_CONTEXT_SIZE (LIBAVUTIL_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_PIX_FMT_DESC
+#define FF_API_PIX_FMT_DESC (LIBAVUTIL_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_AV_REVERSE
+#define FF_API_AV_REVERSE (LIBAVUTIL_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_AUDIOCONVERT
+#define FF_API_AUDIOCONVERT (LIBAVUTIL_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_CPU_FLAG_MMX2
+#define FF_API_CPU_FLAG_MMX2 (LIBAVUTIL_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_LLS_PRIVATE
+#define FF_API_LLS_PRIVATE (LIBAVUTIL_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_AVFRAME_LAVC
+#define FF_API_AVFRAME_LAVC (LIBAVUTIL_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_VDPAU
+#define FF_API_VDPAU (LIBAVUTIL_VERSION_MAJOR < 54)
+#endif
+#ifndef FF_API_XVMC
+#define FF_API_XVMC (LIBAVUTIL_VERSION_MAJOR < 54)
+#endif
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_VERSION_H */
diff --git a/dom/media/platforms/ffmpeg/libav55/include/libavutil/xtea.h b/dom/media/platforms/ffmpeg/libav55/include/libavutil/xtea.h
new file mode 100644
index 000000000..7d2b07bbc
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/include/libavutil/xtea.h
@@ -0,0 +1,61 @@
+/*
+ * A 32-bit implementation of the XTEA algorithm
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_XTEA_H
+#define AVUTIL_XTEA_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_xtea XTEA
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+typedef struct AVXTEA {
+ uint32_t key[16];
+} AVXTEA;
+
+/**
+ * Initialize an AVXTEA context.
+ *
+ * @param ctx an AVXTEA context
+ * @param key a key of 16 bytes used for encryption/decryption
+ */
+void av_xtea_init(struct AVXTEA *ctx, const uint8_t key[16]);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context.
+ *
+ * @param ctx an AVXTEA context
+ * @param dst destination array, can be equal to src
+ * @param src source array, can be equal to dst
+ * @param count number of 8 byte blocks
+ * @param iv initialization vector for CBC mode, if NULL then ECB will be used
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_xtea_crypt(struct AVXTEA *ctx, uint8_t *dst, const uint8_t *src,
+ int count, uint8_t *iv, int decrypt);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_XTEA_H */
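
For orientation, a minimal usage sketch of the API declared above (illustrative only; the key and plaintext bytes are placeholders), round-tripping a single 8-byte block in ECB mode:

    #include <stdint.h>
    #include "libavutil/xtea.h"

    static void xtea_roundtrip_example()
    {
      const uint8_t key[16] = { 0 };                      // placeholder key
      const uint8_t plain[8] = { 'a','b','c','d','e','f','g','h' };
      uint8_t cipher[8];
      uint8_t decoded[8];

      AVXTEA ctx;
      av_xtea_init(&ctx, key);
      av_xtea_crypt(&ctx, cipher, plain, 1, nullptr, 0);  // iv == NULL -> ECB, encrypt
      av_xtea_crypt(&ctx, decoded, cipher, 1, nullptr, 1); // decrypt back to plaintext
    }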
diff --git a/dom/media/platforms/ffmpeg/libav55/moz.build b/dom/media/platforms/ffmpeg/libav55/moz.build
new file mode 100644
index 000000000..6bd5db8d7
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/libav55/moz.build
@@ -0,0 +1,25 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+UNIFIED_SOURCES += [
+ '../FFmpegAudioDecoder.cpp',
+ '../FFmpegDataDecoder.cpp',
+ '../FFmpegDecoderModule.cpp',
+ '../FFmpegVideoDecoder.cpp',
+]
+LOCAL_INCLUDES += [
+ '..',
+ 'include',
+]
+
+if CONFIG['GNU_CXX']:
+ CXXFLAGS += [ '-Wno-deprecated-declarations' ]
+if CONFIG['CLANG_CXX']:
+ CXXFLAGS += [
+ '-Wno-unknown-attributes',
+ ]
+
+FINAL_LIBRARY = 'xul'
diff --git a/dom/media/platforms/ffmpeg/moz.build b/dom/media/platforms/ffmpeg/moz.build
new file mode 100644
index 000000000..604e445aa
--- /dev/null
+++ b/dom/media/platforms/ffmpeg/moz.build
@@ -0,0 +1,22 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+EXPORTS += [
+ 'FFmpegRuntimeLinker.h',
+]
+
+DIRS += [
+ 'libav53',
+ 'libav54',
+ 'libav55',
+ 'ffmpeg57',
+]
+
+UNIFIED_SOURCES += [
+ 'FFmpegRuntimeLinker.cpp',
+]
+
+FINAL_LIBRARY = 'xul'
diff --git a/dom/media/platforms/gonk/GonkAudioDecoderManager.cpp b/dom/media/platforms/gonk/GonkAudioDecoderManager.cpp
new file mode 100644
index 000000000..0bc3fbea9
--- /dev/null
+++ b/dom/media/platforms/gonk/GonkAudioDecoderManager.cpp
@@ -0,0 +1,268 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#include "MediaCodecProxy.h"
+#include <OMX_IVCommon.h>
+#include <gui/Surface.h>
+#include <ICrypto.h>
+#include "GonkAudioDecoderManager.h"
+#include "MediaDecoderReader.h"
+#include "VideoUtils.h"
+#include "nsTArray.h"
+#include "mozilla/Logging.h"
+#include "stagefright/MediaBuffer.h"
+#include "stagefright/MetaData.h"
+#include "stagefright/MediaErrors.h"
+#include <stagefright/foundation/AMessage.h>
+#include <stagefright/foundation/ALooper.h>
+#include "media/openmax/OMX_Audio.h"
+#include "MediaData.h"
+#include "MediaInfo.h"
+
+#define CODECCONFIG_TIMEOUT_US 10000LL
+#define READ_OUTPUT_BUFFER_TIMEOUT_US 0LL
+
+#include <android/log.h>
+#define GADM_LOG(...) __android_log_print(ANDROID_LOG_DEBUG, "GonkAudioDecoderManager", __VA_ARGS__)
+
+#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
+
+using namespace android;
+typedef android::MediaCodecProxy MediaCodecProxy;
+
+namespace mozilla {
+
+GonkAudioDecoderManager::GonkAudioDecoderManager(const AudioInfo& aConfig)
+ : mAudioChannels(aConfig.mChannels)
+ , mAudioRate(aConfig.mRate)
+ , mAudioProfile(aConfig.mProfile)
+ , mAudioCompactor(mAudioQueue)
+{
+ MOZ_COUNT_CTOR(GonkAudioDecoderManager);
+ MOZ_ASSERT(mAudioChannels);
+ mCodecSpecificData = aConfig.mCodecSpecificConfig;
+ mMimeType = aConfig.mMimeType;
+}
+
+GonkAudioDecoderManager::~GonkAudioDecoderManager()
+{
+ MOZ_COUNT_DTOR(GonkAudioDecoderManager);
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+GonkAudioDecoderManager::Init()
+{
+ if (InitMediaCodecProxy()) {
+ return InitPromise::CreateAndResolve(TrackType::kAudioTrack, __func__);
+ } else {
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+}
+
+bool
+GonkAudioDecoderManager::InitMediaCodecProxy()
+{
+ status_t rv = OK;
+ if (!InitLoopers(MediaData::AUDIO_DATA)) {
+ return false;
+ }
+
+ mDecoder = MediaCodecProxy::CreateByType(mDecodeLooper, mMimeType.get(), false);
+ if (!mDecoder.get()) {
+ return false;
+ }
+ if (!mDecoder->AllocateAudioMediaCodec())
+ {
+ mDecoder = nullptr;
+ return false;
+ }
+ sp<AMessage> format = new AMessage;
+ // Fixed values
+ GADM_LOG("Configure audio mime type:%s, chan no:%d, sample-rate:%d, profile:%d",
+ mMimeType.get(), mAudioChannels, mAudioRate, mAudioProfile);
+ format->setString("mime", mMimeType.get());
+ format->setInt32("channel-count", mAudioChannels);
+ format->setInt32("sample-rate", mAudioRate);
+ format->setInt32("aac-profile", mAudioProfile);
+ status_t err = mDecoder->configure(format, nullptr, nullptr, 0);
+ if (err != OK || !mDecoder->Prepare()) {
+ return false;
+ }
+
+ if (mMimeType.EqualsLiteral("audio/mp4a-latm")) {
+ rv = mDecoder->Input(mCodecSpecificData->Elements(), mCodecSpecificData->Length(), 0,
+ android::MediaCodec::BUFFER_FLAG_CODECCONFIG,
+ CODECCONFIG_TIMEOUT_US);
+ }
+
+ if (rv == OK) {
+ return true;
+ } else {
+ GADM_LOG("Failed to input codec specific data!");
+ return false;
+ }
+}
+
+nsresult
+GonkAudioDecoderManager::CreateAudioData(MediaBuffer* aBuffer, int64_t aStreamOffset)
+{
+  if (!aBuffer || !aBuffer->data()) {
+ GADM_LOG("Audio Buffer is not valid!");
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ int64_t timeUs;
+ if (!aBuffer->meta_data()->findInt64(kKeyTime, &timeUs)) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ if (aBuffer->range_length() == 0) {
+    // Some decoders may return spurious empty buffers that we just want to
+    // ignore, as noted in Android's AwesomePlayer.cpp.
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (mLastTime > timeUs) {
+    GADM_LOG("Output decoded sample time went backwards. time=%lld", timeUs);
+ MOZ_ASSERT(false);
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+ mLastTime = timeUs;
+
+ const uint8_t *data = static_cast<const uint8_t*>(aBuffer->data());
+ size_t dataOffset = aBuffer->range_offset();
+ size_t size = aBuffer->range_length();
+
+ uint32_t frames = size / (2 * mAudioChannels);
+
+ CheckedInt64 duration = FramesToUsecs(frames, mAudioRate);
+ if (!duration.isValid()) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ typedef AudioCompactor::NativeCopy OmxCopy;
+ mAudioCompactor.Push(aStreamOffset,
+ timeUs,
+ mAudioRate,
+ frames,
+ mAudioChannels,
+ OmxCopy(data+dataOffset,
+ size,
+ mAudioChannels));
+ return NS_OK;
+}
+
+nsresult
+GonkAudioDecoderManager::Output(int64_t aStreamOffset,
+ RefPtr<MediaData>& aOutData)
+{
+ aOutData = nullptr;
+ if (mAudioQueue.GetSize() > 0) {
+ aOutData = mAudioQueue.PopFront();
+ return mAudioQueue.AtEndOfStream() ? NS_ERROR_ABORT : NS_OK;
+ }
+
+ status_t err;
+ MediaBuffer* audioBuffer = nullptr;
+ err = mDecoder->Output(&audioBuffer, READ_OUTPUT_BUFFER_TIMEOUT_US);
+ AutoReleaseMediaBuffer a(audioBuffer, mDecoder.get());
+
+ switch (err) {
+ case OK:
+ {
+ nsresult rv = CreateAudioData(audioBuffer, aStreamOffset);
+ NS_ENSURE_SUCCESS(rv, rv);
+ break;
+ }
+ case android::INFO_FORMAT_CHANGED:
+ {
+ // If the format changed, update our cached info.
+ GADM_LOG("Decoder format changed");
+ sp<AMessage> audioCodecFormat;
+
+ if (mDecoder->getOutputFormat(&audioCodecFormat) != OK ||
+ audioCodecFormat == nullptr) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ int32_t codec_channel_count = 0;
+ int32_t codec_sample_rate = 0;
+
+ if (!audioCodecFormat->findInt32("channel-count", &codec_channel_count) ||
+ !audioCodecFormat->findInt32("sample-rate", &codec_sample_rate)) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ // Update AudioInfo
+ AudioConfig::ChannelLayout layout(codec_channel_count);
+ if (!layout.IsValid()) {
+ return NS_ERROR_FAILURE;
+ }
+ mAudioChannels = codec_channel_count;
+ mAudioRate = codec_sample_rate;
+
+ return Output(aStreamOffset, aOutData);
+ }
+ case android::INFO_OUTPUT_BUFFERS_CHANGED:
+ {
+ GADM_LOG("Info Output Buffers Changed");
+ if (mDecoder->UpdateOutputBuffers()) {
+ return Output(aStreamOffset, aOutData);
+ }
+ return NS_ERROR_FAILURE;
+ }
+ case -EAGAIN:
+ {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+ case android::ERROR_END_OF_STREAM:
+ {
+ GADM_LOG("Got EOS frame!");
+ nsresult rv = CreateAudioData(audioBuffer, aStreamOffset);
+ NS_ENSURE_SUCCESS(rv, NS_ERROR_ABORT);
+ MOZ_ASSERT(mAudioQueue.GetSize() > 0);
+ mAudioQueue.Finish();
+ break;
+ }
+ case -ETIMEDOUT:
+ {
+      GADM_LOG("Timeout. Can try again next time.");
+ return NS_ERROR_UNEXPECTED;
+ }
+ default:
+ {
+ GADM_LOG("Decoder failed, err=%d", err);
+ return NS_ERROR_UNEXPECTED;
+ }
+ }
+
+ if (mAudioQueue.GetSize() > 0) {
+ aOutData = mAudioQueue.PopFront();
+ // Return NS_ERROR_ABORT at the last sample.
+ return mAudioQueue.AtEndOfStream() ? NS_ERROR_ABORT : NS_OK;
+ }
+
+ return NS_ERROR_NOT_AVAILABLE;
+}
+
+void
+GonkAudioDecoderManager::ProcessFlush()
+{
+ GADM_LOG("FLUSH<<<");
+ mAudioQueue.Reset();
+ GADM_LOG(">>>FLUSH");
+ GonkDecoderManager::ProcessFlush();
+}
+
+void
+GonkAudioDecoderManager::ResetEOS()
+{
+  GADM_LOG("ResetEOS<<<");
+  mAudioQueue.Reset();
+  GADM_LOG(">>>ResetEOS");
+ GonkDecoderManager::ResetEOS();
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/gonk/GonkAudioDecoderManager.h b/dom/media/platforms/gonk/GonkAudioDecoderManager.h
new file mode 100644
index 000000000..aa35d620e
--- /dev/null
+++ b/dom/media/platforms/gonk/GonkAudioDecoderManager.h
@@ -0,0 +1,59 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(GonkAudioDecoderManager_h_)
+#define GonkAudioDecoderManager_h_
+
+#include "AudioCompactor.h"
+#include "mozilla/RefPtr.h"
+#include "GonkMediaDataDecoder.h"
+
+using namespace android;
+
+namespace android {
+class MOZ_EXPORT MediaBuffer;
+} // namespace android
+
+namespace mozilla {
+
+class GonkAudioDecoderManager : public GonkDecoderManager {
+typedef android::MediaCodecProxy MediaCodecProxy;
+public:
+ GonkAudioDecoderManager(const AudioInfo& aConfig);
+
+ virtual ~GonkAudioDecoderManager();
+
+ RefPtr<InitPromise> Init() override;
+
+ nsresult Output(int64_t aStreamOffset,
+ RefPtr<MediaData>& aOutput) override;
+
+ void ProcessFlush() override;
+ virtual void ResetEOS() override;
+
+ const char* GetDescriptionName() const override
+ {
+ return "gonk audio decoder";
+ }
+
+private:
+ bool InitMediaCodecProxy();
+
+ nsresult CreateAudioData(MediaBuffer* aBuffer, int64_t aStreamOffset);
+
+ uint32_t mAudioChannels;
+ uint32_t mAudioRate;
+ const uint32_t mAudioProfile;
+
+ MediaQueue<AudioData> mAudioQueue;
+
+ AudioCompactor mAudioCompactor;
+
+};
+
+} // namespace mozilla
+
+#endif // GonkAudioDecoderManager_h_
diff --git a/dom/media/platforms/gonk/GonkDecoderModule.cpp b/dom/media/platforms/gonk/GonkDecoderModule.cpp
new file mode 100644
index 000000000..537bc299c
--- /dev/null
+++ b/dom/media/platforms/gonk/GonkDecoderModule.cpp
@@ -0,0 +1,63 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#include "GonkDecoderModule.h"
+#include "GonkVideoDecoderManager.h"
+#include "GonkAudioDecoderManager.h"
+#include "mozilla/DebugOnly.h"
+#include "GonkMediaDataDecoder.h"
+
+namespace mozilla {
+GonkDecoderModule::GonkDecoderModule()
+{
+}
+
+GonkDecoderModule::~GonkDecoderModule()
+{
+}
+
+already_AddRefed<MediaDataDecoder>
+GonkDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
+{
+ RefPtr<MediaDataDecoder> decoder =
+ new GonkMediaDataDecoder(new GonkVideoDecoderManager(aParams.mImageContainer, aParams.VideoConfig()),
+ aParams.mCallback);
+ return decoder.forget();
+}
+
+already_AddRefed<MediaDataDecoder>
+GonkDecoderModule::CreateAudioDecoder(const CreateDecoderParams& aParams)
+{
+ RefPtr<MediaDataDecoder> decoder =
+ new GonkMediaDataDecoder(new GonkAudioDecoderManager(aParams.AudioConfig()),
+ aParams.mCallback);
+ return decoder.forget();
+}
+
+PlatformDecoderModule::ConversionRequired
+GonkDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
+{
+ if (aConfig.IsVideo()) {
+ return ConversionRequired::kNeedAnnexB;
+ } else {
+ return ConversionRequired::kNeedNone;
+ }
+}
+
+bool
+GonkDecoderModule::SupportsMimeType(const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const
+{
+ return aMimeType.EqualsLiteral("audio/mp4a-latm") ||
+ aMimeType.EqualsLiteral("audio/3gpp") ||
+ aMimeType.EqualsLiteral("audio/amr-wb") ||
+ aMimeType.EqualsLiteral("audio/mpeg") ||
+ aMimeType.EqualsLiteral("video/mp4") ||
+ aMimeType.EqualsLiteral("video/mp4v-es") ||
+ aMimeType.EqualsLiteral("video/avc") ||
+ aMimeType.EqualsLiteral("video/3gpp");
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/gonk/GonkDecoderModule.h b/dom/media/platforms/gonk/GonkDecoderModule.h
new file mode 100644
index 000000000..4f29f0e75
--- /dev/null
+++ b/dom/media/platforms/gonk/GonkDecoderModule.h
@@ -0,0 +1,37 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(GonkPlatformDecoderModule_h_)
+#define GonkPlatformDecoderModule_h_
+
+#include "PlatformDecoderModule.h"
+
+namespace mozilla {
+
+class GonkDecoderModule : public PlatformDecoderModule {
+public:
+ GonkDecoderModule();
+ virtual ~GonkDecoderModule();
+
+ // Decode thread.
+ already_AddRefed<MediaDataDecoder>
+ CreateVideoDecoder(const CreateDecoderParams& aParams) override;
+
+ // Decode thread.
+ already_AddRefed<MediaDataDecoder>
+ CreateAudioDecoder(const CreateDecoderParams& aParams) override;
+
+ ConversionRequired
+ DecoderNeedsConversion(const TrackInfo& aConfig) const override;
+
+ bool SupportsMimeType(const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const override;
+
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/platforms/gonk/GonkMediaDataDecoder.cpp b/dom/media/platforms/gonk/GonkMediaDataDecoder.cpp
new file mode 100644
index 000000000..6d59d72e1
--- /dev/null
+++ b/dom/media/platforms/gonk/GonkMediaDataDecoder.cpp
@@ -0,0 +1,385 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#include "GonkMediaDataDecoder.h"
+#include "VideoUtils.h"
+#include "nsTArray.h"
+#include "MediaCodecProxy.h"
+
+#include <stagefright/foundation/ADebug.h>
+
+#include "mozilla/Logging.h"
+#include <android/log.h>
+#define GMDD_LOG(...) __android_log_print(ANDROID_LOG_DEBUG, "GonkMediaDataDecoder", __VA_ARGS__)
+#define INPUT_TIMEOUT_US 0LL // Don't wait for buffer if none is available.
+#define MIN_QUEUED_SAMPLES 2
+
+#ifdef DEBUG
+#include <utils/AndroidThreads.h>
+#endif
+
+#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
+
+using namespace android;
+
+namespace mozilla {
+
+bool
+GonkDecoderManager::InitLoopers(MediaData::Type aType)
+{
+ MOZ_ASSERT(mDecodeLooper.get() == nullptr && mTaskLooper.get() == nullptr);
+ MOZ_ASSERT(aType == MediaData::VIDEO_DATA || aType == MediaData::AUDIO_DATA);
+
+ const char* suffix = (aType == MediaData::VIDEO_DATA ? "video" : "audio");
+ mDecodeLooper = new ALooper;
+ android::AString name("MediaCodecProxy/");
+ name.append(suffix);
+ mDecodeLooper->setName(name.c_str());
+
+ mTaskLooper = new ALooper;
+ name.setTo("GonkDecoderManager/");
+ name.append(suffix);
+ mTaskLooper->setName(name.c_str());
+ mTaskLooper->registerHandler(this);
+
+#ifdef DEBUG
+ sp<AMessage> findThreadId(new AMessage(kNotifyFindLooperId, id()));
+ findThreadId->post();
+#endif
+
+ return mDecodeLooper->start() == OK && mTaskLooper->start() == OK;
+}
+
+nsresult
+GonkDecoderManager::Input(MediaRawData* aSample)
+{
+ RefPtr<MediaRawData> sample;
+
+ if (aSample) {
+ sample = aSample;
+ } else {
+    // A null aSample signals EOS; represent it with an empty sample.
+ sample = new MediaRawData();
+ }
+ {
+ MutexAutoLock lock(mMutex);
+ mQueuedSamples.AppendElement(sample);
+ }
+
+ sp<AMessage> input = new AMessage(kNotifyProcessInput, id());
+ if (!aSample) {
+ input->setInt32("input-eos", 1);
+ }
+ input->post();
+ return NS_OK;
+}
+
+int32_t
+GonkDecoderManager::ProcessQueuedSamples()
+{
+ MOZ_ASSERT(OnTaskLooper());
+
+ MutexAutoLock lock(mMutex);
+ status_t rv;
+ while (mQueuedSamples.Length()) {
+ RefPtr<MediaRawData> data = mQueuedSamples.ElementAt(0);
+ rv = mDecoder->Input(reinterpret_cast<const uint8_t*>(data->Data()),
+ data->Size(),
+ data->mTime,
+ 0,
+ INPUT_TIMEOUT_US);
+ if (rv == OK) {
+ mQueuedSamples.RemoveElementAt(0);
+ mWaitOutput.AppendElement(WaitOutputInfo(data->mOffset, data->mTime,
+ /* eos */ data->Data() == nullptr));
+ } else if (rv == -EAGAIN || rv == -ETIMEDOUT) {
+      // In most cases, EAGAIN or ETIMEDOUT is benign; it just means OMX
+      // couldn't make an input buffer available in time.
+ break;
+ } else {
+ return rv;
+ }
+ }
+ return mQueuedSamples.Length();
+}
+
+nsresult
+GonkDecoderManager::Flush()
+{
+ if (mDecoder == nullptr) {
+ GMDD_LOG("Decoder is not initialized");
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ if (!mInitPromise.IsEmpty()) {
+ return NS_OK;
+ }
+
+ {
+ MutexAutoLock lock(mMutex);
+ mQueuedSamples.Clear();
+ }
+
+ MonitorAutoLock lock(mFlushMonitor);
+ mIsFlushing = true;
+ sp<AMessage> flush = new AMessage(kNotifyProcessFlush, id());
+ flush->post();
+ while (mIsFlushing) {
+ lock.Wait();
+ }
+ return NS_OK;
+}
+
+nsresult
+GonkDecoderManager::Shutdown()
+{
+ if (mDecoder.get()) {
+ mDecoder->stop();
+ mDecoder->ReleaseMediaResources();
+ mDecoder = nullptr;
+ }
+
+ mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+
+ return NS_OK;
+}
+
+size_t
+GonkDecoderManager::NumQueuedSamples()
+{
+ MutexAutoLock lock(mMutex);
+ return mQueuedSamples.Length();
+}
+
+void
+GonkDecoderManager::ProcessInput(bool aEndOfStream)
+{
+ MOZ_ASSERT(OnTaskLooper());
+
+ status_t rv = ProcessQueuedSamples();
+ if (rv >= 0) {
+ if (!aEndOfStream && rv <= MIN_QUEUED_SAMPLES) {
+ mDecodeCallback->InputExhausted();
+ }
+
+ if (mToDo.get() == nullptr) {
+ mToDo = new AMessage(kNotifyDecoderActivity, id());
+ if (aEndOfStream) {
+ mToDo->setInt32("input-eos", 1);
+ }
+ mDecoder->requestActivityNotification(mToDo);
+ } else if (aEndOfStream) {
+ mToDo->setInt32("input-eos", 1);
+ }
+ } else {
+ GMDD_LOG("input processed: error#%d", rv);
+ mDecodeCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ __func__));
+ }
+}
+
+void
+GonkDecoderManager::ProcessFlush()
+{
+ MOZ_ASSERT(OnTaskLooper());
+
+ mLastTime = INT64_MIN;
+ MonitorAutoLock lock(mFlushMonitor);
+ mWaitOutput.Clear();
+ if (mDecoder->flush() != OK) {
+ GMDD_LOG("flush error");
+ mDecodeCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ __func__));
+ }
+ mIsFlushing = false;
+ lock.NotifyAll();
+}
+
+// Use output timestamp to determine which output buffer is already returned
+// and remove corresponding info, except for EOS, from the waiting list.
+// This method handles the case where the audio decoder sends multiple output
+// buffers for one input.
+void
+GonkDecoderManager::UpdateWaitingList(int64_t aForgetUpTo)
+{
+ MOZ_ASSERT(OnTaskLooper());
+
+ size_t i;
+ for (i = 0; i < mWaitOutput.Length(); i++) {
+ const auto& item = mWaitOutput.ElementAt(i);
+ if (item.mEOS || item.mTimestamp > aForgetUpTo) {
+ break;
+ }
+ }
+ if (i > 0) {
+ mWaitOutput.RemoveElementsAt(0, i);
+ }
+}
+
+void
+GonkDecoderManager::ProcessToDo(bool aEndOfStream)
+{
+ MOZ_ASSERT(OnTaskLooper());
+
+ MOZ_ASSERT(mToDo.get() != nullptr);
+ mToDo.clear();
+
+ if (NumQueuedSamples() > 0 && ProcessQueuedSamples() < 0) {
+ mDecodeCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ __func__));
+ return;
+ }
+
+ while (mWaitOutput.Length() > 0) {
+ RefPtr<MediaData> output;
+ WaitOutputInfo wait = mWaitOutput.ElementAt(0);
+ nsresult rv = Output(wait.mOffset, output);
+ if (rv == NS_OK) {
+ MOZ_ASSERT(output);
+ mDecodeCallback->Output(output);
+ UpdateWaitingList(output->mTime);
+ } else if (rv == NS_ERROR_ABORT) {
+ // EOS
+ MOZ_ASSERT(mQueuedSamples.IsEmpty());
+ if (output) {
+ mDecodeCallback->Output(output);
+ UpdateWaitingList(output->mTime);
+ }
+ MOZ_ASSERT(mWaitOutput.Length() == 1);
+ mWaitOutput.RemoveElementAt(0);
+ mDecodeCallback->DrainComplete();
+ ResetEOS();
+ return;
+ } else if (rv == NS_ERROR_NOT_AVAILABLE) {
+ break;
+ } else {
+ mDecodeCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ __func__));
+ return;
+ }
+ }
+
+ if (!aEndOfStream && NumQueuedSamples() <= MIN_QUEUED_SAMPLES) {
+ mDecodeCallback->InputExhausted();
+    // No need to schedule the todo task this time because InputExhausted() will
+ // cause Input() to be invoked and do it for us.
+ return;
+ }
+
+ if (NumQueuedSamples() || mWaitOutput.Length() > 0) {
+ mToDo = new AMessage(kNotifyDecoderActivity, id());
+ if (aEndOfStream) {
+ mToDo->setInt32("input-eos", 1);
+ }
+ mDecoder->requestActivityNotification(mToDo);
+ }
+}
+
+void
+GonkDecoderManager::ResetEOS()
+{
+  // After EOS, android::MediaCodec needs to be flushed before accepting new input.
+ mWaitOutput.Clear();
+ if (mDecoder->flush() != OK) {
+ GMDD_LOG("flush error");
+ mDecodeCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ __func__));
+ }
+}
+
+void
+GonkDecoderManager::onMessageReceived(const sp<AMessage> &aMessage)
+{
+ switch (aMessage->what()) {
+ case kNotifyProcessInput:
+ {
+ int32_t eos = 0;
+ ProcessInput(aMessage->findInt32("input-eos", &eos) && eos);
+ break;
+ }
+ case kNotifyProcessFlush:
+ {
+ ProcessFlush();
+ break;
+ }
+ case kNotifyDecoderActivity:
+ {
+ int32_t eos = 0;
+ ProcessToDo(aMessage->findInt32("input-eos", &eos) && eos);
+ break;
+ }
+#ifdef DEBUG
+ case kNotifyFindLooperId:
+ {
+ mTaskLooperId = androidGetThreadId();
+ MOZ_ASSERT(mTaskLooperId);
+ break;
+ }
+#endif
+ default:
+ {
+ TRESPASS();
+ break;
+ }
+ }
+}
+
+#ifdef DEBUG
+bool
+GonkDecoderManager::OnTaskLooper()
+{
+ return androidGetThreadId() == mTaskLooperId;
+}
+#endif
+
+GonkMediaDataDecoder::GonkMediaDataDecoder(GonkDecoderManager* aManager,
+ MediaDataDecoderCallback* aCallback)
+ : mManager(aManager)
+{
+ MOZ_COUNT_CTOR(GonkMediaDataDecoder);
+ mManager->SetDecodeCallback(aCallback);
+}
+
+GonkMediaDataDecoder::~GonkMediaDataDecoder()
+{
+ MOZ_COUNT_DTOR(GonkMediaDataDecoder);
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+GonkMediaDataDecoder::Init()
+{
+ return mManager->Init();
+}
+
+void
+GonkMediaDataDecoder::Shutdown()
+{
+ mManager->Shutdown();
+
+  // The codec-allocation runnable and the init promise live on the reader
+  // TaskQueue, so the manager must be destroyed on the reader TaskQueue to
+  // prevent racing.
+ mManager = nullptr;
+}
+
+// Inserts data into the decoder's pipeline.
+void
+GonkMediaDataDecoder::Input(MediaRawData* aSample)
+{
+ mManager->Input(aSample);
+}
+
+void
+GonkMediaDataDecoder::Flush()
+{
+ mManager->Flush();
+}
+
+void
+GonkMediaDataDecoder::Drain()
+{
+ mManager->Input(nullptr);
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/gonk/GonkMediaDataDecoder.h b/dom/media/platforms/gonk/GonkMediaDataDecoder.h
new file mode 100644
index 000000000..bba2a8645
--- /dev/null
+++ b/dom/media/platforms/gonk/GonkMediaDataDecoder.h
@@ -0,0 +1,214 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(GonkMediaDataDecoder_h_)
+#define GonkMediaDataDecoder_h_
+#include "PlatformDecoderModule.h"
+#include <stagefright/foundation/AHandler.h>
+
+namespace android {
+struct ALooper;
+class MediaBuffer;
+class MediaCodecProxy;
+} // namespace android
+
+namespace mozilla {
+class MediaRawData;
+
+// Manages the data flow: accepts encoded input and produces decoded output.
+class GonkDecoderManager : public android::AHandler {
+public:
+ typedef TrackInfo::TrackType TrackType;
+ typedef MediaDataDecoder::InitPromise InitPromise;
+
+ virtual ~GonkDecoderManager() {}
+
+ virtual RefPtr<InitPromise> Init() = 0;
+ virtual const char* GetDescriptionName() const = 0;
+
+ // Asynchronously send sample into mDecoder. If out of input buffer, aSample
+ // will be queued for later re-send.
+ nsresult Input(MediaRawData* aSample);
+
+  // Flush the queued samples and signal the decoder to discard all pending input/output.
+ nsresult Flush();
+
+  // Shuts down the decoder and rejects the init promise.
+ virtual nsresult Shutdown();
+
+ // How many samples are waiting for processing.
+ size_t NumQueuedSamples();
+
+ // Set callback for decoder events, such as requesting more input,
+ // returning output, or reporting error.
+ void SetDecodeCallback(MediaDataDecoderCallback* aCallback)
+ {
+ mDecodeCallback = aCallback;
+ }
+
+protected:
+ GonkDecoderManager()
+ : mMutex("GonkDecoderManager")
+ , mLastTime(INT64_MIN)
+ , mFlushMonitor("GonkDecoderManager::Flush")
+ , mIsFlushing(false)
+ , mDecodeCallback(nullptr)
+ {}
+
+ bool InitLoopers(MediaData::Type aType);
+
+ void onMessageReceived(const android::sp<android::AMessage> &aMessage) override;
+
+ // Produces decoded output. It returns NS_OK on success, or NS_ERROR_NOT_AVAILABLE
+ // when output is not produced yet.
+ // If this returns a failure code other than NS_ERROR_NOT_AVAILABLE, an error
+ // will be reported through mDecodeCallback.
+ virtual nsresult Output(int64_t aStreamOffset,
+ RefPtr<MediaData>& aOutput) = 0;
+
+  // Send queued samples to OMX. Returns how many samples are still in the
+  // queue after processing, or a negative error code on failure.
+ int32_t ProcessQueuedSamples();
+
+ void ProcessInput(bool aEndOfStream);
+ virtual void ProcessFlush();
+ void ProcessToDo(bool aEndOfStream);
+ virtual void ResetEOS();
+
+ RefPtr<MediaByteBuffer> mCodecSpecificData;
+
+ nsAutoCString mMimeType;
+
+  // MediaCodec wrapper that performs the decoding.
+ android::sp<android::MediaCodecProxy> mDecoder;
+ // Looper for mDecoder to run on.
+ android::sp<android::ALooper> mDecodeLooper;
+ // Looper to run decode tasks such as processing input, output, flush, and
+ // recycling output buffers.
+ android::sp<android::ALooper> mTaskLooper;
+ // Message codes for tasks running on mTaskLooper.
+ enum {
+    // Decoder will send this to indicate an internal state change such as input
+    // or output buffer availability. Used to run pending input & output tasks.
+ kNotifyDecoderActivity = 'nda ',
+ // Signal the decoder to flush.
+ kNotifyProcessFlush = 'npf ',
+ // Used to process queued samples when there is new input.
+ kNotifyProcessInput = 'npi ',
+#ifdef DEBUG
+ kNotifyFindLooperId = 'nfli',
+#endif
+ };
+
+ MozPromiseHolder<InitPromise> mInitPromise;
+
+ Mutex mMutex; // Protects mQueuedSamples.
+ // A queue that stores the samples waiting to be sent to mDecoder.
+  // An empty element means EOS and no sample should be queued after it.
+ // Samples are queued in caller's thread and dequeued in mTaskLooper.
+ nsTArray<RefPtr<MediaRawData>> mQueuedSamples;
+
+ // The last decoded frame presentation time. Only accessed on mTaskLooper.
+ int64_t mLastTime;
+
+ Monitor mFlushMonitor; // Waits for flushing to complete.
+ bool mIsFlushing; // Protected by mFlushMonitor.
+
+ // Remembers the notification that is currently waiting for the decoder event
+  // to avoid requesting more than one notification at a time, which is
+ // forbidden by mDecoder.
+ android::sp<android::AMessage> mToDo;
+
+ // Stores sample info for output buffer processing later.
+ struct WaitOutputInfo {
+ WaitOutputInfo(int64_t aOffset, int64_t aTimestamp, bool aEOS)
+ : mOffset(aOffset)
+ , mTimestamp(aTimestamp)
+ , mEOS(aEOS)
+ {}
+ const int64_t mOffset;
+ const int64_t mTimestamp;
+ const bool mEOS;
+ };
+
+ nsTArray<WaitOutputInfo> mWaitOutput;
+
+ MediaDataDecoderCallback* mDecodeCallback; // Reports decoder output or error.
+
+private:
+ void UpdateWaitingList(int64_t aForgetUpTo);
+
+#ifdef DEBUG
+ typedef void* LooperId;
+
+ bool OnTaskLooper();
+ LooperId mTaskLooperId;
+#endif
+};
+
+class AutoReleaseMediaBuffer
+{
+public:
+ AutoReleaseMediaBuffer(android::MediaBuffer* aBuffer, android::MediaCodecProxy* aCodec)
+ : mBuffer(aBuffer)
+ , mCodec(aCodec)
+ {}
+
+ ~AutoReleaseMediaBuffer()
+ {
+ MOZ_ASSERT(mCodec.get());
+ if (mBuffer) {
+ mCodec->ReleaseMediaBuffer(mBuffer);
+ }
+ }
+
+ android::MediaBuffer* forget()
+ {
+ android::MediaBuffer* tmp = mBuffer;
+ mBuffer = nullptr;
+ return tmp;
+ }
+
+private:
+ android::MediaBuffer* mBuffer;
+ android::sp<android::MediaCodecProxy> mCodec;
+};
+
+// Samples are decoded using the GonkDecoder (MediaCodec)
+// created by the GonkDecoderManager. This class implements
+// the higher-level logic that drives mapping the Gonk to the async
+// MediaDataDecoder interface. The specifics of decoding the exact stream
+// type are handled by GonkDecoderManager and the GonkDecoder it creates.
+class GonkMediaDataDecoder : public MediaDataDecoder {
+public:
+ GonkMediaDataDecoder(GonkDecoderManager* aDecoderManager,
+ MediaDataDecoderCallback* aCallback);
+
+ ~GonkMediaDataDecoder();
+
+ RefPtr<InitPromise> Init() override;
+
+ void Input(MediaRawData* aSample) override;
+
+ void Flush() override;
+
+ void Drain() override;
+
+ void Shutdown() override;
+
+ const char* GetDescriptionName() const override
+ {
+ return "gonk decoder";
+ }
+
+private:
+
+ android::sp<GonkDecoderManager> mManager;
+};
+
+} // namespace mozilla
+
+#endif // GonkMediaDataDecoder_h_
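
For orientation, a rough sketch of the call sequence the classes above expect (illustrative only; DriveGonkDecoder and its arguments are placeholders standing in for the real reader-side objects, and output arrives asynchronously through the callback rather than as return values):

    #include "GonkMediaDataDecoder.h"

    namespace mozilla {

    static void DriveGonkDecoder(GonkDecoderManager* aManager,
                                 MediaDataDecoderCallback* aCallback,
                                 MediaRawData* aSample)
    {
      RefPtr<MediaDataDecoder> decoder =
        new GonkMediaDataDecoder(aManager, aCallback);

      RefPtr<MediaDataDecoder::InitPromise> init = decoder->Init();
      // Once the init promise resolves on the reader task queue:
      decoder->Input(aSample);  // queue encoded data; aCallback->Output() gets frames
      decoder->Drain();         // same as Input(nullptr): push out remaining frames
      decoder->Flush();         // after a seek: discard all pending input/output
      decoder->Shutdown();      // stop the MediaCodec proxy and drop the manager
    }

    } // namespace mozilla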
diff --git a/dom/media/platforms/gonk/GonkVideoDecoderManager.cpp b/dom/media/platforms/gonk/GonkVideoDecoderManager.cpp
new file mode 100644
index 000000000..0c7b3b6af
--- /dev/null
+++ b/dom/media/platforms/gonk/GonkVideoDecoderManager.cpp
@@ -0,0 +1,772 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#include "MediaCodecProxy.h"
+#include <OMX_IVCommon.h>
+#include <gui/Surface.h>
+#include <ICrypto.h>
+#include "GonkVideoDecoderManager.h"
+#include "GrallocImages.h"
+#include "MediaDecoderReader.h"
+#include "ImageContainer.h"
+#include "VideoUtils.h"
+#include "nsThreadUtils.h"
+#include "Layers.h"
+#include "mozilla/Logging.h"
+#include <stagefright/MediaBuffer.h>
+#include <stagefright/MetaData.h>
+#include <stagefright/MediaErrors.h>
+#include <stagefright/foundation/AString.h>
+#include "GonkNativeWindow.h"
+#include "mozilla/layers/GrallocTextureClient.h"
+#include "mozilla/layers/ImageBridgeChild.h"
+#include "mozilla/layers/TextureClient.h"
+#include "mozilla/layers/TextureClientRecycleAllocator.h"
+#include <cutils/properties.h>
+
+#define CODECCONFIG_TIMEOUT_US 10000LL
+#define READ_OUTPUT_BUFFER_TIMEOUT_US 0LL
+
+#include <android/log.h>
+#define GVDM_LOG(...) __android_log_print(ANDROID_LOG_DEBUG, "GonkVideoDecoderManager", __VA_ARGS__)
+
+#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
+using namespace mozilla::layers;
+using namespace android;
+typedef android::MediaCodecProxy MediaCodecProxy;
+
+namespace mozilla {
+
+class GonkTextureClientAllocationHelper : public layers::ITextureClientAllocationHelper
+{
+public:
+ GonkTextureClientAllocationHelper(uint32_t aGrallocFormat,
+ gfx::IntSize aSize)
+ : ITextureClientAllocationHelper(gfx::SurfaceFormat::UNKNOWN,
+ aSize,
+ BackendSelector::Content,
+ TextureFlags::DEALLOCATE_CLIENT,
+ ALLOC_DISALLOW_BUFFERTEXTURECLIENT)
+ , mGrallocFormat(aGrallocFormat)
+ {}
+
+ already_AddRefed<TextureClient> Allocate(KnowsCompositor* aAllocator) override
+ {
+ uint32_t usage = android::GraphicBuffer::USAGE_SW_READ_OFTEN |
+ android::GraphicBuffer::USAGE_SW_WRITE_OFTEN |
+ android::GraphicBuffer::USAGE_HW_TEXTURE;
+
+ GrallocTextureData* texData = GrallocTextureData::Create(mSize, mGrallocFormat,
+ gfx::BackendType::NONE,
+ usage, aAllocator->GetTextureForwarder());
+ if (!texData) {
+ return nullptr;
+ }
+ sp<GraphicBuffer> graphicBuffer = texData->GetGraphicBuffer();
+ if (!graphicBuffer.get()) {
+ return nullptr;
+ }
+ RefPtr<TextureClient> textureClient =
+ TextureClient::CreateWithData(texData, TextureFlags::DEALLOCATE_CLIENT, aAllocator->GetTextureForwarder());
+ return textureClient.forget();
+ }
+
+ bool IsCompatible(TextureClient* aTextureClient) override
+ {
+ if (!aTextureClient) {
+ return false;
+ }
+ sp<GraphicBuffer> graphicBuffer =
+ static_cast<GrallocTextureData*>(aTextureClient->GetInternalData())->GetGraphicBuffer();
+ if (!graphicBuffer.get() ||
+ static_cast<uint32_t>(graphicBuffer->getPixelFormat()) != mGrallocFormat ||
+ aTextureClient->GetSize() != mSize) {
+ return false;
+ }
+ return true;
+ }
+
+private:
+ uint32_t mGrallocFormat;
+};
+
+GonkVideoDecoderManager::GonkVideoDecoderManager(
+ mozilla::layers::ImageContainer* aImageContainer,
+ const VideoInfo& aConfig)
+ : mConfig(aConfig)
+ , mImageContainer(aImageContainer)
+ , mColorConverterBufferSize(0)
+ , mPendingReleaseItemsLock("GonkVideoDecoderManager::mPendingReleaseItemsLock")
+ , mNeedsCopyBuffer(false)
+{
+ MOZ_COUNT_CTOR(GonkVideoDecoderManager);
+}
+
+GonkVideoDecoderManager::~GonkVideoDecoderManager()
+{
+ MOZ_COUNT_DTOR(GonkVideoDecoderManager);
+}
+
+nsresult
+GonkVideoDecoderManager::Shutdown()
+{
+ mVideoCodecRequest.DisconnectIfExists();
+ return GonkDecoderManager::Shutdown();
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+GonkVideoDecoderManager::Init()
+{
+ mNeedsCopyBuffer = false;
+
+ uint32_t maxWidth, maxHeight;
+ char propValue[PROPERTY_VALUE_MAX];
+ property_get("ro.moz.omx.hw.max_width", propValue, "-1");
+ maxWidth = -1 == atoi(propValue) ? MAX_VIDEO_WIDTH : atoi(propValue);
+ property_get("ro.moz.omx.hw.max_height", propValue, "-1");
+  maxHeight = -1 == atoi(propValue) ? MAX_VIDEO_HEIGHT : atoi(propValue);
+
+ if (uint32_t(mConfig.mImage.width * mConfig.mImage.height) > maxWidth * maxHeight) {
+ GVDM_LOG("Video resolution exceeds hw codec capability");
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+ // Validate the container-reported frame and pictureRect sizes. This ensures
+ // that our video frame creation code doesn't overflow.
+ if (!IsValidVideoRegion(mConfig.mImage, mConfig.ImageRect(), mConfig.mDisplay)) {
+ GVDM_LOG("It is not a valid region");
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+ mReaderTaskQueue = AbstractThread::GetCurrent()->AsTaskQueue();
+ MOZ_ASSERT(mReaderTaskQueue);
+
+ if (mDecodeLooper.get() != nullptr) {
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+ if (!InitLoopers(MediaData::VIDEO_DATA)) {
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ }
+
+ RefPtr<InitPromise> p = mInitPromise.Ensure(__func__);
+ android::sp<GonkVideoDecoderManager> self = this;
+ mDecoder = MediaCodecProxy::CreateByType(mDecodeLooper,
+ mConfig.mMimeType.get(),
+ false);
+
+ uint32_t capability = MediaCodecProxy::kEmptyCapability;
+ if (mDecoder->getCapability(&capability) == OK && (capability &
+ MediaCodecProxy::kCanExposeGraphicBuffer)) {
+#if ANDROID_VERSION >= 21
+ sp<IGonkGraphicBufferConsumer> consumer;
+ GonkBufferQueue::createBufferQueue(&mGraphicBufferProducer, &consumer);
+ mNativeWindow = new GonkNativeWindow(consumer);
+#else
+ mNativeWindow = new GonkNativeWindow();
+#endif
+ }
+
+ mVideoCodecRequest.Begin(mDecoder->AsyncAllocateVideoMediaCodec()
+ ->Then(mReaderTaskQueue, __func__,
+ [self] (bool) -> void {
+ self->mVideoCodecRequest.Complete();
+ self->codecReserved();
+ }, [self] (bool) -> void {
+ self->mVideoCodecRequest.Complete();
+ self->codecCanceled();
+ }));
+
+ return p;
+}
+
+nsresult
+GonkVideoDecoderManager::CreateVideoData(MediaBuffer* aBuffer,
+ int64_t aStreamOffset,
+ VideoData **v)
+{
+ *v = nullptr;
+ RefPtr<VideoData> data;
+ int64_t timeUs;
+ int32_t keyFrame;
+
+ if (aBuffer == nullptr) {
+ GVDM_LOG("Video Buffer is not valid!");
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ AutoReleaseMediaBuffer autoRelease(aBuffer, mDecoder.get());
+
+ if (!aBuffer->meta_data()->findInt64(kKeyTime, &timeUs)) {
+ GVDM_LOG("Decoder did not return frame time");
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ if (mLastTime > timeUs) {
+    GVDM_LOG("Output decoded sample time went backwards. time=%lld", timeUs);
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+ mLastTime = timeUs;
+
+ if (aBuffer->range_length() == 0) {
+    // Some decoders may return spurious empty buffers that we just want to
+    // ignore, as noted in Android's AwesomePlayer.cpp.
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (!aBuffer->meta_data()->findInt32(kKeyIsSyncFrame, &keyFrame)) {
+ keyFrame = 0;
+ }
+
+ gfx::IntRect picture =
+ mConfig.ScaledImageRect(mFrameInfo.mWidth, mFrameInfo.mHeight);
+ if (aBuffer->graphicBuffer().get()) {
+ data = CreateVideoDataFromGraphicBuffer(aBuffer, picture);
+ if (data && !mNeedsCopyBuffer) {
+      // RecycleCallback() will be responsible for releasing the buffer.
+ autoRelease.forget();
+ }
+ mNeedsCopyBuffer = false;
+ } else {
+ data = CreateVideoDataFromDataBuffer(aBuffer, picture);
+ }
+
+ if (!data) {
+ return NS_ERROR_UNEXPECTED;
+ }
+ // Fill necessary info.
+ data->mOffset = aStreamOffset;
+ data->mTime = timeUs;
+ data->mKeyframe = keyFrame;
+
+ data.forget(v);
+ return NS_OK;
+}
+
+// Copy pixels from one planar YUV to another.
+static void
+CopyYUV(PlanarYCbCrData& aSource, PlanarYCbCrData& aDestination)
+{
+ // Fill Y plane.
+ uint8_t* srcY = aSource.mYChannel;
+ gfx::IntSize ySize = aSource.mYSize;
+ uint8_t* destY = aDestination.mYChannel;
+ // Y plane.
+ for (int i = 0; i < ySize.height; i++) {
+ memcpy(destY, srcY, ySize.width);
+ srcY += aSource.mYStride;
+ destY += aDestination.mYStride;
+ }
+
+ // Fill UV plane.
+ // Line start
+ uint8_t* srcU = aSource.mCbChannel;
+ uint8_t* srcV = aSource.mCrChannel;
+ uint8_t* destU = aDestination.mCbChannel;
+ uint8_t* destV = aDestination.mCrChannel;
+
+ gfx::IntSize uvSize = aSource.mCbCrSize;
+ for (int i = 0; i < uvSize.height; i++) {
+ uint8_t* su = srcU;
+ uint8_t* sv = srcV;
+ uint8_t* du = destU;
+    uint8_t* dv = destV;
+ for (int j = 0; j < uvSize.width; j++) {
+ *du++ = *su++;
+ *dv++ = *sv++;
+ // Move to next pixel.
+ su += aSource.mCbSkip;
+ sv += aSource.mCrSkip;
+ du += aDestination.mCbSkip;
+ dv += aDestination.mCrSkip;
+ }
+ // Move to next line.
+ srcU += aSource.mCbCrStride;
+ srcV += aSource.mCbCrStride;
+ destU += aDestination.mCbCrStride;
+ destV += aDestination.mCbCrStride;
+ }
+}
+
+inline static int
+Align(int aX, int aAlign)
+{
+ return (aX + aAlign - 1) & ~(aAlign - 1);
+}
+
+// Venus formats are documented in kernel/include/media/msm_media_info.h:
+// * Y_Stride : Width aligned to 128
+// * UV_Stride : Width aligned to 128
+// * Y_Scanlines: Height aligned to 32
+// * UV_Scanlines: Height/2 aligned to 16
+// * Total size = align((Y_Stride * Y_Scanlines
+// * + UV_Stride * UV_Scanlines + 4096), 4096)
+static void
+CopyVenus(uint8_t* aSrc, uint8_t* aDest, uint32_t aWidth, uint32_t aHeight)
+{
+ size_t yStride = Align(aWidth, 128);
+ uint8_t* s = aSrc;
+ uint8_t* d = aDest;
+ for (size_t i = 0; i < aHeight; i++) {
+ memcpy(d, s, aWidth);
+ s += yStride;
+ d += yStride;
+ }
+ size_t uvStride = yStride;
+ size_t uvLines = (aHeight + 1) / 2;
+ size_t ySize = yStride * Align(aHeight, 32);
+ s = aSrc + ySize;
+ d = aDest + ySize;
+ for (size_t i = 0; i < uvLines; i++) {
+ memcpy(d, s, aWidth);
+ s += uvStride;
+ d += uvStride;
+ }
+}
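
To make the Venus layout above concrete, a worked size calculation (editorial example; the 1280x720 frame size is chosen purely for illustration), using the same Align() helper:

    // Y_Stride     = Align(1280, 128)   = 1280
    // Y_Scanlines  = Align(720, 32)     = 736
    // UV_Stride    = 1280
    // UV_Scanlines = Align(720 / 2, 16) = 368
    // Total size   = Align(1280*736 + 1280*368 + 4096, 4096)
    //              = Align(1417216, 4096) = 1417216 bytes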
+
+static void
+CopyGraphicBuffer(sp<GraphicBuffer>& aSource, sp<GraphicBuffer>& aDestination)
+{
+ void* srcPtr = nullptr;
+ aSource->lock(GraphicBuffer::USAGE_SW_READ_OFTEN, &srcPtr);
+ void* destPtr = nullptr;
+ aDestination->lock(GraphicBuffer::USAGE_SW_WRITE_OFTEN, &destPtr);
+ MOZ_ASSERT(srcPtr && destPtr);
+
+ // Build PlanarYCbCrData for source buffer.
+ PlanarYCbCrData srcData;
+ switch (aSource->getPixelFormat()) {
+ case HAL_PIXEL_FORMAT_YV12: {
+ // Android YV12 format is defined in system/core/include/system/graphics.h
+ srcData.mYChannel = static_cast<uint8_t*>(srcPtr);
+ srcData.mYSkip = 0;
+ srcData.mYSize.width = aSource->getWidth();
+ srcData.mYSize.height = aSource->getHeight();
+ srcData.mYStride = aSource->getStride();
+ // 4:2:0.
+ srcData.mCbCrSize.width = srcData.mYSize.width / 2;
+ srcData.mCbCrSize.height = srcData.mYSize.height / 2;
+ srcData.mCrChannel = srcData.mYChannel + (srcData.mYStride * srcData.mYSize.height);
+ // Aligned to 16 bytes boundary.
+ srcData.mCbCrStride = Align(srcData.mYStride / 2, 16);
+ srcData.mCrSkip = 0;
+ srcData.mCbChannel = srcData.mCrChannel + (srcData.mCbCrStride * srcData.mCbCrSize.height);
+ srcData.mCbSkip = 0;
+
+ // Build PlanarYCbCrData for destination buffer.
+ PlanarYCbCrData destData;
+ destData.mYChannel = static_cast<uint8_t*>(destPtr);
+ destData.mYSkip = 0;
+ destData.mYSize.width = aDestination->getWidth();
+ destData.mYSize.height = aDestination->getHeight();
+ destData.mYStride = aDestination->getStride();
+ // 4:2:0.
+ destData.mCbCrSize.width = destData.mYSize.width / 2;
+ destData.mCbCrSize.height = destData.mYSize.height / 2;
+ destData.mCrChannel = destData.mYChannel + (destData.mYStride * destData.mYSize.height);
+ // Aligned to 16 bytes boundary.
+ destData.mCbCrStride = Align(destData.mYStride / 2, 16);
+ destData.mCrSkip = 0;
+ destData.mCbChannel = destData.mCrChannel + (destData.mCbCrStride * destData.mCbCrSize.height);
+ destData.mCbSkip = 0;
+
+ CopyYUV(srcData, destData);
+ break;
+ }
+ case GrallocImage::HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS:
+ CopyVenus(static_cast<uint8_t*>(srcPtr),
+ static_cast<uint8_t*>(destPtr),
+ aSource->getWidth(),
+ aSource->getHeight());
+ break;
+ default:
+ NS_ERROR("Unsupported input gralloc image type. Should never be here.");
+ }
+
+
+ aSource->unlock();
+ aDestination->unlock();
+}
+
+already_AddRefed<VideoData>
+GonkVideoDecoderManager::CreateVideoDataFromGraphicBuffer(MediaBuffer* aSource,
+ gfx::IntRect& aPicture)
+{
+ sp<GraphicBuffer> srcBuffer(aSource->graphicBuffer());
+ RefPtr<TextureClient> textureClient;
+
+ if (mNeedsCopyBuffer) {
+ // Copy buffer contents for bug 1199809.
+ if (!mCopyAllocator) {
+ RefPtr<layers::ImageBridgeChild> bridge = layers::ImageBridgeChild::GetSingleton();
+ mCopyAllocator = new TextureClientRecycleAllocator(bridge);
+ }
+ if (!mCopyAllocator) {
+ GVDM_LOG("Create buffer allocator failed!");
+ return nullptr;
+ }
+
+ gfx::IntSize size(srcBuffer->getWidth(), srcBuffer->getHeight());
+ GonkTextureClientAllocationHelper helper(srcBuffer->getPixelFormat(), size);
+ textureClient = mCopyAllocator->CreateOrRecycle(helper);
+ if (!textureClient) {
+ GVDM_LOG("Copy buffer allocation failed!");
+ return nullptr;
+ }
+
+ sp<GraphicBuffer> destBuffer =
+ static_cast<GrallocTextureData*>(textureClient->GetInternalData())->GetGraphicBuffer();
+
+ CopyGraphicBuffer(srcBuffer, destBuffer);
+ } else {
+ textureClient = mNativeWindow->getTextureClientFromBuffer(srcBuffer.get());
+ textureClient->SetRecycleCallback(GonkVideoDecoderManager::RecycleCallback, this);
+ static_cast<GrallocTextureData*>(textureClient->GetInternalData())->SetMediaBuffer(aSource);
+ }
+
+ RefPtr<VideoData> data =
+ VideoData::CreateAndCopyIntoTextureClient(mConfig,
+ 0, // Filled later by caller.
+ 0, // Filled later by caller.
+ 1, // No way to pass sample duration from muxer to
+ // OMX codec, so we hardcode the duration here.
+ textureClient,
+ false, // Filled later by caller.
+ -1,
+ aPicture);
+ return data.forget();
+}
+
+already_AddRefed<VideoData>
+GonkVideoDecoderManager::CreateVideoDataFromDataBuffer(MediaBuffer* aSource, gfx::IntRect& aPicture)
+{
+ if (!aSource->data()) {
+ GVDM_LOG("No data in Video Buffer!");
+ return nullptr;
+ }
+ uint8_t *yuv420p_buffer = (uint8_t *)aSource->data();
+ int32_t stride = mFrameInfo.mStride;
+ int32_t slice_height = mFrameInfo.mSliceHeight;
+
+ // Converts to OMX_COLOR_FormatYUV420Planar
+ if (mFrameInfo.mColorFormat != OMX_COLOR_FormatYUV420Planar) {
+ ARect crop;
+ crop.top = 0;
+ crop.bottom = mFrameInfo.mHeight;
+ crop.left = 0;
+ crop.right = mFrameInfo.mWidth;
+ yuv420p_buffer = GetColorConverterBuffer(mFrameInfo.mWidth, mFrameInfo.mHeight);
+ if (mColorConverter.convertDecoderOutputToI420(aSource->data(),
+ mFrameInfo.mWidth, mFrameInfo.mHeight, crop, yuv420p_buffer) != OK) {
+ GVDM_LOG("Color conversion failed!");
+ return nullptr;
+ }
+ stride = mFrameInfo.mWidth;
+ slice_height = mFrameInfo.mHeight;
+ }
+
+ size_t yuv420p_y_size = stride * slice_height;
+ size_t yuv420p_u_size = ((stride + 1) / 2) * ((slice_height + 1) / 2);
+ uint8_t *yuv420p_y = yuv420p_buffer;
+ uint8_t *yuv420p_u = yuv420p_y + yuv420p_y_size;
+ uint8_t *yuv420p_v = yuv420p_u + yuv420p_u_size;
+
+ VideoData::YCbCrBuffer b;
+ b.mPlanes[0].mData = yuv420p_y;
+ b.mPlanes[0].mWidth = mFrameInfo.mWidth;
+ b.mPlanes[0].mHeight = mFrameInfo.mHeight;
+ b.mPlanes[0].mStride = stride;
+ b.mPlanes[0].mOffset = 0;
+ b.mPlanes[0].mSkip = 0;
+
+ b.mPlanes[1].mData = yuv420p_u;
+ b.mPlanes[1].mWidth = (mFrameInfo.mWidth + 1) / 2;
+ b.mPlanes[1].mHeight = (mFrameInfo.mHeight + 1) / 2;
+ b.mPlanes[1].mStride = (stride + 1) / 2;
+ b.mPlanes[1].mOffset = 0;
+ b.mPlanes[1].mSkip = 0;
+
+ b.mPlanes[2].mData = yuv420p_v;
+  b.mPlanes[2].mWidth = (mFrameInfo.mWidth + 1) / 2;
+ b.mPlanes[2].mHeight = (mFrameInfo.mHeight + 1) / 2;
+ b.mPlanes[2].mStride = (stride + 1) / 2;
+ b.mPlanes[2].mOffset = 0;
+ b.mPlanes[2].mSkip = 0;
+
+ RefPtr<VideoData> data =
+ VideoData::CreateAndCopyData(mConfig,
+ mImageContainer,
+ 0, // Filled later by caller.
+ 0, // Filled later by caller.
+ 1, // We don't know the duration.
+ b,
+ 0, // Filled later by caller.
+ -1,
+ aPicture);
+
+ return data.forget();
+}
+
+bool
+GonkVideoDecoderManager::SetVideoFormat()
+{
+ // read video metadata from MediaCodec
+ sp<AMessage> codecFormat;
+ if (mDecoder->getOutputFormat(&codecFormat) == OK) {
+ AString mime;
+ int32_t width = 0;
+ int32_t height = 0;
+ int32_t stride = 0;
+ int32_t slice_height = 0;
+ int32_t color_format = 0;
+ int32_t crop_left = 0;
+ int32_t crop_top = 0;
+ int32_t crop_right = 0;
+ int32_t crop_bottom = 0;
+ if (!codecFormat->findString("mime", &mime) ||
+ !codecFormat->findInt32("width", &width) ||
+ !codecFormat->findInt32("height", &height) ||
+ !codecFormat->findInt32("stride", &stride) ||
+ !codecFormat->findInt32("slice-height", &slice_height) ||
+ !codecFormat->findInt32("color-format", &color_format) ||
+ !codecFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
+ GVDM_LOG("Failed to find values");
+ return false;
+ }
+ mFrameInfo.mWidth = width;
+ mFrameInfo.mHeight = height;
+ mFrameInfo.mStride = stride;
+ mFrameInfo.mSliceHeight = slice_height;
+ mFrameInfo.mColorFormat = color_format;
+
+ nsIntSize displaySize(width, height);
+ if (!IsValidVideoRegion(mConfig.mDisplay,
+ mConfig.ScaledImageRect(width, height),
+ displaySize)) {
+ GVDM_LOG("It is not a valid region");
+ return false;
+ }
+ return true;
+ }
+  GVDM_LOG("Failed to get output format");
+ return false;
+}
+
+// Blocks until a decoded sample is produced by the decoder.
+nsresult
+GonkVideoDecoderManager::Output(int64_t aStreamOffset,
+ RefPtr<MediaData>& aOutData)
+{
+ aOutData = nullptr;
+ status_t err;
+ if (mDecoder == nullptr) {
+ GVDM_LOG("Decoder is not inited");
+ return NS_ERROR_UNEXPECTED;
+ }
+ MediaBuffer* outputBuffer = nullptr;
+ err = mDecoder->Output(&outputBuffer, READ_OUTPUT_BUFFER_TIMEOUT_US);
+
+ switch (err) {
+ case OK:
+ {
+ RefPtr<VideoData> data;
+ nsresult rv = CreateVideoData(outputBuffer, aStreamOffset, getter_AddRefs(data));
+ if (rv == NS_ERROR_NOT_AVAILABLE) {
+        // Decoder outputs an empty video buffer; try again.
+ return NS_ERROR_NOT_AVAILABLE;
+ } else if (rv != NS_OK || data == nullptr) {
+ GVDM_LOG("Failed to create VideoData");
+ return NS_ERROR_UNEXPECTED;
+ }
+ aOutData = data;
+ return NS_OK;
+ }
+ case android::INFO_FORMAT_CHANGED:
+ {
+ // If the format changed, update our cached info.
+ GVDM_LOG("Decoder format changed");
+ if (!SetVideoFormat()) {
+ return NS_ERROR_UNEXPECTED;
+ }
+ return Output(aStreamOffset, aOutData);
+ }
+ case android::INFO_OUTPUT_BUFFERS_CHANGED:
+ {
+ if (mDecoder->UpdateOutputBuffers()) {
+ return Output(aStreamOffset, aOutData);
+ }
+      GVDM_LOG("Failed to update output buffers!");
+ return NS_ERROR_FAILURE;
+ }
+ case -EAGAIN:
+ {
+// GVDM_LOG("Need to try again!");
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+ case android::ERROR_END_OF_STREAM:
+ {
+ GVDM_LOG("Got the EOS frame!");
+ RefPtr<VideoData> data;
+ nsresult rv = CreateVideoData(outputBuffer, aStreamOffset, getter_AddRefs(data));
+ if (rv == NS_ERROR_NOT_AVAILABLE) {
+        // For EOS, no need to do anything.
+ return NS_ERROR_ABORT;
+ }
+ if (rv != NS_OK || data == nullptr) {
+ GVDM_LOG("Failed to create video data");
+ return NS_ERROR_UNEXPECTED;
+ }
+ aOutData = data;
+ return NS_ERROR_ABORT;
+ }
+ case -ETIMEDOUT:
+ {
+      GVDM_LOG("Timeout. Can try again next time.");
+ return NS_ERROR_UNEXPECTED;
+ }
+ default:
+ {
+ GVDM_LOG("Decoder failed, err=%d", err);
+ return NS_ERROR_UNEXPECTED;
+ }
+ }
+
+ return NS_OK;
+}
+
+void
+GonkVideoDecoderManager::codecReserved()
+{
+ if (mInitPromise.IsEmpty()) {
+ return;
+ }
+ GVDM_LOG("codecReserved");
+ sp<AMessage> format = new AMessage;
+ sp<Surface> surface;
+ status_t rv = OK;
+ // Fixed values
+ GVDM_LOG("Configure video mime type: %s, width:%d, height:%d", mConfig.mMimeType.get(), mConfig.mImage.width, mConfig.mImage.height);
+ format->setString("mime", mConfig.mMimeType.get());
+ format->setInt32("width", mConfig.mImage.width);
+ format->setInt32("height", mConfig.mImage.height);
+  // Set "moz-use-undequeued-bufs" to use the undequeued buffers to accelerate
+  // video decoding.
+ format->setInt32("moz-use-undequeued-bufs", 1);
+ if (mNativeWindow != nullptr) {
+#if ANDROID_VERSION >= 21
+ surface = new Surface(mGraphicBufferProducer);
+#else
+ surface = new Surface(mNativeWindow->getBufferQueue());
+#endif
+ }
+ mDecoder->configure(format, surface, nullptr, 0);
+ mDecoder->Prepare();
+
+ if (mConfig.mMimeType.EqualsLiteral("video/mp4v-es")) {
+ rv = mDecoder->Input(mConfig.mCodecSpecificConfig->Elements(),
+ mConfig.mCodecSpecificConfig->Length(), 0,
+ android::MediaCodec::BUFFER_FLAG_CODECCONFIG,
+ CODECCONFIG_TIMEOUT_US);
+ }
+
+ if (rv != OK) {
+ GVDM_LOG("Failed to configure codec!!!!");
+ mInitPromise.Reject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ return;
+ }
+
+ mInitPromise.Resolve(TrackType::kVideoTrack, __func__);
+}
+
+void
+GonkVideoDecoderManager::codecCanceled()
+{
+ GVDM_LOG("codecCanceled");
+ mInitPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+}
+
+// Called on GonkDecoderManager::mTaskLooper thread.
+void
+GonkVideoDecoderManager::onMessageReceived(const sp<AMessage> &aMessage)
+{
+ switch (aMessage->what()) {
+ case kNotifyPostReleaseBuffer:
+ {
+ ReleaseAllPendingVideoBuffers();
+ break;
+ }
+
+ default:
+ {
+ GonkDecoderManager::onMessageReceived(aMessage);
+ break;
+ }
+ }
+}
+
+uint8_t *
+GonkVideoDecoderManager::GetColorConverterBuffer(int32_t aWidth, int32_t aHeight)
+{
+  // Allocate a temporary YUV420Planar buffer.
+ size_t yuv420p_y_size = aWidth * aHeight;
+ size_t yuv420p_u_size = ((aWidth + 1) / 2) * ((aHeight + 1) / 2);
+ size_t yuv420p_v_size = yuv420p_u_size;
+ size_t yuv420p_size = yuv420p_y_size + yuv420p_u_size + yuv420p_v_size;
+ if (mColorConverterBufferSize != yuv420p_size) {
+ mColorConverterBuffer = MakeUnique<uint8_t[]>(yuv420p_size);
+ mColorConverterBufferSize = yuv420p_size;
+ }
+ return mColorConverterBuffer.get();
+}
+
+/* static */
+void
+GonkVideoDecoderManager::RecycleCallback(TextureClient* aClient, void* aClosure)
+{
+ MOZ_ASSERT(aClient && !aClient->IsDead());
+ GonkVideoDecoderManager* videoManager = static_cast<GonkVideoDecoderManager*>(aClosure);
+ GrallocTextureData* client = static_cast<GrallocTextureData*>(aClient->GetInternalData());
+ aClient->ClearRecycleCallback();
+ FenceHandle handle = aClient->GetAndResetReleaseFenceHandle();
+ videoManager->PostReleaseVideoBuffer(client->GetMediaBuffer(), handle);
+}
+
+void GonkVideoDecoderManager::PostReleaseVideoBuffer(
+ android::MediaBuffer *aBuffer,
+ FenceHandle aReleaseFence)
+{
+ {
+ MutexAutoLock autoLock(mPendingReleaseItemsLock);
+ if (aBuffer) {
+ mPendingReleaseItems.AppendElement(ReleaseItem(aBuffer, aReleaseFence));
+ }
+ }
+ sp<AMessage> notify =
+ new AMessage(kNotifyPostReleaseBuffer, id());
+ notify->post();
+
+}
+
+void GonkVideoDecoderManager::ReleaseAllPendingVideoBuffers()
+{
+ nsTArray<ReleaseItem> releasingItems;
+ {
+ MutexAutoLock autoLock(mPendingReleaseItemsLock);
+ releasingItems.AppendElements(mPendingReleaseItems);
+ mPendingReleaseItems.Clear();
+ }
+
+ // Free all pending video buffers without holding mPendingReleaseItemsLock.
+ size_t size = releasingItems.Length();
+ for (size_t i = 0; i < size; i++) {
+ RefPtr<FenceHandle::FdObj> fdObj = releasingItems[i].mReleaseFence.GetAndResetFdObj();
+ sp<Fence> fence = new Fence(fdObj->GetAndResetFd());
+ fence->waitForever("GonkVideoDecoderManager");
+ mDecoder->ReleaseMediaBuffer(releasingItems[i].mBuffer);
+ }
+ releasingItems.Clear();
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/gonk/GonkVideoDecoderManager.h b/dom/media/platforms/gonk/GonkVideoDecoderManager.h
new file mode 100644
index 000000000..343bb2a5c
--- /dev/null
+++ b/dom/media/platforms/gonk/GonkVideoDecoderManager.h
@@ -0,0 +1,149 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(GonkVideoDecoderManager_h_)
+#define GonkVideoDecoderManager_h_
+
+#include "nsRect.h"
+#include "GonkMediaDataDecoder.h"
+#include "mozilla/RefPtr.h"
+#include "I420ColorConverterHelper.h"
+#include "MediaCodecProxy.h"
+#include "GonkNativeWindow.h"
+#include "mozilla/layers/FenceUtils.h"
+#include "mozilla/UniquePtr.h"
+#include <ui/Fence.h>
+
+using namespace android;
+
+namespace android {
+class MediaBuffer;
+struct MOZ_EXPORT AString;
+class GonkNativeWindow;
+} // namespace android
+
+namespace mozilla {
+
+namespace layers {
+class TextureClient;
+class TextureClientRecycleAllocator;
+} // namespace mozilla::layers
+
+class GonkVideoDecoderManager : public GonkDecoderManager {
+typedef android::MediaCodecProxy MediaCodecProxy;
+typedef mozilla::layers::TextureClient TextureClient;
+
+public:
+ GonkVideoDecoderManager(mozilla::layers::ImageContainer* aImageContainer,
+ const VideoInfo& aConfig);
+
+ virtual ~GonkVideoDecoderManager();
+
+ RefPtr<InitPromise> Init() override;
+
+ nsresult Output(int64_t aStreamOffset,
+ RefPtr<MediaData>& aOutput) override;
+
+ nsresult Shutdown() override;
+
+ const char* GetDescriptionName() const override
+ {
+ return "gonk video decoder";
+ }
+
+ static void RecycleCallback(TextureClient* aClient, void* aClosure);
+
+protected:
+  // Bug 1199809: workaround to avoid sending the graphic buffer by making a
+  // copy of the output buffer after calling flush(). Bug 1203859 was filed to
+  // reimplement the Gonk PDM on top of OpenMAX IL directly; its buffer
+  // management will work better with Gecko and solve problems like this.
+ void ProcessFlush() override
+ {
+ mNeedsCopyBuffer = true;
+ GonkDecoderManager::ProcessFlush();
+ }
+
+private:
+ struct FrameInfo
+ {
+ int32_t mWidth = 0;
+ int32_t mHeight = 0;
+ int32_t mStride = 0;
+ int32_t mSliceHeight = 0;
+ int32_t mColorFormat = 0;
+ int32_t mCropLeft = 0;
+ int32_t mCropTop = 0;
+ int32_t mCropRight = 0;
+ int32_t mCropBottom = 0;
+ };
+
+ void onMessageReceived(const android::sp<android::AMessage> &aMessage) override;
+
+ bool SetVideoFormat();
+
+ nsresult CreateVideoData(MediaBuffer* aBuffer, int64_t aStreamOffset, VideoData** aOutData);
+ already_AddRefed<VideoData> CreateVideoDataFromGraphicBuffer(android::MediaBuffer* aSource,
+ gfx::IntRect& aPicture);
+ already_AddRefed<VideoData> CreateVideoDataFromDataBuffer(android::MediaBuffer* aSource,
+ gfx::IntRect& aPicture);
+
+ uint8_t* GetColorConverterBuffer(int32_t aWidth, int32_t aHeight);
+
+ // For codec resource management
+ void codecReserved();
+ void codecCanceled();
+
+ void ReleaseAllPendingVideoBuffers();
+ void PostReleaseVideoBuffer(android::MediaBuffer *aBuffer,
+                              layers::FenceHandle aReleaseFence);
+
+ VideoInfo mConfig;
+
+ RefPtr<layers::ImageContainer> mImageContainer;
+ RefPtr<layers::TextureClientRecycleAllocator> mCopyAllocator;
+
+ MozPromiseRequestHolder<android::MediaCodecProxy::CodecPromise> mVideoCodecRequest;
+ FrameInfo mFrameInfo;
+
+ // color converter
+ android::I420ColorConverterHelper mColorConverter;
+ UniquePtr<uint8_t[]> mColorConverterBuffer;
+ size_t mColorConverterBufferSize;
+
+ android::sp<android::GonkNativeWindow> mNativeWindow;
+#if ANDROID_VERSION >= 21
+ android::sp<android::IGraphicBufferProducer> mGraphicBufferProducer;
+#endif
+
+ enum {
+ kNotifyPostReleaseBuffer = 'nprb',
+ };
+
+ struct ReleaseItem {
+ ReleaseItem(android::MediaBuffer* aBuffer, layers::FenceHandle& aFence)
+ : mBuffer(aBuffer)
+ , mReleaseFence(aFence) {}
+ android::MediaBuffer* mBuffer;
+ layers::FenceHandle mReleaseFence;
+ };
+ nsTArray<ReleaseItem> mPendingReleaseItems;
+
+ // The lock protects mPendingReleaseItems.
+ Mutex mPendingReleaseItemsLock;
+
+  // This TaskQueue should be the same one as mDecodeCallback->OnReaderTaskQueue().
+  // It is for codec resource management; decoding tasks should not be dispatched to it.
+ RefPtr<TaskQueue> mReaderTaskQueue;
+
+  // Bug 1199809: do we need to make a copy of the output buffer? Used only
+  // when the decoder outputs graphic buffers.
+ bool mNeedsCopyBuffer;
+};
+
+} // namespace mozilla
+
+#endif // GonkVideoDecoderManager_h_
diff --git a/dom/media/platforms/gonk/moz.build b/dom/media/platforms/gonk/moz.build
new file mode 100644
index 000000000..014594977
--- /dev/null
+++ b/dom/media/platforms/gonk/moz.build
@@ -0,0 +1,39 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+EXPORTS += [
+ 'GonkAudioDecoderManager.h',
+ 'GonkDecoderModule.h',
+ 'GonkMediaDataDecoder.h',
+ 'GonkVideoDecoderManager.h',
+]
+UNIFIED_SOURCES += [
+ 'GonkAudioDecoderManager.cpp',
+ 'GonkDecoderModule.cpp',
+ 'GonkMediaDataDecoder.cpp',
+ 'GonkVideoDecoderManager.cpp',
+]
+LOCAL_INCLUDES += [
+ '/dom/media/omx/',
+]
+include('/ipc/chromium/chromium-config.mozbuild')
+
+# Suppress some GCC/clang warnings being treated as errors:
+# - about attributes on forward declarations for types that are already
+# defined, which complains about an important MOZ_EXPORT for android::AString
+# - about multi-character constants which are used in codec-related code
+if CONFIG['GNU_CC'] or CONFIG['CLANG_CL']:
+ CXXFLAGS += [
+ '-Wno-error=attributes',
+ '-Wno-error=multichar'
+ ]
+
+FINAL_LIBRARY = 'xul'
+
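+# Paths prefixed with '%' are treated as absolute include directories; these
+# headers live in the Android source tree referenced by ANDROID_SOURCE.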
+LOCAL_INCLUDES += [
+ '%' + '%s/%s' % (CONFIG['ANDROID_SOURCE'], d) for d in [
+ 'frameworks/native/opengl/include',]
+]
diff --git a/dom/media/platforms/moz.build b/dom/media/platforms/moz.build
new file mode 100644
index 000000000..310820c91
--- /dev/null
+++ b/dom/media/platforms/moz.build
@@ -0,0 +1,92 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+EXPORTS += [
+ 'agnostic/AgnosticDecoderModule.h',
+ 'agnostic/OpusDecoder.h',
+ 'agnostic/TheoraDecoder.h',
+ 'agnostic/VorbisDecoder.h',
+ 'agnostic/VPXDecoder.h',
+ 'MediaTelemetryConstants.h',
+ 'PDMFactory.h',
+ 'PlatformDecoderModule.h',
+ 'wrappers/FuzzingWrapper.h',
+ 'wrappers/H264Converter.h'
+]
+
+UNIFIED_SOURCES += [
+ 'agnostic/AgnosticDecoderModule.cpp',
+ 'agnostic/BlankDecoderModule.cpp',
+ 'agnostic/OpusDecoder.cpp',
+ 'agnostic/TheoraDecoder.cpp',
+ 'agnostic/VorbisDecoder.cpp',
+ 'agnostic/VPXDecoder.cpp',
+ 'agnostic/WAVDecoder.cpp',
+ 'PDMFactory.cpp',
+ 'wrappers/FuzzingWrapper.cpp',
+ 'wrappers/H264Converter.cpp'
+]
+
+DIRS += [
+ 'agnostic/eme',
+ 'agnostic/gmp',
+ 'omx'
+]
+
+if CONFIG['MOZ_WMF']:
+    DIRS += [ 'wmf' ]
+
+if CONFIG['MOZ_FFVPX'] or CONFIG['MOZ_FFMPEG']:
+    # common code shared by FFmpeg and FFVPX
+ UNIFIED_SOURCES += [
+ 'ffmpeg/FFmpegLibWrapper.cpp',
+ ]
+
+if CONFIG['MOZ_FFVPX']:
+ DIRS += [
+ 'ffmpeg/ffvpx',
+ ]
+
+if CONFIG['MOZ_FFMPEG']:
+ DIRS += [
+ 'ffmpeg',
+ ]
+
+if CONFIG['MOZ_APPLEMEDIA']:
+ EXPORTS += [
+ 'apple/AppleDecoderModule.h',
+ ]
+ UNIFIED_SOURCES += [
+ 'apple/AppleATDecoder.cpp',
+ 'apple/AppleCMLinker.cpp',
+ 'apple/AppleDecoderModule.cpp',
+ 'apple/AppleVTDecoder.cpp',
+ 'apple/AppleVTLinker.cpp',
+ ]
+ OS_LIBS += [
+ '-framework AudioToolbox',
+ ]
+
+if CONFIG['MOZ_GONK_MEDIACODEC']:
+ DEFINES['MOZ_GONK_MEDIACODEC'] = True
+ DIRS += ['gonk']
+
+include('/ipc/chromium/chromium-config.mozbuild')
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'android':
+ EXPORTS += [
+ 'android/AndroidDecoderModule.h',
+ ]
+ UNIFIED_SOURCES += [
+ 'android/AndroidDecoderModule.cpp',
+ 'android/MediaCodecDataDecoder.cpp',
+ 'android/RemoteDataDecoder.cpp',
+ ]
+
+FINAL_LIBRARY = 'xul'
+
+if CONFIG['GNU_CXX']:
+ CXXFLAGS += ['-Wno-error=shadow']
diff --git a/dom/media/platforms/omx/GonkOmxPlatformLayer.cpp b/dom/media/platforms/omx/GonkOmxPlatformLayer.cpp
new file mode 100644
index 000000000..870566cf5
--- /dev/null
+++ b/dom/media/platforms/omx/GonkOmxPlatformLayer.cpp
@@ -0,0 +1,667 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "GonkOmxPlatformLayer.h"
+
+#include <binder/MemoryDealer.h>
+#include <cutils/properties.h>
+#include <media/IOMX.h>
+#include <media/stagefright/MediaCodecList.h>
+#include <utils/List.h>
+
+#include "mozilla/Monitor.h"
+#include "mozilla/layers/TextureClient.h"
+#include "mozilla/layers/GrallocTextureClient.h"
+#include "mozilla/layers/ImageBridgeChild.h"
+#include "mozilla/layers/TextureClientRecycleAllocator.h"
+
+#include "ImageContainer.h"
+#include "MediaInfo.h"
+#include "OmxDataDecoder.h"
+
+
+#ifdef LOG
+#undef LOG
+#endif
+
+#define LOG(arg, ...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, ("GonkOmxPlatformLayer(%p)::%s: " arg, this, __func__, ##__VA_ARGS__))
+
+#define CHECK_ERR(err) \
+ if (err != OK) { \
+ LOG("error %d at %s", err, __func__); \
+ return NS_ERROR_FAILURE; \
+ } \
+
+// Android proprietary value.
+#define ANDROID_OMX_VIDEO_CodingVP8 (static_cast<OMX_VIDEO_CODINGTYPE>(9))
+
+using namespace android;
+
+namespace mozilla {
+
+// In Gonk, software component names have the prefix "OMX.google". This check
+// gives us a way to prefer hardware codecs.
+bool IsSoftwareCodec(const char* aComponentName)
+{
+ nsAutoCString str(aComponentName);
+ return (str.Find(NS_LITERAL_CSTRING("OMX.google.")) == -1 ? false : true);
+}
+
+bool IsInEmulator()
+{
+ char propQemu[PROPERTY_VALUE_MAX];
+ property_get("ro.kernel.qemu", propQemu, "");
+ return !strncmp(propQemu, "1", 1);
+}
+
+class GonkOmxObserver : public BnOMXObserver {
+public:
+ void onMessage(const omx_message& aMsg)
+ {
+ switch (aMsg.type) {
+ case omx_message::EVENT:
+ {
+ sp<GonkOmxObserver> self = this;
+ nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([self, aMsg] () {
+ if (self->mClient && self->mClient->Event(aMsg.u.event_data.event,
+ aMsg.u.event_data.data1,
+ aMsg.u.event_data.data2))
+ {
+ return;
+ }
+ });
+ mTaskQueue->Dispatch(r.forget());
+ break;
+ }
+ case omx_message::EMPTY_BUFFER_DONE:
+ {
+ sp<GonkOmxObserver> self = this;
+ nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([self, aMsg] () {
+ if (!self->mPromiseLayer) {
+ return;
+ }
+ BufferData::BufferID id = (BufferData::BufferID)aMsg.u.buffer_data.buffer;
+ self->mPromiseLayer->EmptyFillBufferDone(OMX_DirInput, id);
+ });
+ mTaskQueue->Dispatch(r.forget());
+ break;
+ }
+ case omx_message::FILL_BUFFER_DONE:
+ {
+ sp<GonkOmxObserver> self = this;
+ nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([self, aMsg] () {
+ if (!self->mPromiseLayer) {
+ return;
+ }
+
+          // TODO: this code is a little ugly; it would be better to improve it.
+ RefPtr<BufferData> buf;
+ BufferData::BufferID id = (BufferData::BufferID)aMsg.u.extended_buffer_data.buffer;
+ buf = self->mPromiseLayer->FindAndRemoveBufferHolder(OMX_DirOutput, id);
+ MOZ_RELEASE_ASSERT(buf);
+ GonkBufferData* gonkBuffer = static_cast<GonkBufferData*>(buf.get());
+
+ // Copy the critical information to local buffer.
+ if (gonkBuffer->IsLocalBuffer()) {
+ gonkBuffer->mBuffer->nOffset = aMsg.u.extended_buffer_data.range_offset;
+ gonkBuffer->mBuffer->nFilledLen = aMsg.u.extended_buffer_data.range_length;
+ gonkBuffer->mBuffer->nFlags = aMsg.u.extended_buffer_data.flags;
+ gonkBuffer->mBuffer->nTimeStamp = aMsg.u.extended_buffer_data.timestamp;
+ }
+ self->mPromiseLayer->EmptyFillBufferDone(OMX_DirOutput, buf);
+ });
+ mTaskQueue->Dispatch(r.forget());
+ break;
+ }
+ default:
+ {
+ LOG("Unhandle event %d", aMsg.type);
+ }
+ }
+ }
+
+ void Shutdown()
+ {
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ mPromiseLayer = nullptr;
+ mClient = nullptr;
+ }
+
+ GonkOmxObserver(TaskQueue* aTaskQueue, OmxPromiseLayer* aPromiseLayer, OmxDataDecoder* aDataDecoder)
+ : mTaskQueue(aTaskQueue)
+ , mPromiseLayer(aPromiseLayer)
+ , mClient(aDataDecoder)
+ {}
+
+protected:
+ RefPtr<TaskQueue> mTaskQueue;
+  // TODO:
+  // we should combine both event handlers into one, and we should provide
+  // a unified way for event handling in the OmxPlatformLayer class.
+ RefPtr<OmxPromiseLayer> mPromiseLayer;
+ RefPtr<OmxDataDecoder> mClient;
+};
+
+// This class allocates Gralloc buffer and manages TextureClient's recycle.
+class GonkTextureClientRecycleHandler : public layers::ITextureClientRecycleAllocator
+{
+ typedef MozPromise<layers::TextureClient*, nsresult, /* IsExclusive = */ true> TextureClientRecyclePromise;
+
+public:
+ GonkTextureClientRecycleHandler(OMX_VIDEO_PORTDEFINITIONTYPE& aDef)
+ : ITextureClientRecycleAllocator()
+ , mMonitor("GonkTextureClientRecycleHandler")
+ {
+ RefPtr<layers::ImageBridgeChild> bridge = layers::ImageBridgeChild::GetSingleton();
+
+ // Allocate Gralloc texture memory.
+ layers::GrallocTextureData* textureData =
+ layers::GrallocTextureData::Create(gfx::IntSize(aDef.nFrameWidth, aDef.nFrameHeight),
+ aDef.eColorFormat,
+ gfx::BackendType::NONE,
+ GraphicBuffer::USAGE_HW_TEXTURE | GraphicBuffer::USAGE_SW_READ_OFTEN,
+ bridge);
+
+ mGraphBuffer = textureData->GetGraphicBuffer();
+ MOZ_ASSERT(mGraphBuffer.get());
+
+ mTextureClient =
+ layers::TextureClient::CreateWithData(textureData,
+ layers::TextureFlags::DEALLOCATE_CLIENT | layers::TextureFlags::RECYCLE,
+ bridge);
+ MOZ_ASSERT(mTextureClient);
+
+ mPromise.SetMonitor(&mMonitor);
+ }
+
+ RefPtr<TextureClientRecyclePromise> WaitforRecycle()
+ {
+ MonitorAutoLock lock(mMonitor);
+ MOZ_ASSERT(!!mGraphBuffer.get());
+
+ mTextureClient->SetRecycleAllocator(this);
+ return mPromise.Ensure(__func__);
+ }
+
+  // DO NOT use a smart pointer to receive the TextureClient; otherwise it
+  // will disrupt the reference count.
+ layers::TextureClient* GetTextureClient()
+ {
+ return mTextureClient;
+ }
+
+ GraphicBuffer* GetGraphicBuffer()
+ {
+ MonitorAutoLock lock(mMonitor);
+ return mGraphBuffer.get();
+ }
+
+ // This function is called from layers thread.
+ void RecycleTextureClient(layers::TextureClient* aClient) override
+ {
+ MOZ_ASSERT(mTextureClient == aClient);
+
+ // Clearing the recycle allocator drops a reference, so make sure we stay alive
+ // for the duration of this function.
+ RefPtr<GonkTextureClientRecycleHandler> kungFuDeathGrip(this);
+ aClient->SetRecycleAllocator(nullptr);
+
+ {
+ MonitorAutoLock lock(mMonitor);
+ mPromise.ResolveIfExists(mTextureClient, __func__);
+ }
+ }
+
+ void Shutdown()
+ {
+ MonitorAutoLock lock(mMonitor);
+
+ mPromise.RejectIfExists(NS_ERROR_FAILURE, __func__);
+
+    // DO NOT clear mTextureClient here.
+    // If we dropped our reference here while the ref count is 1,
+    // RecycleCallback would be invoked from this call and break the whole
+    // mechanism. (RecycleCallback should be called from the layers side.)
+ mGraphBuffer = nullptr;
+ }
+
+private:
+  // TextureClient calls RecycleCallback when its ref count drops to 1, so we
+  // should hold only one reference here and hand out raw pointers outside
+  // this class.
+ RefPtr<layers::TextureClient> mTextureClient;
+
+ // It is protected by mMonitor.
+ sp<android::GraphicBuffer> mGraphBuffer;
+
+ // It is protected by mMonitor.
+ MozPromiseHolder<TextureClientRecyclePromise> mPromise;
+
+ Monitor mMonitor;
+};
+
+GonkBufferData::GonkBufferData(bool aLiveInLocal,
+ GonkOmxPlatformLayer* aGonkPlatformLayer)
+ : BufferData(nullptr)
+ , mId(0)
+ , mGonkPlatformLayer(aGonkPlatformLayer)
+{
+ if (!aLiveInLocal) {
+ mMirrorBuffer = new OMX_BUFFERHEADERTYPE;
+ PodZero(mMirrorBuffer.get());
+ mBuffer = mMirrorBuffer.get();
+ }
+}
+
+void
+GonkBufferData::ReleaseBuffer()
+{
+ if (mTextureClientRecycleHandler) {
+ mTextureClientRecycleHandler->Shutdown();
+ mTextureClientRecycleHandler = nullptr;
+ }
+}
+
+nsresult
+GonkBufferData::InitSharedMemory(android::IMemory* aMemory)
+{
+ MOZ_RELEASE_ASSERT(mMirrorBuffer.get());
+
+  // aMemory is an IPC memory; it is safe to use it here.
+ mBuffer->pBuffer = (OMX_U8*)aMemory->pointer();
+ mBuffer->nAllocLen = aMemory->size();
+ return NS_OK;
+}
+
+nsresult
+GonkBufferData::InitLocalBuffer(IOMX::buffer_id aId)
+{
+ MOZ_RELEASE_ASSERT(!mMirrorBuffer.get());
+
+ mBuffer = (OMX_BUFFERHEADERTYPE*)aId;
+ return NS_OK;
+}
+
+nsresult
+GonkBufferData::InitGraphicBuffer(OMX_VIDEO_PORTDEFINITIONTYPE& aDef)
+{
+ mTextureClientRecycleHandler = new GonkTextureClientRecycleHandler(aDef);
+
+ if (!mTextureClientRecycleHandler->GetGraphicBuffer()) {
+ return NS_ERROR_FAILURE;
+ }
+
+ return NS_OK;
+}
+
+already_AddRefed<MediaData>
+GonkBufferData::GetPlatformMediaData()
+{
+ if (mGonkPlatformLayer->GetTrackInfo()->GetAsAudioInfo()) {
+ // This is audio decoding.
+ return nullptr;
+ }
+
+ if (!mTextureClientRecycleHandler) {
+    // There is no GraphicBuffer; fall back to normal YUV420 VideoData.
+ return nullptr;
+ }
+
+ VideoInfo info(*mGonkPlatformLayer->GetTrackInfo()->GetAsVideoInfo());
+ RefPtr<VideoData> data =
+ VideoData::CreateAndCopyIntoTextureClient(info,
+ 0,
+ mBuffer->nTimeStamp,
+ 1,
+ mTextureClientRecycleHandler->GetTextureClient(),
+ false,
+ 0,
+ info.ImageRect());
+ LOG("%p, disp width %d, height %d, pic width %d, height %d, time %ld",
+ this, info.mDisplay.width, info.mDisplay.height,
+ info.mImage.width, info.mImage.height, mBuffer->nTimeStamp);
+
+  // Get the TextureClient recycle promise here and wait for it to resolve.
+ RefPtr<GonkBufferData> self(this);
+ mTextureClientRecycleHandler->WaitforRecycle()
+ ->Then(mGonkPlatformLayer->GetTaskQueue(), __func__,
+ [self] () {
+ self->mPromise.ResolveIfExists(self, __func__);
+ },
+ [self] () {
+ OmxBufferFailureHolder failure(OMX_ErrorUndefined, self);
+ self->mPromise.RejectIfExists(failure, __func__);
+ });
+
+ return data.forget();
+}
+
+GonkOmxPlatformLayer::GonkOmxPlatformLayer(OmxDataDecoder* aDataDecoder,
+ OmxPromiseLayer* aPromiseLayer,
+ TaskQueue* aTaskQueue,
+ layers::ImageContainer* aImageContainer)
+ : mTaskQueue(aTaskQueue)
+ , mImageContainer(aImageContainer)
+ , mNode(0)
+{
+ mOmxObserver = new GonkOmxObserver(mTaskQueue, aPromiseLayer, aDataDecoder);
+}
+
+nsresult
+GonkOmxPlatformLayer::AllocateOmxBuffer(OMX_DIRTYPE aType,
+ BUFFERLIST* aBufferList)
+{
+ MOZ_ASSERT(!mMemoryDealer[aType].get());
+
+ // Get port definition.
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ nsTArray<uint32_t> portindex;
+ GetPortIndices(portindex);
+ for (auto idx : portindex) {
+ InitOmxParameter(&def);
+ def.nPortIndex = idx;
+
+ OMX_ERRORTYPE err = GetParameter(OMX_IndexParamPortDefinition,
+ &def,
+ sizeof(OMX_PARAM_PORTDEFINITIONTYPE));
+ if (err != OMX_ErrorNone) {
+ return NS_ERROR_FAILURE;
+ } else if (def.eDir == aType) {
+ LOG("Get OMX_IndexParamPortDefinition: port: %d, type: %d", def.nPortIndex, def.eDir);
+ break;
+ }
+ }
+
+ size_t t = 0;
+
+ // Configure video output GraphicBuffer for video decoding acceleration.
+ bool useGralloc = false;
+ if (aType == OMX_DirOutput && mQuirks.test(kRequiresAllocateBufferOnOutputPorts) &&
+ (def.eDomain == OMX_PortDomainVideo)) {
+ if (NS_FAILED(EnableOmxGraphicBufferPort(def))) {
+ return NS_ERROR_FAILURE;
+ }
+
+ LOG("Enable OMX GraphicBuffer port, number %d, width %d, height %d", def.nBufferCountActual,
+ def.format.video.nFrameWidth, def.format.video.nFrameHeight);
+
+ useGralloc = true;
+
+    t = 1024; // MemoryDealer doesn't like 0; this is just to keep it happy.
+ } else {
+ t = def.nBufferCountActual * def.nBufferSize;
+ LOG("Buffer count %d, buffer size %d", def.nBufferCountActual, def.nBufferSize);
+ }
+
+ bool liveinlocal = mOmx->livesLocally(mNode, getpid());
+
+  // MemoryDealer is an IPC buffer allocator in Gonk because IOMX actually
+  // lives in mediaserver.
+ mMemoryDealer[aType] = new MemoryDealer(t, "Gecko-OMX");
+ for (OMX_U32 i = 0; i < def.nBufferCountActual; ++i) {
+ RefPtr<GonkBufferData> buffer;
+ IOMX::buffer_id bufferID;
+ status_t st;
+ nsresult rv;
+
+ buffer = new GonkBufferData(liveinlocal, this);
+ if (useGralloc) {
+      // The buffer lives remotely. Use GraphicBuffer to display decoded video frames.
+ rv = buffer->InitGraphicBuffer(def.format.video);
+ NS_ENSURE_SUCCESS(rv, rv);
+ st = mOmx->useGraphicBuffer(mNode,
+ def.nPortIndex,
+ buffer->mTextureClientRecycleHandler->GetGraphicBuffer(),
+ &bufferID);
+ CHECK_ERR(st);
+ } else {
+ sp<IMemory> mem = mMemoryDealer[aType]->allocate(def.nBufferSize);
+ MOZ_ASSERT(mem.get());
+
+ if ((mQuirks.test(kRequiresAllocateBufferOnInputPorts) && aType == OMX_DirInput) ||
+ (mQuirks.test(kRequiresAllocateBufferOnOutputPorts) && aType == OMX_DirOutput)) {
+        // The buffer lives remotely. We allocate a local OMX_BUFFERHEADERTYPE
+        // as the mirror of the remote OMX_BUFFERHEADERTYPE.
+ st = mOmx->allocateBufferWithBackup(mNode, aType, mem, &bufferID);
+ CHECK_ERR(st);
+ rv = buffer->InitSharedMemory(mem.get());
+ NS_ENSURE_SUCCESS(rv, rv);
+ } else {
+        // The buffer lives locally; bufferID is actually the
+        // OMX_BUFFERHEADERTYPE pointer.
+ st = mOmx->useBuffer(mNode, aType, mem, &bufferID);
+ CHECK_ERR(st);
+ rv = buffer->InitLocalBuffer(bufferID);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+ }
+
+ rv = buffer->SetBufferId(bufferID);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ aBufferList->AppendElement(buffer);
+ }
+
+ return NS_OK;
+}
+
+nsresult
+GonkOmxPlatformLayer::ReleaseOmxBuffer(OMX_DIRTYPE aType,
+ BUFFERLIST* aBufferList)
+{
+ status_t st;
+ uint32_t len = aBufferList->Length();
+ for (uint32_t i = 0; i < len; i++) {
+ GonkBufferData* buffer = static_cast<GonkBufferData*>(aBufferList->ElementAt(i).get());
+ IOMX::buffer_id id = (OMX_BUFFERHEADERTYPE*) buffer->ID();
+ st = mOmx->freeBuffer(mNode, aType, id);
+ if (st != OK) {
+ return NS_ERROR_FAILURE;
+ }
+ buffer->ReleaseBuffer();
+ }
+ aBufferList->Clear();
+ mMemoryDealer[aType].clear();
+
+ return NS_OK;
+}
+
+nsresult
+GonkOmxPlatformLayer::EnableOmxGraphicBufferPort(OMX_PARAM_PORTDEFINITIONTYPE& aDef)
+{
+ status_t st;
+
+ st = mOmx->enableGraphicBuffers(mNode, aDef.nPortIndex, OMX_TRUE);
+ CHECK_ERR(st);
+
+ return NS_OK;
+}
+
+OMX_ERRORTYPE
+GonkOmxPlatformLayer::GetState(OMX_STATETYPE* aType)
+{
+ return (OMX_ERRORTYPE)mOmx->getState(mNode, aType);
+}
+
+OMX_ERRORTYPE
+GonkOmxPlatformLayer::GetParameter(OMX_INDEXTYPE aParamIndex,
+ OMX_PTR aComponentParameterStructure,
+ OMX_U32 aComponentParameterSize)
+{
+ return (OMX_ERRORTYPE)mOmx->getParameter(mNode,
+ aParamIndex,
+ aComponentParameterStructure,
+ aComponentParameterSize);
+}
+
+OMX_ERRORTYPE
+GonkOmxPlatformLayer::SetParameter(OMX_INDEXTYPE aParamIndex,
+ OMX_PTR aComponentParameterStructure,
+ OMX_U32 aComponentParameterSize)
+{
+ return (OMX_ERRORTYPE)mOmx->setParameter(mNode,
+ aParamIndex,
+ aComponentParameterStructure,
+ aComponentParameterSize);
+}
+
+nsresult
+GonkOmxPlatformLayer::Shutdown()
+{
+ mOmx->freeNode(mNode);
+ mOmxObserver->Shutdown();
+ mOmxObserver = nullptr;
+ mOmxClient.disconnect();
+
+ return NS_OK;
+}
+
+OMX_ERRORTYPE
+GonkOmxPlatformLayer::InitOmxToStateLoaded(const TrackInfo* aInfo)
+{
+ mInfo = aInfo;
+ status_t err = mOmxClient.connect();
+ if (err != OK) {
+ return OMX_ErrorUndefined;
+ }
+ mOmx = mOmxClient.interface();
+ if (!mOmx.get()) {
+ return OMX_ErrorUndefined;
+ }
+
+ LOG("find componenet for mime type %s", mInfo->mMimeType.Data());
+
+ nsTArray<ComponentInfo> components;
+ if (FindComponents(mInfo->mMimeType, &components)) {
+ for (auto comp : components) {
+ if (LoadComponent(comp)) {
+ return OMX_ErrorNone;
+ }
+ }
+ }
+
+ LOG("no component is loaded");
+ return OMX_ErrorUndefined;
+}
+
+OMX_ERRORTYPE
+GonkOmxPlatformLayer::EmptyThisBuffer(BufferData* aData)
+{
+ return (OMX_ERRORTYPE)mOmx->emptyBuffer(mNode,
+ (IOMX::buffer_id)aData->ID(),
+ aData->mBuffer->nOffset,
+ aData->mBuffer->nFilledLen,
+ aData->mBuffer->nFlags,
+ aData->mBuffer->nTimeStamp);
+}
+
+OMX_ERRORTYPE
+GonkOmxPlatformLayer::FillThisBuffer(BufferData* aData)
+{
+ return (OMX_ERRORTYPE)mOmx->fillBuffer(mNode, (IOMX::buffer_id)aData->ID());
+}
+
+OMX_ERRORTYPE
+GonkOmxPlatformLayer::SendCommand(OMX_COMMANDTYPE aCmd,
+ OMX_U32 aParam1,
+ OMX_PTR aCmdData)
+{
+ return (OMX_ERRORTYPE)mOmx->sendCommand(mNode, aCmd, aParam1);
+}
+
+bool
+GonkOmxPlatformLayer::LoadComponent(const ComponentInfo& aComponent)
+{
+ status_t err = mOmx->allocateNode(aComponent.mName, mOmxObserver, &mNode);
+ if (err == OK) {
+ mQuirks = aComponent.mQuirks;
+ LOG("Load OpenMax component %s, alloc input %d, alloc output %d, live locally %d",
+ aComponent.mName, mQuirks.test(kRequiresAllocateBufferOnInputPorts),
+ mQuirks.test(kRequiresAllocateBufferOnOutputPorts),
+ mOmx->livesLocally(mNode, getpid()));
+ return true;
+ }
+ return false;
+}
+
+layers::ImageContainer*
+GonkOmxPlatformLayer::GetImageContainer()
+{
+ return mImageContainer;
+}
+
+const TrackInfo*
+GonkOmxPlatformLayer::GetTrackInfo()
+{
+ return mInfo;
+}
+
+bool
+GonkOmxPlatformLayer::FindComponents(const nsACString& aMimeType,
+ nsTArray<ComponentInfo>* aComponents)
+{
+ static const MediaCodecList* codecs = MediaCodecList::getInstance();
+
+ bool useHardwareCodecOnly = false;
+
+  // H264 and H263 have different profiles, and the software codec doesn't
+  // support high profile, so we use hardware codecs only.
+ if (!IsInEmulator() &&
+ (aMimeType.EqualsLiteral("video/avc") ||
+ aMimeType.EqualsLiteral("video/mp4") ||
+ aMimeType.EqualsLiteral("video/mp4v-es") ||
+ aMimeType.EqualsLiteral("video/3gp"))) {
+ useHardwareCodecOnly = true;
+ }
+
+ const char* mime = aMimeType.Data();
+ // Translate VP8 MIME type to Android format.
+ if (aMimeType.EqualsLiteral("video/webm; codecs=vp8")) {
+ mime = "video/x-vnd.on2.vp8";
+ }
+
+ size_t start = 0;
+ bool found = false;
+ while (true) {
+ ssize_t index = codecs->findCodecByType(mime, false /* encoder */, start);
+ if (index < 0) {
+ break;
+ }
+ start = index + 1;
+
+ const char* name = codecs->getCodecName(index);
+ if (IsSoftwareCodec(name) && useHardwareCodecOnly) {
+ continue;
+ }
+
+ found = true;
+
+ if (!aComponents) {
+ continue;
+ }
+ ComponentInfo* comp = aComponents->AppendElement();
+ comp->mName = name;
+ if (codecs->codecHasQuirk(index, "requires-allocate-on-input-ports")) {
+ comp->mQuirks.set(kRequiresAllocateBufferOnInputPorts);
+ }
+ if (codecs->codecHasQuirk(index, "requires-allocate-on-output-ports")) {
+ comp->mQuirks.set(kRequiresAllocateBufferOnOutputPorts);
+ }
+ }
+
+ return found;
+}
+
+OMX_VIDEO_CODINGTYPE
+GonkOmxPlatformLayer::CompressionFormat()
+{
+ MOZ_ASSERT(mInfo);
+
+ return mInfo->mMimeType.EqualsLiteral("video/webm; codecs=vp8") ?
+ ANDROID_OMX_VIDEO_CodingVP8 : OmxPlatformLayer::CompressionFormat();
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/omx/GonkOmxPlatformLayer.h b/dom/media/platforms/omx/GonkOmxPlatformLayer.h
new file mode 100644
index 000000000..aaa8c654d
--- /dev/null
+++ b/dom/media/platforms/omx/GonkOmxPlatformLayer.h
@@ -0,0 +1,205 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(GonkOmxPlatformLayer_h_)
+#define GonkOmxPlatformLayer_h_
+
+#pragma GCC visibility push(default)
+
+#include <bitset>
+
+#include <utils/RefBase.h>
+#include <media/stagefright/OMXClient.h>
+#include "nsAutoPtr.h"
+
+#include "OMX_Component.h"
+
+#include "OmxPlatformLayer.h"
+
+class nsACString;
+
+namespace android {
+class IMemory;
+class MemoryDealer;
+}
+
+namespace mozilla {
+
+class GonkOmxObserver;
+class GonkOmxPlatformLayer;
+class GonkTextureClientRecycleHandler;
+
+/*
+ * An Android OMX node can live in the local process (client) or in a remote
+ * process (mediaserver), and there are 3 kinds of buffers in Android OMX:
+ *
+ * 1.
+ * When the buffer is in the local process, the IOMX::buffer_id is actually an
+ * OMX_BUFFERHEADERTYPE pointer and it is safe to use it directly.
+ *
+ * 2.
+ * When the buffer is in a remote process, the OMX_BUFFERHEADERTYPE pointer is
+ * in the remote process. It can't be used in the local process, so a local
+ * OMX_BUFFERHEADERTYPE is allocated here. The raw/decoded data lives in
+ * Android shared memory, IMemory.
+ *
+ * 3.
+ * When the buffer is in a remote process and on the display output port, it
+ * uses GraphicBuffer to accelerate decoding and display.
+ *
+ */
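+// Case 1 corresponds to InitLocalBuffer(), case 2 to InitSharedMemory(), and
+// case 3 to InitGraphicBuffer() below.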
+class GonkBufferData : public OmxPromiseLayer::BufferData {
+protected:
+ virtual ~GonkBufferData() {}
+
+public:
+ GonkBufferData(bool aLiveInLocal,
+ GonkOmxPlatformLayer* aLayer);
+
+ BufferID ID() override
+ {
+ return mId;
+ }
+
+ already_AddRefed<MediaData> GetPlatformMediaData() override;
+
+ bool IsLocalBuffer()
+ {
+ return !!mMirrorBuffer.get();
+ }
+
+ void ReleaseBuffer();
+
+ nsresult SetBufferId(android::IOMX::buffer_id aId)
+ {
+ mId = aId;
+ return NS_OK;
+ }
+
+  // mBuffer is in the local process and aId is actually the
+  // OMX_BUFFERHEADERTYPE pointer, so no mirror buffer is needed.
+ nsresult InitLocalBuffer(android::IOMX::buffer_id aId);
+
+ // aMemory is an IPC based memory which will be used as the pBuffer in
+ // mBuffer. And the mBuffer will be the mirror OMX_BUFFERHEADERTYPE
+ // of the one in the remote process.
+ nsresult InitSharedMemory(android::IMemory* aMemory);
+
+ // GraphicBuffer is for video decoding acceleration on output port.
+ // Then mBuffer is the mirror OMX_BUFFERHEADERTYPE of the one in the remote
+ // process.
+ nsresult InitGraphicBuffer(OMX_VIDEO_PORTDEFINITIONTYPE& aDef);
+
+ // Android OMX uses this id to pass the buffer between OMX component and
+ // client.
+ android::IOMX::buffer_id mId;
+
+  // mMirrorBuffer is used only when the omx node is in mediaserver.
+  // Because of IPC, mId is the OMX_BUFFERHEADERTYPE address in mediaserver.
+  // It can't be mapped into the client process, so we need a local
+  // OMX_BUFFERHEADERTYPE here to mirror the remote one in mediaserver.
+ nsAutoPtr<OMX_BUFFERHEADERTYPE> mMirrorBuffer;
+
+ // It creates GraphicBuffer and manages TextureClient.
+ RefPtr<GonkTextureClientRecycleHandler> mTextureClientRecycleHandler;
+
+ GonkOmxPlatformLayer* mGonkPlatformLayer;
+};
+
+class GonkOmxPlatformLayer : public OmxPlatformLayer {
+public:
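+  // These quirk bits mirror the "requires-allocate-on-input-ports" /
+  // "requires-allocate-on-output-ports" flags reported by Android's
+  // MediaCodecList; see FindComponents().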
+ enum {
+ kRequiresAllocateBufferOnInputPorts = 0,
+ kRequiresAllocateBufferOnOutputPorts,
+ QUIRKS,
+ };
+ typedef std::bitset<QUIRKS> Quirks;
+
+ struct ComponentInfo {
+ const char* mName;
+ Quirks mQuirks;
+ };
+
+ GonkOmxPlatformLayer(OmxDataDecoder* aDataDecoder,
+ OmxPromiseLayer* aPromiseLayer,
+ TaskQueue* aTaskQueue,
+ layers::ImageContainer* aImageContainer);
+
+ nsresult AllocateOmxBuffer(OMX_DIRTYPE aType, BUFFERLIST* aBufferList) override;
+
+ nsresult ReleaseOmxBuffer(OMX_DIRTYPE aType, BUFFERLIST* aBufferList) override;
+
+ OMX_ERRORTYPE GetState(OMX_STATETYPE* aType) override;
+
+ OMX_ERRORTYPE GetParameter(OMX_INDEXTYPE aParamIndex,
+ OMX_PTR aComponentParameterStructure,
+ OMX_U32 aComponentParameterSize) override;
+
+ OMX_ERRORTYPE SetParameter(OMX_INDEXTYPE nIndex,
+ OMX_PTR aComponentParameterStructure,
+ OMX_U32 aComponentParameterSize) override;
+
+ OMX_ERRORTYPE InitOmxToStateLoaded(const TrackInfo* aInfo) override;
+
+ OMX_ERRORTYPE EmptyThisBuffer(BufferData* aData) override;
+
+ OMX_ERRORTYPE FillThisBuffer(BufferData* aData) override;
+
+ OMX_ERRORTYPE SendCommand(OMX_COMMANDTYPE aCmd,
+ OMX_U32 aParam1,
+ OMX_PTR aCmdData) override;
+
+ nsresult Shutdown() override;
+
+ static bool FindComponents(const nsACString& aMimeType,
+ nsTArray<ComponentInfo>* aComponents = nullptr);
+
+ // Android/QCOM decoder uses its own OMX_VIDEO_CodingVP8 definition in
+ // frameworks/native/media/include/openmax/OMX_Video.h, not the one defined
+ // in OpenMAX v1.1.2 OMX_VideoExt.h
+ OMX_VIDEO_CODINGTYPE CompressionFormat() override;
+
+protected:
+ friend GonkBufferData;
+
+ layers::ImageContainer* GetImageContainer();
+
+ const TrackInfo* GetTrackInfo();
+
+ TaskQueue* GetTaskQueue()
+ {
+ return mTaskQueue;
+ }
+
+ nsresult EnableOmxGraphicBufferPort(OMX_PARAM_PORTDEFINITIONTYPE& aDef);
+
+ bool LoadComponent(const ComponentInfo& aComponent);
+
+ friend class GonkOmxObserver;
+
+ RefPtr<TaskQueue> mTaskQueue;
+
+ RefPtr<layers::ImageContainer> mImageContainer;
+
+ // OMX_DirInput is 0, OMX_DirOutput is 1.
+ android::sp<android::MemoryDealer> mMemoryDealer[2];
+
+ android::sp<GonkOmxObserver> mOmxObserver;
+
+ android::sp<android::IOMX> mOmx;
+
+ android::IOMX::node_id mNode;
+
+ android::OMXClient mOmxClient;
+
+ Quirks mQuirks;
+};
+
+} // namespace mozilla
+
+#pragma GCC visibility pop
+
+#endif // GonkOmxPlatformLayer_h_
diff --git a/dom/media/platforms/omx/OmxDataDecoder.cpp b/dom/media/platforms/omx/OmxDataDecoder.cpp
new file mode 100644
index 000000000..33f9dad30
--- /dev/null
+++ b/dom/media/platforms/omx/OmxDataDecoder.cpp
@@ -0,0 +1,1038 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "OmxDataDecoder.h"
+
+#include "OMX_Audio.h"
+#include "OMX_Component.h"
+#include "OMX_Types.h"
+
+#include "OmxPlatformLayer.h"
+
+
+#ifdef LOG
+#undef LOG
+#undef LOGL
+#endif
+
+#define LOG(arg, ...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, ("OmxDataDecoder(%p)::%s: " arg, this, __func__, ##__VA_ARGS__))
+
+#define LOGL(arg, ...) \
+ { \
+ void* p = self; \
+ MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, \
+ ("OmxDataDecoder(%p)::%s: " arg, p, __func__, ##__VA_ARGS__)); \
+ }
+
+#define CHECK_OMX_ERR(err) \
+ if (err != OMX_ErrorNone) { \
+ NotifyError(err, __func__);\
+ return; \
+ } \
+
+namespace mozilla {
+
+static const char*
+StateTypeToStr(OMX_STATETYPE aType)
+{
+ MOZ_ASSERT(aType == OMX_StateLoaded ||
+ aType == OMX_StateIdle ||
+ aType == OMX_StateExecuting ||
+ aType == OMX_StatePause ||
+ aType == OMX_StateWaitForResources ||
+ aType == OMX_StateInvalid);
+
+ switch (aType) {
+ case OMX_StateLoaded:
+ return "OMX_StateLoaded";
+ case OMX_StateIdle:
+ return "OMX_StateIdle";
+ case OMX_StateExecuting:
+ return "OMX_StateExecuting";
+ case OMX_StatePause:
+ return "OMX_StatePause";
+ case OMX_StateWaitForResources:
+ return "OMX_StateWaitForResources";
+ case OMX_StateInvalid:
+ return "OMX_StateInvalid";
+ default:
+ return "Unknown";
+ }
+}
+
+// A helper class to retrieve AudioData or VideoData.
+class MediaDataHelper {
+protected:
+ virtual ~MediaDataHelper() {}
+
+public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaDataHelper)
+
+ MediaDataHelper(const TrackInfo* aTrackInfo,
+ layers::ImageContainer* aImageContainer,
+ OmxPromiseLayer* aOmxLayer);
+
+ already_AddRefed<MediaData> GetMediaData(BufferData* aBufferData, bool& aPlatformDepenentData);
+
+protected:
+ already_AddRefed<AudioData> CreateAudioData(BufferData* aBufferData);
+
+ already_AddRefed<VideoData> CreateYUV420VideoData(BufferData* aBufferData);
+
+ const TrackInfo* mTrackInfo;
+
+ OMX_PARAM_PORTDEFINITIONTYPE mOutputPortDef;
+
+ // audio output
+ MediaQueue<AudioData> mAudioQueue;
+
+ AudioCompactor mAudioCompactor;
+
+ // video output
+ RefPtr<layers::ImageContainer> mImageContainer;
+};
+
+OmxDataDecoder::OmxDataDecoder(const TrackInfo& aTrackInfo,
+ MediaDataDecoderCallback* aCallback,
+ layers::ImageContainer* aImageContainer)
+ : mMonitor("OmxDataDecoder")
+ , mOmxTaskQueue(CreateMediaDecodeTaskQueue())
+ , mImageContainer(aImageContainer)
+ , mWatchManager(this, mOmxTaskQueue)
+ , mOmxState(OMX_STATETYPE::OMX_StateInvalid, "OmxDataDecoder::mOmxState")
+ , mTrackInfo(aTrackInfo.Clone())
+ , mFlushing(false)
+ , mShuttingDown(false)
+ , mCheckingInputExhausted(false)
+ , mPortSettingsChanged(-1, "OmxDataDecoder::mPortSettingsChanged")
+ , mCallback(aCallback)
+{
+ LOG("");
+ mOmxLayer = new OmxPromiseLayer(mOmxTaskQueue, this, aImageContainer);
+
+ mOmxTaskQueue->Dispatch(NewRunnableMethod(this, &OmxDataDecoder::InitializationTask));
+}
+
+OmxDataDecoder::~OmxDataDecoder()
+{
+ LOG("");
+}
+
+void
+OmxDataDecoder::InitializationTask()
+{
+ mWatchManager.Watch(mOmxState, &OmxDataDecoder::OmxStateRunner);
+ mWatchManager.Watch(mPortSettingsChanged, &OmxDataDecoder::PortSettingsChanged);
+}
+
+void
+OmxDataDecoder::EndOfStream()
+{
+ LOG("");
+ MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
+
+ mFlushing = true;
+ RefPtr<OmxDataDecoder> self = this;
+ mOmxLayer->SendCommand(OMX_CommandFlush, OMX_ALL, nullptr)
+ ->Then(mReaderTaskQueue, __func__,
+ [self] () {
+ self->mFlushing = false;
+ self->mCallback->DrainComplete();
+ },
+ [self] () {
+ self->mFlushing = false;
+ self->mCallback->DrainComplete();
+ });
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+OmxDataDecoder::Init()
+{
+ LOG("");
+ mReaderTaskQueue = AbstractThread::GetCurrent()->AsTaskQueue();
+ MOZ_ASSERT(mReaderTaskQueue);
+
+ RefPtr<InitPromise> p = mInitPromise.Ensure(__func__);
+ RefPtr<OmxDataDecoder> self = this;
+
+  // TODO: we need to get permission from the resource manager before
+  // allocating the OMX component.
+ InvokeAsync(mOmxTaskQueue, mOmxLayer.get(), __func__, &OmxPromiseLayer::Init,
+ mTrackInfo.get())
+ ->Then(mOmxTaskQueue, __func__,
+ [self] () {
+             // The component has just been loaded (OMX_StateLoaded);
+             // OmxStateRunner will drive it towards OMX_StateIdle.
+ self->mOmxState = self->mOmxLayer->GetState();
+ MOZ_ASSERT(self->mOmxState != OMX_StateIdle);
+ },
+ [self] () {
+ self->RejectInitPromise(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ });
+
+ return p;
+}
+
+void
+OmxDataDecoder::Input(MediaRawData* aSample)
+{
+ LOG("sample %p", aSample);
+ MOZ_ASSERT(mInitPromise.IsEmpty());
+
+ RefPtr<OmxDataDecoder> self = this;
+ RefPtr<MediaRawData> sample = aSample;
+
+ nsCOMPtr<nsIRunnable> r =
+ NS_NewRunnableFunction([self, sample] () {
+ self->mMediaRawDatas.AppendElement(sample);
+
+ // Start to fill/empty buffers.
+ if (self->mOmxState == OMX_StateIdle ||
+ self->mOmxState == OMX_StateExecuting) {
+ self->FillAndEmptyBuffers();
+ }
+ });
+ mOmxTaskQueue->Dispatch(r.forget());
+}
+
+void
+OmxDataDecoder::Flush()
+{
+ LOG("");
+
+ mFlushing = true;
+
+ mOmxTaskQueue->Dispatch(NewRunnableMethod(this, &OmxDataDecoder::DoFlush));
+
+ // According to the definition of Flush() in PDM:
+ // "the decoder must be ready to accept new input for decoding".
+ // So it needs to wait for the Omx to complete the flush command.
+ MonitorAutoLock lock(mMonitor);
+ while (mFlushing) {
+ lock.Wait();
+ }
+}
+
+void
+OmxDataDecoder::Drain()
+{
+ LOG("");
+
+ mOmxTaskQueue->Dispatch(NewRunnableMethod(this, &OmxDataDecoder::SendEosBuffer));
+}
+
+void
+OmxDataDecoder::Shutdown()
+{
+ LOG("");
+
+ mShuttingDown = true;
+
+ mOmxTaskQueue->Dispatch(NewRunnableMethod(this, &OmxDataDecoder::DoAsyncShutdown));
+
+ {
+    // DoAsyncShutdown() will run for a while and could still be running when
+    // the reader releases the decoder, which causes problems. To avoid that,
+    // Shutdown() must block until DoAsyncShutdown() has completed.
+ MonitorAutoLock lock(mMonitor);
+ while (mShuttingDown) {
+ lock.Wait();
+ }
+ }
+
+ mOmxTaskQueue->BeginShutdown();
+ mOmxTaskQueue->AwaitShutdownAndIdle();
+}
+
+void
+OmxDataDecoder::DoAsyncShutdown()
+{
+ LOG("");
+ MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
+ MOZ_ASSERT(!mFlushing);
+
+ mWatchManager.Unwatch(mOmxState, &OmxDataDecoder::OmxStateRunner);
+ mWatchManager.Unwatch(mPortSettingsChanged, &OmxDataDecoder::PortSettingsChanged);
+
+ // Flush to all ports, so all buffers can be returned from component.
+ RefPtr<OmxDataDecoder> self = this;
+ mOmxLayer->SendCommand(OMX_CommandFlush, OMX_ALL, nullptr)
+ ->Then(mOmxTaskQueue, __func__,
+ [self] () -> RefPtr<OmxCommandPromise> {
+ LOGL("DoAsyncShutdown: flush complete");
+ return self->mOmxLayer->SendCommand(OMX_CommandStateSet, OMX_StateIdle, nullptr);
+ },
+ [self] () {
+ self->mOmxLayer->Shutdown();
+ })
+ ->CompletionPromise()
+ ->Then(mOmxTaskQueue, __func__,
+ [self] () -> RefPtr<OmxCommandPromise> {
+ RefPtr<OmxCommandPromise> p =
+ self->mOmxLayer->SendCommand(OMX_CommandStateSet, OMX_StateLoaded, nullptr);
+
+ // According to spec 3.1.1.2.2.1:
+ // OMX_StateLoaded needs to be sent before releasing buffers.
+ // And state transition from OMX_StateIdle to OMX_StateLoaded
+ // is completed when all of the buffers have been removed
+ // from the component.
+                     // Here the buffer promises are not yet resolved because
+                     // the buffers are still being displayed by the layer, so
+                     // we need to wait until the layer returns them.
+ LOGL("DoAsyncShutdown: releasing buffers...");
+ self->ReleaseBuffers(OMX_DirInput);
+ self->ReleaseBuffers(OMX_DirOutput);
+
+ return p;
+ },
+ [self] () {
+ self->mOmxLayer->Shutdown();
+ })
+ ->CompletionPromise()
+ ->Then(mOmxTaskQueue, __func__,
+ [self] () {
+ LOGL("DoAsyncShutdown: OMX_StateLoaded, it is safe to shutdown omx");
+ self->mOmxLayer->Shutdown();
+ self->mWatchManager.Shutdown();
+ self->mOmxLayer = nullptr;
+ self->mMediaDataHelper = nullptr;
+
+ MonitorAutoLock lock(self->mMonitor);
+ self->mShuttingDown = false;
+ self->mMonitor.Notify();
+ },
+ [self] () {
+ self->mOmxLayer->Shutdown();
+ self->mWatchManager.Shutdown();
+ self->mOmxLayer = nullptr;
+ self->mMediaDataHelper = nullptr;
+
+ MonitorAutoLock lock(self->mMonitor);
+ self->mShuttingDown = false;
+ self->mMonitor.Notify();
+ });
+}
+
+void
+OmxDataDecoder::FillBufferDone(BufferData* aData)
+{
+ MOZ_ASSERT(!aData || aData->mStatus == BufferData::BufferStatus::OMX_CLIENT);
+
+  // Don't output samples while flushing or shutting down, especially decoded
+  // video frames. On Gonk a decoded video frame holds a promise in BufferData
+  // that waits for the layer to resolve it via the recycle callback; if no
+  // module sends the frame to the layer, the promise stays unresolved forever.
+ if (mFlushing || mShuttingDown) {
+ LOG("mFlush or mShuttingDown, drop data");
+ aData->mStatus = BufferData::BufferStatus::FREE;
+ return;
+ }
+
+ if (aData->mBuffer->nFlags & OMX_BUFFERFLAG_EOS) {
+    // Reached EOS; the data is empty so there is nothing to output.
+ EndOfStream();
+ aData->mStatus = BufferData::BufferStatus::FREE;
+ } else {
+ Output(aData);
+ FillAndEmptyBuffers();
+ }
+}
+
+void
+OmxDataDecoder::Output(BufferData* aData)
+{
+ if (!mMediaDataHelper) {
+ mMediaDataHelper = new MediaDataHelper(mTrackInfo.get(), mImageContainer, mOmxLayer);
+ }
+
+ bool isPlatformData = false;
+ RefPtr<MediaData> data = mMediaDataHelper->GetMediaData(aData, isPlatformData);
+ if (!data) {
+ aData->mStatus = BufferData::BufferStatus::FREE;
+ return;
+ }
+
+ if (isPlatformData) {
+    // If the MediaData is platform dependent, it is usually a limited
+    // resource, for example a GraphicBuffer on Gonk, so we use a promise to
+    // be notified when the resource is free.
+ aData->mStatus = BufferData::BufferStatus::OMX_CLIENT_OUTPUT;
+
+ MOZ_RELEASE_ASSERT(aData->mPromise.IsEmpty());
+ RefPtr<OmxBufferPromise> p = aData->mPromise.Ensure(__func__);
+
+ RefPtr<OmxDataDecoder> self = this;
+ RefPtr<BufferData> buffer = aData;
+ p->Then(mOmxTaskQueue, __func__,
+ [self, buffer] () {
+ MOZ_RELEASE_ASSERT(buffer->mStatus == BufferData::BufferStatus::OMX_CLIENT_OUTPUT);
+ buffer->mStatus = BufferData::BufferStatus::FREE;
+ self->FillAndEmptyBuffers();
+ },
+ [buffer] () {
+ MOZ_RELEASE_ASSERT(buffer->mStatus == BufferData::BufferStatus::OMX_CLIENT_OUTPUT);
+ buffer->mStatus = BufferData::BufferStatus::FREE;
+ });
+ } else {
+ aData->mStatus = BufferData::BufferStatus::FREE;
+ }
+
+ mCallback->Output(data);
+}
+
+void
+OmxDataDecoder::FillBufferFailure(OmxBufferFailureHolder aFailureHolder)
+{
+ NotifyError(aFailureHolder.mError, __func__);
+}
+
+void
+OmxDataDecoder::EmptyBufferDone(BufferData* aData)
+{
+ MOZ_ASSERT(!aData || aData->mStatus == BufferData::BufferStatus::OMX_CLIENT);
+
+ // Nothing to do when status of input buffer is OMX_CLIENT.
+ aData->mStatus = BufferData::BufferStatus::FREE;
+ FillAndEmptyBuffers();
+
+  // There is no way to know whether the component has enough raw samples to
+  // generate output, especially for video decoding, so we request raw samples
+  // aggressively here.
+ if (!mCheckingInputExhausted && !mMediaRawDatas.Length()) {
+ mCheckingInputExhausted = true;
+
+ RefPtr<OmxDataDecoder> self = this;
+ nsCOMPtr<nsIRunnable> r =
+ NS_NewRunnableFunction([self] () {
+ MOZ_ASSERT(self->mOmxTaskQueue->IsCurrentThreadIn());
+
+ self->mCheckingInputExhausted = false;
+
+ if (self->mMediaRawDatas.Length()) {
+ return;
+ }
+
+ LOGL("Call InputExhausted()");
+ self->mCallback->InputExhausted();
+ });
+
+ mOmxTaskQueue->Dispatch(r.forget());
+ }
+}
+
+void
+OmxDataDecoder::EmptyBufferFailure(OmxBufferFailureHolder aFailureHolder)
+{
+ NotifyError(aFailureHolder.mError, __func__);
+}
+
+void
+OmxDataDecoder::NotifyError(OMX_ERRORTYPE aOmxError, const char* aLine, const MediaResult& aError)
+{
+ LOG("NotifyError %d (%d) at %s", aOmxError, aError.Code(), aLine);
+ mCallback->Error(aError);
+}
+
+void
+OmxDataDecoder::FillAndEmptyBuffers()
+{
+ MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
+ MOZ_ASSERT(mOmxState == OMX_StateExecuting);
+
+  // While a port settings change is in progress, buffer operations are forbidden.
+ if (mPortSettingsChanged != -1 || mShuttingDown || mFlushing) {
+ return;
+ }
+
+ // Trigger input port.
+ while (!!mMediaRawDatas.Length()) {
+ // input buffer must be used by component if there is data available.
+ RefPtr<BufferData> inbuf = FindAvailableBuffer(OMX_DirInput);
+ if (!inbuf) {
+ LOG("no input buffer!");
+ break;
+ }
+
+ RefPtr<MediaRawData> data = mMediaRawDatas[0];
+    // The buffer size should be large enough for the raw data.
+ MOZ_RELEASE_ASSERT(inbuf->mBuffer->nAllocLen >= data->Size());
+
+ memcpy(inbuf->mBuffer->pBuffer, data->Data(), data->Size());
+ inbuf->mBuffer->nFilledLen = data->Size();
+ inbuf->mBuffer->nOffset = 0;
+ inbuf->mBuffer->nFlags = inbuf->mBuffer->nAllocLen > data->Size() ?
+ OMX_BUFFERFLAG_ENDOFFRAME : 0;
+ inbuf->mBuffer->nTimeStamp = data->mTime;
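+    // An empty MediaRawData (queued by SendEosBuffer()) carries no payload,
+    // so it is turned into an EOS buffer below instead of a normal frame.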
+ if (data->Size()) {
+ inbuf->mRawData = mMediaRawDatas[0];
+ } else {
+ LOG("send EOS buffer");
+ inbuf->mBuffer->nFlags |= OMX_BUFFERFLAG_EOS;
+ }
+
+ LOG("feed sample %p to omx component, len %d, flag %X", data.get(),
+ inbuf->mBuffer->nFilledLen, inbuf->mBuffer->nFlags);
+ mOmxLayer->EmptyBuffer(inbuf)->Then(mOmxTaskQueue, __func__, this,
+ &OmxDataDecoder::EmptyBufferDone,
+ &OmxDataDecoder::EmptyBufferFailure);
+ mMediaRawDatas.RemoveElementAt(0);
+ }
+
+ // Trigger output port.
+ while (true) {
+ RefPtr<BufferData> outbuf = FindAvailableBuffer(OMX_DirOutput);
+ if (!outbuf) {
+ break;
+ }
+
+ mOmxLayer->FillBuffer(outbuf)->Then(mOmxTaskQueue, __func__, this,
+ &OmxDataDecoder::FillBufferDone,
+ &OmxDataDecoder::FillBufferFailure);
+ }
+}
+
+OmxPromiseLayer::BufferData*
+OmxDataDecoder::FindAvailableBuffer(OMX_DIRTYPE aType)
+{
+ BUFFERLIST* buffers = GetBuffers(aType);
+
+ for (uint32_t i = 0; i < buffers->Length(); i++) {
+ BufferData* buf = buffers->ElementAt(i);
+ if (buf->mStatus == BufferData::BufferStatus::FREE) {
+ return buf;
+ }
+ }
+
+ return nullptr;
+}
+
+nsresult
+OmxDataDecoder::AllocateBuffers(OMX_DIRTYPE aType)
+{
+ MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
+
+ return mOmxLayer->AllocateOmxBuffer(aType, GetBuffers(aType));
+}
+
+nsresult
+OmxDataDecoder::ReleaseBuffers(OMX_DIRTYPE aType)
+{
+ MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
+
+ return mOmxLayer->ReleaseOmxBuffer(aType, GetBuffers(aType));
+}
+
+nsTArray<RefPtr<OmxPromiseLayer::BufferData>>*
+OmxDataDecoder::GetBuffers(OMX_DIRTYPE aType)
+{
+ MOZ_ASSERT(aType == OMX_DIRTYPE::OMX_DirInput ||
+ aType == OMX_DIRTYPE::OMX_DirOutput);
+
+ if (aType == OMX_DIRTYPE::OMX_DirInput) {
+ return &mInPortBuffers;
+ }
+ return &mOutPortBuffers;
+}
+
+void
+OmxDataDecoder::ResolveInitPromise(const char* aMethodName)
+{
+ LOG("called from %s", aMethodName);
+ RefPtr<OmxDataDecoder> self = this;
+ nsCOMPtr<nsIRunnable> r =
+ NS_NewRunnableFunction([self, aMethodName] () {
+ MOZ_ASSERT(self->mReaderTaskQueue->IsCurrentThreadIn());
+ self->mInitPromise.ResolveIfExists(self->mTrackInfo->GetType(), aMethodName);
+ });
+ mReaderTaskQueue->Dispatch(r.forget());
+}
+
+void
+OmxDataDecoder::RejectInitPromise(MediaResult aError, const char* aMethodName)
+{
+ RefPtr<OmxDataDecoder> self = this;
+ nsCOMPtr<nsIRunnable> r =
+ NS_NewRunnableFunction([self, aError, aMethodName] () {
+ MOZ_ASSERT(self->mReaderTaskQueue->IsCurrentThreadIn());
+ self->mInitPromise.RejectIfExists(aError, aMethodName);
+ });
+ mReaderTaskQueue->Dispatch(r.forget());
+}
+
+void
+OmxDataDecoder::OmxStateRunner()
+{
+ MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
+ LOG("OMX state: %s", StateTypeToStr(mOmxState));
+
+ // TODO: maybe it'd be better to use promise CompletionPromise() to replace
+ // this state machine.
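+  // Expected transitions: OMX_StateLoaded -> OMX_StateIdle (once buffers are
+  // allocated) -> OMX_StateExecuting (then codec config data is sent).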
+ if (mOmxState == OMX_StateLoaded) {
+ ConfigCodec();
+
+ // Send OpenMax state command to OMX_StateIdle.
+ RefPtr<OmxDataDecoder> self = this;
+ mOmxLayer->SendCommand(OMX_CommandStateSet, OMX_StateIdle, nullptr)
+ ->Then(mOmxTaskQueue, __func__,
+ [self] () {
+ // Current state should be OMX_StateIdle.
+ self->mOmxState = self->mOmxLayer->GetState();
+ MOZ_ASSERT(self->mOmxState == OMX_StateIdle);
+ },
+ [self] () {
+ self->RejectInitPromise(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ });
+
+ // Allocate input and output buffers.
+ OMX_DIRTYPE types[] = {OMX_DIRTYPE::OMX_DirInput, OMX_DIRTYPE::OMX_DirOutput};
+ for(const auto id : types) {
+ if (NS_FAILED(AllocateBuffers(id))) {
+ LOG("Failed to allocate buffer on port %d", id);
+ RejectInitPromise(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ break;
+ }
+ }
+ } else if (mOmxState == OMX_StateIdle) {
+ RefPtr<OmxDataDecoder> self = this;
+ mOmxLayer->SendCommand(OMX_CommandStateSet, OMX_StateExecuting, nullptr)
+ ->Then(mOmxTaskQueue, __func__,
+ [self] () {
+ self->mOmxState = self->mOmxLayer->GetState();
+ MOZ_ASSERT(self->mOmxState == OMX_StateExecuting);
+
+ self->ResolveInitPromise(__func__);
+ },
+ [self] () {
+ self->RejectInitPromise(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
+ });
+ } else if (mOmxState == OMX_StateExecuting) {
+ // Configure codec once it gets OMX_StateExecuting state.
+ FillCodecConfigDataToOmx();
+ } else {
+ MOZ_ASSERT(0);
+ }
+}
+
+void
+OmxDataDecoder::ConfigCodec()
+{
+ OMX_ERRORTYPE err = mOmxLayer->Config();
+ CHECK_OMX_ERR(err);
+}
+
+void
+OmxDataDecoder::FillCodecConfigDataToOmx()
+{
+ // Codec configure data should be the first sample running on Omx TaskQueue.
+ MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
+ MOZ_ASSERT(!mMediaRawDatas.Length());
+ MOZ_ASSERT(mOmxState == OMX_StateIdle || mOmxState == OMX_StateExecuting);
+
+
+ RefPtr<BufferData> inbuf = FindAvailableBuffer(OMX_DirInput);
+ RefPtr<MediaByteBuffer> csc;
+ if (mTrackInfo->IsAudio()) {
+ csc = mTrackInfo->GetAsAudioInfo()->mCodecSpecificConfig;
+ } else if (mTrackInfo->IsVideo()) {
+ csc = mTrackInfo->GetAsVideoInfo()->mCodecSpecificConfig;
+ }
+
+ MOZ_RELEASE_ASSERT(csc);
+
+  // For some codecs, like H264, the codec specific data may come with the
+  // first packet rather than from the container (in which case it is empty
+  // here).
+ if (csc->Length()) {
+ memcpy(inbuf->mBuffer->pBuffer,
+ csc->Elements(),
+ csc->Length());
+ inbuf->mBuffer->nFilledLen = csc->Length();
+ inbuf->mBuffer->nOffset = 0;
+ inbuf->mBuffer->nFlags = (OMX_BUFFERFLAG_ENDOFFRAME | OMX_BUFFERFLAG_CODECCONFIG);
+
+ LOG("Feed codec configure data to OMX component");
+ mOmxLayer->EmptyBuffer(inbuf)->Then(mOmxTaskQueue, __func__, this,
+ &OmxDataDecoder::EmptyBufferDone,
+ &OmxDataDecoder::EmptyBufferFailure);
+ }
+}
+
+bool
+OmxDataDecoder::Event(OMX_EVENTTYPE aEvent, OMX_U32 aData1, OMX_U32 aData2)
+{
+ MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
+
+ if (mOmxLayer->Event(aEvent, aData1, aData2)) {
+ return true;
+ }
+
+ switch (aEvent) {
+ case OMX_EventPortSettingsChanged:
+ {
+ // Don't always disable port. See bug 1235340.
+ if (aData2 == 0 ||
+ aData2 == OMX_IndexParamPortDefinition) {
+ // According to spec: "To prevent the loss of any input data, the
+ // component issuing the OMX_EventPortSettingsChanged event on its input
+ // port should buffer all input port data that arrives between the
+ // emission of the OMX_EventPortSettingsChanged event and the arrival of
+ // the command to disable the input port."
+ //
+ // So client needs to disable port and reallocate buffers.
+ MOZ_ASSERT(mPortSettingsChanged == -1);
+ mPortSettingsChanged = aData1;
+ }
+ LOG("Got OMX_EventPortSettingsChanged event");
+ break;
+ }
+ default:
+ {
+      // Got an error during decoding; notify the MediaFormatReader so it can
+      // skip to the next keyframe.
+ if (aEvent == OMX_EventError && mOmxState == OMX_StateExecuting) {
+ NotifyError((OMX_ERRORTYPE)aData1, __func__,
+ MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR, __func__));
+ return true;
+ }
+ LOG("WARNING: got none handle event: %d, aData1: %d, aData2: %d",
+ aEvent, aData1, aData2);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool
+OmxDataDecoder::BuffersCanBeReleased(OMX_DIRTYPE aType)
+{
+ BUFFERLIST* buffers = GetBuffers(aType);
+ uint32_t len = buffers->Length();
+ for (uint32_t i = 0; i < len; i++) {
+ BufferData::BufferStatus buf_status = buffers->ElementAt(i)->mStatus;
+ if (buf_status == BufferData::BufferStatus::OMX_COMPONENT ||
+ buf_status == BufferData::BufferStatus::OMX_CLIENT_OUTPUT) {
+ return false;
+ }
+ }
+ return true;
+}
+
+OMX_DIRTYPE
+OmxDataDecoder::GetPortDirection(uint32_t aPortIndex)
+{
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOmxParameter(&def);
+ def.nPortIndex = mPortSettingsChanged;
+
+ OMX_ERRORTYPE err = mOmxLayer->GetParameter(OMX_IndexParamPortDefinition,
+ &def,
+ sizeof(def));
+ if (err != OMX_ErrorNone) {
+ return OMX_DirMax;
+ }
+ return def.eDir;
+}
+
+RefPtr<OmxPromiseLayer::OmxBufferPromise::AllPromiseType>
+OmxDataDecoder::CollectBufferPromises(OMX_DIRTYPE aType)
+{
+ MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
+
+ nsTArray<RefPtr<OmxBufferPromise>> promises;
+ OMX_DIRTYPE types[] = {OMX_DIRTYPE::OMX_DirInput, OMX_DIRTYPE::OMX_DirOutput};
+ for (const auto type : types) {
+ if ((aType == type) || (aType == OMX_DirMax)) {
+      // Find the buffers that have pending promises.
+ BUFFERLIST* buffers = GetBuffers(type);
+
+ for (uint32_t i = 0; i < buffers->Length(); i++) {
+ BufferData* buf = buffers->ElementAt(i);
+ if (!buf->mPromise.IsEmpty()) {
+          // OmxBufferPromise is not exclusive; it can have multiple "Then"s,
+          // so it is safe to call "Ensure" here.
+ promises.AppendElement(buf->mPromise.Ensure(__func__));
+ }
+ }
+ }
+ }
+
+ LOG("CollectBufferPromises: type %d, total %d promiese", aType, promises.Length());
+ if (promises.Length()) {
+ return OmxBufferPromise::All(mOmxTaskQueue, promises);
+ }
+
+ nsTArray<BufferData*> headers;
+ return OmxBufferPromise::AllPromiseType::CreateAndResolve(headers, __func__);
+}
+
+void
+OmxDataDecoder::PortSettingsChanged()
+{
+ MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
+
+ if (mPortSettingsChanged == -1 || mOmxState == OMX_STATETYPE::OMX_StateInvalid) {
+ return;
+ }
+
+ // The PortSettingsChanged algorithm:
+ //
+ // 1. disable port.
+  // 2. wait for port buffers to return to the client, then release them.
+ // 3. enable port.
+ // 4. allocate port buffers.
+ //
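+  // Steps 1, 3 and 4 are chained below on the OMX_CommandPortDisable promise;
+  // step 2 runs via CollectBufferPromises() once the disabled port's buffers
+  // come back.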
+
+  // Disable the port. First get the port definition to check whether the
+  // target port is enabled.
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOmxParameter(&def);
+ def.nPortIndex = mPortSettingsChanged;
+
+ OMX_ERRORTYPE err = mOmxLayer->GetParameter(OMX_IndexParamPortDefinition,
+ &def,
+ sizeof(def));
+ CHECK_OMX_ERR(err);
+
+ RefPtr<OmxDataDecoder> self = this;
+ if (def.bEnabled) {
+ // 1. disable port.
+ LOG("PortSettingsChanged: disable port %d", def.nPortIndex);
+ mOmxLayer->SendCommand(OMX_CommandPortDisable, mPortSettingsChanged, nullptr)
+ ->Then(mOmxTaskQueue, __func__,
+ [self, def] () -> RefPtr<OmxCommandPromise> {
+ // 3. enable port.
+ // Send enable port command.
+ RefPtr<OmxCommandPromise> p =
+ self->mOmxLayer->SendCommand(OMX_CommandPortEnable,
+ self->mPortSettingsChanged,
+ nullptr);
+
+ // 4. allocate port buffers.
+ // Allocate new port buffers.
+ nsresult rv = self->AllocateBuffers(def.eDir);
+ if (NS_FAILED(rv)) {
+ self->NotifyError(OMX_ErrorUndefined, __func__);
+ }
+
+ return p;
+ },
+ [self] () {
+ self->NotifyError(OMX_ErrorUndefined, __func__);
+ })
+ ->CompletionPromise()
+ ->Then(mOmxTaskQueue, __func__,
+ [self] () {
+ LOGL("PortSettingsChanged: port settings changed complete");
+               // The port settings change is complete.
+ self->mPortSettingsChanged = -1;
+ self->FillAndEmptyBuffers();
+ },
+ [self] () {
+ self->NotifyError(OMX_ErrorUndefined, __func__);
+ });
+
+    // 2. wait for port buffers to return to the client, then release them.
+    //
+    // Port buffers will be returned to the client soon after the
+    // OMX_CommandPortDisable command is sent; release them at that point.
+ CollectBufferPromises(def.eDir)
+ ->Then(mOmxTaskQueue, __func__,
+ [self, def] () {
+ MOZ_ASSERT(self->BuffersCanBeReleased(def.eDir));
+ nsresult rv = self->ReleaseBuffers(def.eDir);
+ if (NS_FAILED(rv)) {
+ MOZ_RELEASE_ASSERT(0);
+ self->NotifyError(OMX_ErrorUndefined, __func__);
+ }
+ },
+ [self] () {
+ self->NotifyError(OMX_ErrorUndefined, __func__);
+ });
+ }
+}
+
+void
+OmxDataDecoder::SendEosBuffer()
+{
+ MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
+
+  // There is no 'Drain' API in OpenMAX, so we have to wait for an output
+  // sample with the EOS flag. However, MediaRawData doesn't carry EOS
+  // information, so we queue an empty MediaRawData here; it will be sent as a
+  // buffer flagged with OMX_BUFFERFLAG_EOS. This behaviour should be
+  // compliant with the spec.
+ RefPtr<MediaRawData> eos_data = new MediaRawData();
+ mMediaRawDatas.AppendElement(eos_data);
+ FillAndEmptyBuffers();
+}
+
+void
+OmxDataDecoder::DoFlush()
+{
+ MOZ_ASSERT(mOmxTaskQueue->IsCurrentThreadIn());
+
+ // 1. Call OMX command OMX_CommandFlush in Omx TaskQueue.
+ // 2. Remove all elements in mMediaRawDatas when flush is completed.
+ mOmxLayer->SendCommand(OMX_CommandFlush, OMX_ALL, nullptr)
+ ->Then(mOmxTaskQueue, __func__, this,
+ &OmxDataDecoder::FlushComplete,
+ &OmxDataDecoder::FlushFailure);
+}
+
+void
+OmxDataDecoder::FlushComplete(OMX_COMMANDTYPE aCommandType)
+{
+ mMediaRawDatas.Clear();
+ mFlushing = false;
+
+ MonitorAutoLock lock(mMonitor);
+ mMonitor.Notify();
+ LOG("Flush complete");
+}
+
+void OmxDataDecoder::FlushFailure(OmxCommandFailureHolder aFailureHolder)
+{
+ NotifyError(OMX_ErrorUndefined, __func__);
+ mFlushing = false;
+
+ MonitorAutoLock lock(mMonitor);
+ mMonitor.Notify();
+}
+
+MediaDataHelper::MediaDataHelper(const TrackInfo* aTrackInfo,
+ layers::ImageContainer* aImageContainer,
+ OmxPromiseLayer* aOmxLayer)
+ : mTrackInfo(aTrackInfo)
+ , mAudioCompactor(mAudioQueue)
+ , mImageContainer(aImageContainer)
+{
+ InitOmxParameter(&mOutputPortDef);
+ mOutputPortDef.nPortIndex = aOmxLayer->OutputPortIndex();
+ aOmxLayer->GetParameter(OMX_IndexParamPortDefinition, &mOutputPortDef, sizeof(mOutputPortDef));
+}
+
+already_AddRefed<MediaData>
+MediaDataHelper::GetMediaData(BufferData* aBufferData, bool& aPlatformDependentData)
+{
+  aPlatformDependentData = false;
+ RefPtr<MediaData> data;
+
+ if (mTrackInfo->IsAudio()) {
+ if (!aBufferData->mBuffer->nFilledLen) {
+ return nullptr;
+ }
+ data = CreateAudioData(aBufferData);
+ } else if (mTrackInfo->IsVideo()) {
+ data = aBufferData->GetPlatformMediaData();
+ if (data) {
+      aPlatformDependentData = true;
+ } else {
+ if (!aBufferData->mBuffer->nFilledLen) {
+ return nullptr;
+ }
+      // Get YUV VideoData; this uses more CPU and, in most cases, comes from a software codec.
+ data = CreateYUV420VideoData(aBufferData);
+ }
+
+ // Update video time code, duration... from the raw data.
+ VideoData* video(data->As<VideoData>());
+ if (aBufferData->mRawData) {
+ video->mTime = aBufferData->mRawData->mTime;
+ video->mTimecode = aBufferData->mRawData->mTimecode;
+ video->mOffset = aBufferData->mRawData->mOffset;
+ video->mDuration = aBufferData->mRawData->mDuration;
+ video->mKeyframe = aBufferData->mRawData->mKeyframe;
+ }
+ }
+
+ return data.forget();
+}
+
+already_AddRefed<AudioData>
+MediaDataHelper::CreateAudioData(BufferData* aBufferData)
+{
+ RefPtr<AudioData> audio;
+ OMX_BUFFERHEADERTYPE* buf = aBufferData->mBuffer;
+ const AudioInfo* info = mTrackInfo->GetAsAudioInfo();
+ if (buf->nFilledLen) {
+ uint64_t offset = 0;
+ uint32_t frames = buf->nFilledLen / (2 * info->mChannels);
+ if (aBufferData->mRawData) {
+ offset = aBufferData->mRawData->mOffset;
+ }
+ typedef AudioCompactor::NativeCopy OmxCopy;
+ mAudioCompactor.Push(offset,
+ buf->nTimeStamp,
+ info->mRate,
+ frames,
+ info->mChannels,
+ OmxCopy(buf->pBuffer + buf->nOffset,
+ buf->nFilledLen,
+ info->mChannels));
+ audio = mAudioQueue.PopFront();
+ }
+
+ return audio.forget();
+}
+
+already_AddRefed<VideoData>
+MediaDataHelper::CreateYUV420VideoData(BufferData* aBufferData)
+{
+ uint8_t *yuv420p_buffer = (uint8_t *)aBufferData->mBuffer->pBuffer;
+ int32_t stride = mOutputPortDef.format.video.nStride;
+ int32_t slice_height = mOutputPortDef.format.video.nSliceHeight;
+ int32_t width = mTrackInfo->GetAsVideoInfo()->mImage.width;
+ int32_t height = mTrackInfo->GetAsVideoInfo()->mImage.height;
+
+ // TODO: convert other formats to YUV420.
+ if (mOutputPortDef.format.video.eColorFormat != OMX_COLOR_FormatYUV420Planar) {
+ return nullptr;
+ }
+
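+  // Layout assumed below: OMX_COLOR_FormatYUV420Planar stores a full-resolution
+  // Y plane followed by quarter-size U and V planes, with rows padded to nStride
+  // and planes padded to nSliceHeight, so the plane offsets are derived from the
+  // stride and slice height rather than from the picture size.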
+ size_t yuv420p_y_size = stride * slice_height;
+ size_t yuv420p_u_size = ((stride + 1) / 2) * ((slice_height + 1) / 2);
+ uint8_t *yuv420p_y = yuv420p_buffer;
+ uint8_t *yuv420p_u = yuv420p_y + yuv420p_y_size;
+ uint8_t *yuv420p_v = yuv420p_u + yuv420p_u_size;
+
+ VideoData::YCbCrBuffer b;
+ b.mPlanes[0].mData = yuv420p_y;
+ b.mPlanes[0].mWidth = width;
+ b.mPlanes[0].mHeight = height;
+ b.mPlanes[0].mStride = stride;
+ b.mPlanes[0].mOffset = 0;
+ b.mPlanes[0].mSkip = 0;
+
+ b.mPlanes[1].mData = yuv420p_u;
+ b.mPlanes[1].mWidth = (width + 1) / 2;
+ b.mPlanes[1].mHeight = (height + 1) / 2;
+ b.mPlanes[1].mStride = (stride + 1) / 2;
+ b.mPlanes[1].mOffset = 0;
+ b.mPlanes[1].mSkip = 0;
+
+ b.mPlanes[2].mData = yuv420p_v;
+  b.mPlanes[2].mWidth = (width + 1) / 2;
+ b.mPlanes[2].mHeight = (height + 1) / 2;
+ b.mPlanes[2].mStride = (stride + 1) / 2;
+ b.mPlanes[2].mOffset = 0;
+ b.mPlanes[2].mSkip = 0;
+
+ VideoInfo info(*mTrackInfo->GetAsVideoInfo());
+ RefPtr<VideoData> data =
+ VideoData::CreateAndCopyData(info,
+ mImageContainer,
+ 0, // Filled later by caller.
+ 0, // Filled later by caller.
+ 1, // We don't know the duration.
+ b,
+ 0, // Filled later by caller.
+ -1,
+ info.ImageRect());
+
+ LOG("YUV420 VideoData: disp width %d, height %d, pic width %d, height %d, time %ld",
+ info.mDisplay.width, info.mDisplay.height, info.mImage.width,
+ info.mImage.height, aBufferData->mBuffer->nTimeStamp);
+
+ return data.forget();
+}
+
+}
diff --git a/dom/media/platforms/omx/OmxDataDecoder.h b/dom/media/platforms/omx/OmxDataDecoder.h
new file mode 100644
index 000000000..ea75b2a2a
--- /dev/null
+++ b/dom/media/platforms/omx/OmxDataDecoder.h
@@ -0,0 +1,214 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(OmxDataDecoder_h_)
+#define OmxDataDecoder_h_
+
+#include "mozilla/Monitor.h"
+
+#include "AudioCompactor.h"
+#include "ImageContainer.h"
+#include "MediaInfo.h"
+#include "PlatformDecoderModule.h"
+
+#include "OMX_Component.h"
+
+#include "OmxPromiseLayer.h"
+
+namespace mozilla {
+
+class MediaDataHelper;
+
+typedef OmxPromiseLayer::OmxCommandPromise OmxCommandPromise;
+typedef OmxPromiseLayer::OmxBufferPromise OmxBufferPromise;
+typedef OmxPromiseLayer::OmxBufferFailureHolder OmxBufferFailureHolder;
+typedef OmxPromiseLayer::OmxCommandFailureHolder OmxCommandFailureHolder;
+typedef OmxPromiseLayer::BufferData BufferData;
+typedef OmxPromiseLayer::BUFFERLIST BUFFERLIST;
+
+/* OmxDataDecoder is the major class which performs the following:
+ * 1. Translate PDM function calls into OMX commands.
+ * 2. Keep track of the buffers exchanged between client and component.
+ * 3. Manage the OMX state.
+ *
+ * Following the definitions in OpenMax IL spec section 2.2.1, there are 3 major
+ * roles in OpenMax IL.
+ *
+ * IL client:
+ * "The IL client may be a layer below the GUI application, such as GStreamer,
+ * or may be several layers below the GUI layer."
+ *
+ * OmxDataDecoder acts as the IL client.
+ *
+ * OpenMAX IL component:
+ * "A component that is intended to wrap functionality that is required in the
+ * target system."
+ *
+ * OmxPromiseLayer acts as the OpenMAX IL component.
+ *
+ * OpenMAX IL core:
+ * "Platform-specific code that has the functionality necessary to locate and
+ * then load an OpenMAX IL component into main memory."
+ *
+ * OmxPlatformLayer acts as the OpenMAX IL core.
+ */
+class OmxDataDecoder : public MediaDataDecoder {
+protected:
+ virtual ~OmxDataDecoder();
+
+public:
+ OmxDataDecoder(const TrackInfo& aTrackInfo,
+ MediaDataDecoderCallback* aCallback,
+ layers::ImageContainer* aImageContainer);
+
+ RefPtr<InitPromise> Init() override;
+
+ void Input(MediaRawData* aSample) override;
+
+ void Flush() override;
+
+ void Drain() override;
+
+ void Shutdown() override;
+
+ const char* GetDescriptionName() const override
+ {
+ return "omx decoder";
+ }
+
+ // Return true if event is handled.
+ bool Event(OMX_EVENTTYPE aEvent, OMX_U32 aData1, OMX_U32 aData2);
+
+protected:
+ void InitializationTask();
+
+ void ResolveInitPromise(const char* aMethodName);
+
+ void RejectInitPromise(MediaResult aError, const char* aMethodName);
+
+ void OmxStateRunner();
+
+ void FillAndEmptyBuffers();
+
+ void FillBufferDone(BufferData* aData);
+
+ void FillBufferFailure(OmxBufferFailureHolder aFailureHolder);
+
+ void EmptyBufferDone(BufferData* aData);
+
+ void EmptyBufferFailure(OmxBufferFailureHolder aFailureHolder);
+
+ void NotifyError(OMX_ERRORTYPE aOmxError,
+ const char* aLine,
+ const MediaResult& aError = MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR));
+
+  // Configure the audio/video codec.
+  // Some codecs may just ignore this and rely on the codec specific data sent in
+  // FillCodecConfigDataToOmx().
+ void ConfigCodec();
+
+  // Send codec specific data to the OMX component. The component may send an
+  // OMX_EventPortSettingsChanged back to the client, and then the client needs to
+  // disable the port and reallocate its buffers.
+ void FillCodecConfigDataToOmx();
+
+ void SendEosBuffer();
+
+ void EndOfStream();
+
+  // Called after the codec specific data has been sent and the component has
+  // found that the port format changed because of that codec specific data.
+ void PortSettingsChanged();
+
+ void Output(BufferData* aData);
+
+ // Buffer can be released if its status is not OMX_COMPONENT or
+ // OMX_CLIENT_OUTPUT.
+ bool BuffersCanBeReleased(OMX_DIRTYPE aType);
+
+ OMX_DIRTYPE GetPortDirection(uint32_t aPortIndex);
+
+ void DoAsyncShutdown();
+
+ void DoFlush();
+
+ void FlushComplete(OMX_COMMANDTYPE aCommandType);
+
+ void FlushFailure(OmxCommandFailureHolder aFailureHolder);
+
+ BUFFERLIST* GetBuffers(OMX_DIRTYPE aType);
+
+ nsresult AllocateBuffers(OMX_DIRTYPE aType);
+
+ nsresult ReleaseBuffers(OMX_DIRTYPE aType);
+
+ BufferData* FindAvailableBuffer(OMX_DIRTYPE aType);
+
+ // aType could be OMX_DirMax for all types.
+ RefPtr<OmxPromiseLayer::OmxBufferPromise::AllPromiseType>
+ CollectBufferPromises(OMX_DIRTYPE aType);
+
+ Monitor mMonitor;
+
+ // The Omx TaskQueue.
+ RefPtr<TaskQueue> mOmxTaskQueue;
+
+ RefPtr<TaskQueue> mReaderTaskQueue;
+
+ RefPtr<layers::ImageContainer> mImageContainer;
+
+ WatchManager<OmxDataDecoder> mWatchManager;
+
+ // It is accessed in omx TaskQueue.
+ Watchable<OMX_STATETYPE> mOmxState;
+
+ RefPtr<OmxPromiseLayer> mOmxLayer;
+
+ UniquePtr<TrackInfo> mTrackInfo;
+
+ // It is accessed in both omx and reader TaskQueue.
+ Atomic<bool> mFlushing;
+
+  // It is accessed in the Omx/reader TaskQueue.
+ Atomic<bool> mShuttingDown;
+
+  // It is accessed in the Omx TaskQueue.
+ bool mCheckingInputExhausted;
+
+ // It is accessed in reader TaskQueue.
+ MozPromiseHolder<InitPromise> mInitPromise;
+
+  // It is written and read in the Omx TaskQueue.
+  // Its value is the index of the port whose settings have changed;
+  // -1 means no port settings have changed.
+  //
+  // Note: while a port settings change is in progress, there should be no buffer
+  // operations via EmptyBuffer or FillBuffer.
+ Watchable<int32_t> mPortSettingsChanged;
+
+  // It is accessed in the Omx TaskQueue.
+ nsTArray<RefPtr<MediaRawData>> mMediaRawDatas;
+
+ BUFFERLIST mInPortBuffers;
+
+ BUFFERLIST mOutPortBuffers;
+
+ RefPtr<MediaDataHelper> mMediaDataHelper;
+
+ MediaDataDecoderCallback* mCallback;
+};
+
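+// Zero an OMX parameter structure and stamp its nSize/nVersion fields, as
+// OpenMAX IL requires before the structure is passed to GetParameter() or
+// SetParameter().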
+template<class T>
+void InitOmxParameter(T* aParam)
+{
+ PodZero(aParam);
+ aParam->nSize = sizeof(T);
+ aParam->nVersion.s.nVersionMajor = 1;
+}
+
+}
+
+#endif /* OmxDataDecoder_h_ */
diff --git a/dom/media/platforms/omx/OmxDecoderModule.cpp b/dom/media/platforms/omx/OmxDecoderModule.cpp
new file mode 100644
index 000000000..9a3c2f929
--- /dev/null
+++ b/dom/media/platforms/omx/OmxDecoderModule.cpp
@@ -0,0 +1,45 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "OmxDecoderModule.h"
+
+#include "OmxDataDecoder.h"
+#include "OmxPlatformLayer.h"
+
+namespace mozilla {
+
+already_AddRefed<MediaDataDecoder>
+OmxDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
+{
+ RefPtr<OmxDataDecoder> decoder = new OmxDataDecoder(aParams.mConfig,
+ aParams.mCallback,
+ aParams.mImageContainer);
+ return decoder.forget();
+}
+
+already_AddRefed<MediaDataDecoder>
+OmxDecoderModule::CreateAudioDecoder(const CreateDecoderParams& aParams)
+{
+ RefPtr<OmxDataDecoder> decoder = new OmxDataDecoder(aParams.mConfig,
+ aParams.mCallback,
+ nullptr);
+ return decoder.forget();
+}
+
+PlatformDecoderModule::ConversionRequired
+OmxDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
+{
+ return ConversionRequired::kNeedNone;
+}
+
+bool
+OmxDecoderModule::SupportsMimeType(const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const
+{
+ return OmxPlatformLayer::SupportsMimeType(aMimeType);
+}
+
+}
diff --git a/dom/media/platforms/omx/OmxDecoderModule.h b/dom/media/platforms/omx/OmxDecoderModule.h
new file mode 100644
index 000000000..432368a69
--- /dev/null
+++ b/dom/media/platforms/omx/OmxDecoderModule.h
@@ -0,0 +1,30 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(OmxDecoderModule_h_)
+#define OmxDecoderModule_h_
+
+#include "PlatformDecoderModule.h"
+
+namespace mozilla {
+
+class OmxDecoderModule : public PlatformDecoderModule {
+public:
+ already_AddRefed<MediaDataDecoder>
+ CreateVideoDecoder(const CreateDecoderParams& aParams) override;
+
+ already_AddRefed<MediaDataDecoder>
+ CreateAudioDecoder(const CreateDecoderParams& aParams) override;
+
+ bool SupportsMimeType(const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const override;
+
+ ConversionRequired DecoderNeedsConversion(const TrackInfo& aConfig) const override;
+};
+
+} // namespace mozilla
+
+#endif // OmxDecoderModule_h_
diff --git a/dom/media/platforms/omx/OmxPlatformLayer.cpp b/dom/media/platforms/omx/OmxPlatformLayer.cpp
new file mode 100644
index 000000000..d1f43144d
--- /dev/null
+++ b/dom/media/platforms/omx/OmxPlatformLayer.cpp
@@ -0,0 +1,327 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "OmxPlatformLayer.h"
+
+#include "OMX_VideoExt.h" // For VP8.
+
+#if defined(MOZ_WIDGET_GONK) && (ANDROID_VERSION == 20 || ANDROID_VERSION == 19)
+#define OMX_PLATFORM_GONK
+#include "GonkOmxPlatformLayer.h"
+#endif
+
+#include "VPXDecoder.h"
+
+#ifdef LOG
+#undef LOG
+#endif
+
+#define LOG(arg, ...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, ("OmxPlatformLayer -- %s: " arg, __func__, ##__VA_ARGS__))
+
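+// Log an OMX error returned by a configuration call and propagate it to the
+// caller.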
+#define RETURN_IF_ERR(err) \
+ if (err != OMX_ErrorNone) { \
+ LOG("error: 0x%08x", err); \
+ return err; \
+ } \
+
+// Common OMX decoder configuration code.
+namespace mozilla {
+
+// This helper class encapsulates the details of component parameter setup
+// for the different OMX audio & video codecs.
+template<typename ParamType>
+class OmxConfig
+{
+public:
+ virtual ~OmxConfig() {}
+ // Subclasses should implement this method to configure the codec.
+ virtual OMX_ERRORTYPE Apply(OmxPlatformLayer& aOmx, const ParamType& aParam) = 0;
+};
+
+typedef OmxConfig<AudioInfo> OmxAudioConfig;
+typedef OmxConfig<VideoInfo> OmxVideoConfig;
+
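+// Maps a MIME type to the matching OmxConfig subclass; specialized below for
+// audio and video. Returns nullptr when the platform layer has no support for
+// that type.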
+template<typename ConfigType>
+UniquePtr<ConfigType> ConfigForMime(const nsACString&);
+
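+// Common audio output-port setup shared by all audio configs: switch the output
+// port to PCM and request 16-bit signed interleaved samples at the stream's
+// channel count and sample rate.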
+static OMX_ERRORTYPE
+ConfigAudioOutputPort(OmxPlatformLayer& aOmx, const AudioInfo& aInfo)
+{
+ OMX_ERRORTYPE err;
+
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOmxParameter(&def);
+ def.nPortIndex = aOmx.OutputPortIndex();
+ err = aOmx.GetParameter(OMX_IndexParamPortDefinition, &def, sizeof(def));
+ RETURN_IF_ERR(err);
+
+ def.format.audio.eEncoding = OMX_AUDIO_CodingPCM;
+ err = aOmx.SetParameter(OMX_IndexParamPortDefinition, &def, sizeof(def));
+ RETURN_IF_ERR(err);
+
+ OMX_AUDIO_PARAM_PCMMODETYPE pcmParams;
+ InitOmxParameter(&pcmParams);
+ pcmParams.nPortIndex = def.nPortIndex;
+ err = aOmx.GetParameter(OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
+ RETURN_IF_ERR(err);
+
+ pcmParams.nChannels = aInfo.mChannels;
+ pcmParams.eNumData = OMX_NumericalDataSigned;
+ pcmParams.bInterleaved = OMX_TRUE;
+ pcmParams.nBitPerSample = 16;
+ pcmParams.nSamplingRate = aInfo.mRate;
+ pcmParams.ePCMMode = OMX_AUDIO_PCMModeLinear;
+ err = aOmx.SetParameter(OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
+ RETURN_IF_ERR(err);
+
+ LOG("Config OMX_IndexParamAudioPcm, channel %d, sample rate %d",
+ pcmParams.nChannels, pcmParams.nSamplingRate);
+
+ return OMX_ErrorNone;
+}
+
+class OmxAacConfig : public OmxAudioConfig
+{
+public:
+ OMX_ERRORTYPE Apply(OmxPlatformLayer& aOmx, const AudioInfo& aInfo) override
+ {
+ OMX_ERRORTYPE err;
+
+ OMX_AUDIO_PARAM_AACPROFILETYPE aacProfile;
+ InitOmxParameter(&aacProfile);
+ aacProfile.nPortIndex = aOmx.InputPortIndex();
+ err = aOmx.GetParameter(OMX_IndexParamAudioAac, &aacProfile, sizeof(aacProfile));
+ RETURN_IF_ERR(err);
+
+ aacProfile.nChannels = aInfo.mChannels;
+ aacProfile.nSampleRate = aInfo.mRate;
+ aacProfile.eAACProfile = static_cast<OMX_AUDIO_AACPROFILETYPE>(aInfo.mProfile);
+ err = aOmx.SetParameter(OMX_IndexParamAudioAac, &aacProfile, sizeof(aacProfile));
+ RETURN_IF_ERR(err);
+
+ LOG("Config OMX_IndexParamAudioAac, channel %d, sample rate %d, profile %d",
+ aacProfile.nChannels, aacProfile.nSampleRate, aacProfile.eAACProfile);
+
+ return ConfigAudioOutputPort(aOmx, aInfo);
+ }
+};
+
+class OmxMp3Config : public OmxAudioConfig
+{
+public:
+ OMX_ERRORTYPE Apply(OmxPlatformLayer& aOmx, const AudioInfo& aInfo) override
+ {
+ OMX_ERRORTYPE err;
+
+ OMX_AUDIO_PARAM_MP3TYPE mp3Param;
+ InitOmxParameter(&mp3Param);
+ mp3Param.nPortIndex = aOmx.InputPortIndex();
+ err = aOmx.GetParameter(OMX_IndexParamAudioMp3, &mp3Param, sizeof(mp3Param));
+ RETURN_IF_ERR(err);
+
+ mp3Param.nChannels = aInfo.mChannels;
+ mp3Param.nSampleRate = aInfo.mRate;
+ err = aOmx.SetParameter(OMX_IndexParamAudioMp3, &mp3Param, sizeof(mp3Param));
+ RETURN_IF_ERR(err);
+
+ LOG("Config OMX_IndexParamAudioMp3, channel %d, sample rate %d",
+ mp3Param.nChannels, mp3Param.nSampleRate);
+
+ return ConfigAudioOutputPort(aOmx, aInfo);
+ }
+};
+
+enum OmxAmrSampleRate {
+ kNarrowBand = 8000,
+ kWideBand = 16000,
+};
+
+template <OmxAmrSampleRate R>
+class OmxAmrConfig : public OmxAudioConfig
+{
+public:
+ OMX_ERRORTYPE Apply(OmxPlatformLayer& aOmx, const AudioInfo& aInfo) override
+ {
+ OMX_ERRORTYPE err;
+
+ OMX_AUDIO_PARAM_AMRTYPE def;
+ InitOmxParameter(&def);
+ def.nPortIndex = aOmx.InputPortIndex();
+ err = aOmx.GetParameter(OMX_IndexParamAudioAmr, &def, sizeof(def));
+ RETURN_IF_ERR(err);
+
+ def.eAMRFrameFormat = OMX_AUDIO_AMRFrameFormatFSF;
+ err = aOmx.SetParameter(OMX_IndexParamAudioAmr, &def, sizeof(def));
+ RETURN_IF_ERR(err);
+
+ MOZ_ASSERT(aInfo.mChannels == 1);
+ MOZ_ASSERT(aInfo.mRate == R);
+
+ return ConfigAudioOutputPort(aOmx, aInfo);
+ }
+};
+
+template<>
+UniquePtr<OmxAudioConfig>
+ConfigForMime(const nsACString& aMimeType)
+{
+ UniquePtr<OmxAudioConfig> conf;
+
+ if (OmxPlatformLayer::SupportsMimeType(aMimeType)) {
+ if (aMimeType.EqualsLiteral("audio/mp4a-latm")) {
+ conf.reset(new OmxAacConfig());
+ } else if (aMimeType.EqualsLiteral("audio/mp3") ||
+ aMimeType.EqualsLiteral("audio/mpeg")) {
+ conf.reset(new OmxMp3Config());
+ } else if (aMimeType.EqualsLiteral("audio/3gpp")) {
+ conf.reset(new OmxAmrConfig<OmxAmrSampleRate::kNarrowBand>());
+ } else if (aMimeType.EqualsLiteral("audio/amr-wb")) {
+ conf.reset(new OmxAmrConfig<OmxAmrSampleRate::kWideBand>());
+ }
+ }
+ return Move(conf);
+}
+
+// There should be a better way to calculate it.
+#define MIN_VIDEO_INPUT_BUFFER_SIZE 64 * 1024
+
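+// Generic video port configuration shared by all supported video codecs: sets
+// the frame size, stride and slice height on both ports, the compression format
+// on the input port, and enlarges the input buffer size if it is too small.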
+class OmxCommonVideoConfig : public OmxVideoConfig
+{
+public:
+ explicit OmxCommonVideoConfig()
+ : OmxVideoConfig()
+ {}
+
+ OMX_ERRORTYPE Apply(OmxPlatformLayer& aOmx, const VideoInfo& aInfo) override
+ {
+ OMX_ERRORTYPE err;
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+
+ // Set up in/out port definition.
+ nsTArray<uint32_t> ports;
+ aOmx.GetPortIndices(ports);
+ for (auto idx : ports) {
+ InitOmxParameter(&def);
+ def.nPortIndex = idx;
+ err = aOmx.GetParameter(OMX_IndexParamPortDefinition, &def, sizeof(def));
+ RETURN_IF_ERR(err);
+
+ def.format.video.nFrameWidth = aInfo.mDisplay.width;
+ def.format.video.nFrameHeight = aInfo.mDisplay.height;
+ def.format.video.nStride = aInfo.mImage.width;
+ def.format.video.nSliceHeight = aInfo.mImage.height;
+
+ if (def.eDir == OMX_DirInput) {
+ def.format.video.eCompressionFormat = aOmx.CompressionFormat();
+ def.format.video.eColorFormat = OMX_COLOR_FormatUnused;
+ if (def.nBufferSize < MIN_VIDEO_INPUT_BUFFER_SIZE) {
+ def.nBufferSize = aInfo.mImage.width * aInfo.mImage.height;
+ LOG("Change input buffer size to %d", def.nBufferSize);
+ }
+ } else {
+ def.format.video.eCompressionFormat = OMX_VIDEO_CodingUnused;
+ }
+
+ err = aOmx.SetParameter(OMX_IndexParamPortDefinition, &def, sizeof(def));
+ }
+ return err;
+ }
+};
+
+template<>
+UniquePtr<OmxVideoConfig>
+ConfigForMime(const nsACString& aMimeType)
+{
+ UniquePtr<OmxVideoConfig> conf;
+
+ if (OmxPlatformLayer::SupportsMimeType(aMimeType)) {
+ conf.reset(new OmxCommonVideoConfig());
+ }
+ return Move(conf);
+}
+
+OMX_ERRORTYPE
+OmxPlatformLayer::Config()
+{
+ MOZ_ASSERT(mInfo);
+
+ OMX_PORT_PARAM_TYPE portParam;
+ InitOmxParameter(&portParam);
+ if (mInfo->IsAudio()) {
+ GetParameter(OMX_IndexParamAudioInit, &portParam, sizeof(portParam));
+ mStartPortNumber = portParam.nStartPortNumber;
+ UniquePtr<OmxAudioConfig> conf(ConfigForMime<OmxAudioConfig>(mInfo->mMimeType));
+ MOZ_ASSERT(conf.get());
+ return conf->Apply(*this, *(mInfo->GetAsAudioInfo()));
+ } else if (mInfo->IsVideo()) {
+ GetParameter(OMX_IndexParamVideoInit, &portParam, sizeof(portParam));
+ UniquePtr<OmxVideoConfig> conf(ConfigForMime<OmxVideoConfig>(mInfo->mMimeType));
+ MOZ_ASSERT(conf.get());
+ return conf->Apply(*this, *(mInfo->GetAsVideoInfo()));
+ } else {
+ MOZ_ASSERT_UNREACHABLE("non-AV data (text?) is not supported.");
+ return OMX_ErrorNotImplemented;
+ }
+}
+
+OMX_VIDEO_CODINGTYPE
+OmxPlatformLayer::CompressionFormat()
+{
+ MOZ_ASSERT(mInfo);
+
+ if (mInfo->mMimeType.EqualsLiteral("video/avc")) {
+ return OMX_VIDEO_CodingAVC;
+ } else if (mInfo->mMimeType.EqualsLiteral("video/mp4v-es") ||
+ mInfo->mMimeType.EqualsLiteral("video/mp4")) {
+ return OMX_VIDEO_CodingMPEG4;
+ } else if (mInfo->mMimeType.EqualsLiteral("video/3gpp")) {
+ return OMX_VIDEO_CodingH263;
+ } else if (VPXDecoder::IsVP8(mInfo->mMimeType)) {
+ return static_cast<OMX_VIDEO_CODINGTYPE>(OMX_VIDEO_CodingVP8);
+ } else {
+ MOZ_ASSERT_UNREACHABLE("Unsupported compression format");
+ return OMX_VIDEO_CodingUnused;
+ }
+}
+
+// Implementations for different platforms will be defined in their own files.
+#ifdef OMX_PLATFORM_GONK
+
+bool
+OmxPlatformLayer::SupportsMimeType(const nsACString& aMimeType)
+{
+ return GonkOmxPlatformLayer::FindComponents(aMimeType);
+}
+
+OmxPlatformLayer*
+OmxPlatformLayer::Create(OmxDataDecoder* aDataDecoder,
+ OmxPromiseLayer* aPromiseLayer,
+ TaskQueue* aTaskQueue,
+ layers::ImageContainer* aImageContainer)
+{
+ return new GonkOmxPlatformLayer(aDataDecoder, aPromiseLayer, aTaskQueue, aImageContainer);
+}
+
+#else // For platforms without OMX IL support.
+
+bool
+OmxPlatformLayer::SupportsMimeType(const nsACString& aMimeType)
+{
+ return false;
+}
+
+OmxPlatformLayer*
+OmxPlatformLayer::Create(OmxDataDecoder* aDataDecoder,
+ OmxPromiseLayer* aPromiseLayer,
+ TaskQueue* aTaskQueue,
+ layers::ImageContainer* aImageContainer)
+{
+ return nullptr;
+}
+
+#endif
+
+}
diff --git a/dom/media/platforms/omx/OmxPlatformLayer.h b/dom/media/platforms/omx/OmxPlatformLayer.h
new file mode 100644
index 000000000..67d9e448f
--- /dev/null
+++ b/dom/media/platforms/omx/OmxPlatformLayer.h
@@ -0,0 +1,100 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(OmxPlatformLayer_h_)
+#define OmxPlatformLayer_h_
+
+#include "OMX_Core.h"
+#include "OMX_Types.h"
+
+#include "OmxPromiseLayer.h"
+
+class nsACString;
+
+namespace mozilla {
+
+class TaskQueue;
+class TrackInfo;
+
+/*
+ * This class is the abstraction layer over the platform's OpenMax IL implementation.
+ *
+ * Some platforms, such as Android, expose their OpenMax IL through IOMX, whose
+ * definitions differ from the standard.
+ * Other platforms, such as the Raspberry Pi, can implement this layer directly
+ * on top of the standard OpenMax IL API.
+ */
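+//
+// A new platform backend subclasses OmxPlatformLayer, implements the pure
+// virtual methods below (InitOmxToStateLoaded, EmptyThisBuffer, FillThisBuffer,
+// SendCommand, buffer allocation/release, ...) and is returned from
+// OmxPlatformLayer::Create(); GonkOmxPlatformLayer is the existing example.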
+class OmxPlatformLayer {
+public:
+ typedef OmxPromiseLayer::BUFFERLIST BUFFERLIST;
+ typedef OmxPromiseLayer::BufferData BufferData;
+
+ virtual OMX_ERRORTYPE InitOmxToStateLoaded(const TrackInfo* aInfo) = 0;
+
+ OMX_ERRORTYPE Config();
+
+ virtual OMX_ERRORTYPE EmptyThisBuffer(BufferData* aData) = 0;
+
+ virtual OMX_ERRORTYPE FillThisBuffer(BufferData* aData) = 0;
+
+ virtual OMX_ERRORTYPE SendCommand(OMX_COMMANDTYPE aCmd,
+ OMX_U32 aParam1,
+ OMX_PTR aCmdData) = 0;
+
+  // Buffers can be platform dependent; for example, video decoding needs gralloc
+  // on Gonk. Therefore, a derived class needs to implement its own buffer
+  // allocate/release API according to its platform.
+ virtual nsresult AllocateOmxBuffer(OMX_DIRTYPE aType, BUFFERLIST* aBufferList) = 0;
+
+ virtual nsresult ReleaseOmxBuffer(OMX_DIRTYPE aType, BUFFERLIST* aBufferList) = 0;
+
+ virtual OMX_ERRORTYPE GetState(OMX_STATETYPE* aType) = 0;
+
+ virtual OMX_ERRORTYPE GetParameter(OMX_INDEXTYPE aParamIndex,
+ OMX_PTR aComponentParameterStructure,
+ OMX_U32 aComponentParameterSize) = 0;
+
+ virtual OMX_ERRORTYPE SetParameter(OMX_INDEXTYPE nIndex,
+ OMX_PTR aComponentParameterStructure,
+ OMX_U32 aComponentParameterSize) = 0;
+
+ virtual nsresult Shutdown() = 0;
+
+ virtual ~OmxPlatformLayer() {}
+
+  // For decoders, the input port index is the start port number and the output
+  // port is the next one.
+  // See OpenMAX IL spec v1.1.2 sections 8.6.1 & 8.8.1.
+ OMX_U32 InputPortIndex() { return mStartPortNumber; }
+
+ OMX_U32 OutputPortIndex() { return mStartPortNumber + 1; }
+
+ void GetPortIndices(nsTArray<uint32_t>& aPortIndex) {
+ aPortIndex.AppendElement(InputPortIndex());
+ aPortIndex.AppendElement(OutputPortIndex());
+ }
+
+ virtual OMX_VIDEO_CODINGTYPE CompressionFormat();
+
+ // Check if the platform implementation supports given MIME type.
+ static bool SupportsMimeType(const nsACString& aMimeType);
+
+ // Hide the details of creating implementation objects for different platforms.
+ static OmxPlatformLayer* Create(OmxDataDecoder* aDataDecoder,
+ OmxPromiseLayer* aPromiseLayer,
+ TaskQueue* aTaskQueue,
+ layers::ImageContainer* aImageContainer);
+
+protected:
+ OmxPlatformLayer() : mInfo(nullptr), mStartPortNumber(0) {}
+
+ // The pointee is held by |OmxDataDecoder::mTrackInfo| and will outlive this pointer.
+ const TrackInfo* mInfo;
+ OMX_U32 mStartPortNumber;
+};
+
+}
+
+#endif // OmxPlatformLayer_h_
diff --git a/dom/media/platforms/omx/OmxPromiseLayer.cpp b/dom/media/platforms/omx/OmxPromiseLayer.cpp
new file mode 100644
index 000000000..0c794289d
--- /dev/null
+++ b/dom/media/platforms/omx/OmxPromiseLayer.cpp
@@ -0,0 +1,382 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "OmxPromiseLayer.h"
+
+#include "ImageContainer.h"
+
+#include "OmxDataDecoder.h"
+#include "OmxPlatformLayer.h"
+
+
+#ifdef LOG
+#undef LOG
+#endif
+
+#define LOG(arg, ...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, ("OmxPromiseLayer(%p)::%s: " arg, this, __func__, ##__VA_ARGS__))
+
+namespace mozilla {
+
+OmxPromiseLayer::OmxPromiseLayer(TaskQueue* aTaskQueue,
+ OmxDataDecoder* aDataDecoder,
+ layers::ImageContainer* aImageContainer)
+ : mTaskQueue(aTaskQueue)
+{
+ mPlatformLayer = OmxPlatformLayer::Create(aDataDecoder,
+ this,
+ aTaskQueue,
+ aImageContainer);
+ MOZ_ASSERT(!!mPlatformLayer);
+}
+
+RefPtr<OmxPromiseLayer::OmxCommandPromise>
+OmxPromiseLayer::Init(const TrackInfo* aInfo)
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+ OMX_ERRORTYPE err = mPlatformLayer->InitOmxToStateLoaded(aInfo);
+ if (err != OMX_ErrorNone) {
+ OmxCommandFailureHolder failure(OMX_ErrorUndefined, OMX_CommandStateSet);
+ return OmxCommandPromise::CreateAndReject(failure, __func__);
+ }
+
+ OMX_STATETYPE state = GetState();
+ if (state == OMX_StateLoaded) {
+ return OmxCommandPromise::CreateAndResolve(OMX_CommandStateSet, __func__);
+  } else if (state == OMX_StateIdle) {
+ return SendCommand(OMX_CommandStateSet, OMX_StateIdle, nullptr);
+ }
+
+ OmxCommandFailureHolder failure(OMX_ErrorUndefined, OMX_CommandStateSet);
+ return OmxCommandPromise::CreateAndReject(failure, __func__);
+}
+
+OMX_ERRORTYPE
+OmxPromiseLayer::Config()
+{
+ MOZ_ASSERT(GetState() == OMX_StateLoaded);
+
+ return mPlatformLayer->Config();
+}
+
+RefPtr<OmxPromiseLayer::OmxBufferPromise>
+OmxPromiseLayer::FillBuffer(BufferData* aData)
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ LOG("buffer %p", aData->mBuffer);
+
+ RefPtr<OmxBufferPromise> p = aData->mPromise.Ensure(__func__);
+
+ OMX_ERRORTYPE err = mPlatformLayer->FillThisBuffer(aData);
+
+ if (err != OMX_ErrorNone) {
+ OmxBufferFailureHolder failure(err, aData);
+ aData->mPromise.Reject(Move(failure), __func__);
+ } else {
+ aData->mStatus = BufferData::BufferStatus::OMX_COMPONENT;
+ GetBufferHolders(OMX_DirOutput)->AppendElement(aData);
+ }
+
+ return p;
+}
+
+RefPtr<OmxPromiseLayer::OmxBufferPromise>
+OmxPromiseLayer::EmptyBuffer(BufferData* aData)
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ LOG("buffer %p, size %d", aData->mBuffer, aData->mBuffer->nFilledLen);
+
+ RefPtr<OmxBufferPromise> p = aData->mPromise.Ensure(__func__);
+
+ OMX_ERRORTYPE err = mPlatformLayer->EmptyThisBuffer(aData);
+
+ if (err != OMX_ErrorNone) {
+ OmxBufferFailureHolder failure(err, aData);
+ aData->mPromise.Reject(Move(failure), __func__);
+ } else {
+ if (aData->mRawData) {
+ mRawDatas.AppendElement(Move(aData->mRawData));
+ }
+ aData->mStatus = BufferData::BufferStatus::OMX_COMPONENT;
+ GetBufferHolders(OMX_DirInput)->AppendElement(aData);
+ }
+
+ return p;
+}
+
+OmxPromiseLayer::BUFFERLIST*
+OmxPromiseLayer::GetBufferHolders(OMX_DIRTYPE aType)
+{
+ MOZ_ASSERT(aType == OMX_DirInput || aType == OMX_DirOutput);
+
+ if (aType == OMX_DirInput) {
+ return &mInbufferHolders;
+ }
+
+ return &mOutbufferHolders;
+}
+
+already_AddRefed<MediaRawData>
+OmxPromiseLayer::FindAndRemoveRawData(OMX_TICKS aTimecode)
+{
+ for (auto raw : mRawDatas) {
+ if (raw->mTime == aTimecode) {
+ mRawDatas.RemoveElement(raw);
+ return raw.forget();
+ }
+ }
+ return nullptr;
+}
+
+already_AddRefed<BufferData>
+OmxPromiseLayer::FindAndRemoveBufferHolder(OMX_DIRTYPE aType,
+ BufferData::BufferID aId)
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+ RefPtr<BufferData> holder;
+ BUFFERLIST* holders = GetBufferHolders(aType);
+
+ for (uint32_t i = 0; i < holders->Length(); i++) {
+ if (holders->ElementAt(i)->ID() == aId) {
+ holder = holders->ElementAt(i);
+ holders->RemoveElementAt(i);
+ return holder.forget();
+ }
+ }
+
+ return nullptr;
+}
+
+already_AddRefed<BufferData>
+OmxPromiseLayer::FindBufferById(OMX_DIRTYPE aType, BufferData::BufferID aId)
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+
+ RefPtr<BufferData> holder;
+ BUFFERLIST* holders = GetBufferHolders(aType);
+
+ for (uint32_t i = 0; i < holders->Length(); i++) {
+ if (holders->ElementAt(i)->ID() == aId) {
+ holder = holders->ElementAt(i);
+ return holder.forget();
+ }
+ }
+
+ return nullptr;
+}
+
+void
+OmxPromiseLayer::EmptyFillBufferDone(OMX_DIRTYPE aType, BufferData* aData)
+{
+ if (aData) {
+ LOG("type %d, buffer %p", aType, aData->mBuffer);
+ if (aType == OMX_DirOutput) {
+ aData->mRawData = nullptr;
+ aData->mRawData = FindAndRemoveRawData(aData->mBuffer->nTimeStamp);
+ }
+ aData->mStatus = BufferData::BufferStatus::OMX_CLIENT;
+ aData->mPromise.Resolve(aData, __func__);
+ } else {
+ LOG("type %d, no buffer", aType);
+ }
+}
+
+void
+OmxPromiseLayer::EmptyFillBufferDone(OMX_DIRTYPE aType, BufferData::BufferID aID)
+{
+ RefPtr<BufferData> holder = FindAndRemoveBufferHolder(aType, aID);
+ EmptyFillBufferDone(aType, holder);
+}
+
+RefPtr<OmxPromiseLayer::OmxCommandPromise>
+OmxPromiseLayer::SendCommand(OMX_COMMANDTYPE aCmd, OMX_U32 aParam1, OMX_PTR aCmdData)
+{
+ if (aCmd == OMX_CommandFlush) {
+    // Issuing another flush command before the previous one completes is not supported.
+ MOZ_RELEASE_ASSERT(!mFlushCommands.Length());
+
+    // Some components don't send an event for OMX_ALL; they send one flush complete
+    // event for the input port and another one for the output port.
+    // For better compatibility, we translate OMX_ALL into separate OMX_DirInput
+    // and OMX_DirOutput flushes.
+ OMX_DIRTYPE types[] = {OMX_DIRTYPE::OMX_DirInput, OMX_DIRTYPE::OMX_DirOutput};
+ for(const auto type : types) {
+ if ((aParam1 == type) || (aParam1 == OMX_ALL)) {
+ mFlushCommands.AppendElement(FlushCommand({type, aCmdData}));
+ }
+
+ if (type == OMX_DirInput) {
+ // Clear all buffered raw data.
+ mRawDatas.Clear();
+ }
+ }
+
+    // Don't keep more than one flush command in flight, since some components
+    // can't handle overlapping flushes. The next flush is sent only after the
+    // previous flush completed event has been received.
+ if (mFlushCommands.Length()) {
+ OMX_ERRORTYPE err =
+ mPlatformLayer->SendCommand(OMX_CommandFlush,
+ mFlushCommands.ElementAt(0).type,
+ mFlushCommands.ElementAt(0).cmd);
+ if (err != OMX_ErrorNone) {
+ OmxCommandFailureHolder failure(OMX_ErrorNotReady, OMX_CommandFlush);
+ return OmxCommandPromise::CreateAndReject(failure, __func__);
+ }
+ } else {
+ LOG("OMX_CommandFlush parameter error");
+ OmxCommandFailureHolder failure(OMX_ErrorNotReady, OMX_CommandFlush);
+ return OmxCommandPromise::CreateAndReject(failure, __func__);
+ }
+ } else {
+ OMX_ERRORTYPE err = mPlatformLayer->SendCommand(aCmd, aParam1, aCmdData);
+ if (err != OMX_ErrorNone) {
+ OmxCommandFailureHolder failure(OMX_ErrorNotReady, aCmd);
+ return OmxCommandPromise::CreateAndReject(failure, __func__);
+ }
+ }
+
+ RefPtr<OmxCommandPromise> p;
+ if (aCmd == OMX_CommandStateSet) {
+ p = mCommandStatePromise.Ensure(__func__);
+ } else if (aCmd == OMX_CommandFlush) {
+ p = mFlushPromise.Ensure(__func__);
+ } else if (aCmd == OMX_CommandPortEnable) {
+ p = mPortEnablePromise.Ensure(__func__);
+ } else if (aCmd == OMX_CommandPortDisable) {
+ p = mPortDisablePromise.Ensure(__func__);
+ } else {
+ LOG("error unsupport command");
+ MOZ_ASSERT(0);
+ }
+
+ return p;
+}
+
+bool
+OmxPromiseLayer::Event(OMX_EVENTTYPE aEvent, OMX_U32 aData1, OMX_U32 aData2)
+{
+ OMX_COMMANDTYPE cmd = (OMX_COMMANDTYPE) aData1;
+ switch (aEvent) {
+ case OMX_EventCmdComplete:
+ {
+ if (cmd == OMX_CommandStateSet) {
+ mCommandStatePromise.Resolve(OMX_CommandStateSet, __func__);
+ } else if (cmd == OMX_CommandFlush) {
+ MOZ_RELEASE_ASSERT(mFlushCommands.ElementAt(0).type == aData2);
+ LOG("OMX_CommandFlush completed port type %d", aData2);
+ mFlushCommands.RemoveElementAt(0);
+
+ // Sending next flush command.
+ if (mFlushCommands.Length()) {
+ OMX_ERRORTYPE err =
+ mPlatformLayer->SendCommand(OMX_CommandFlush,
+ mFlushCommands.ElementAt(0).type,
+ mFlushCommands.ElementAt(0).cmd);
+ if (err != OMX_ErrorNone) {
+ OmxCommandFailureHolder failure(OMX_ErrorNotReady, OMX_CommandFlush);
+ mFlushPromise.Reject(failure, __func__);
+ }
+ } else {
+ mFlushPromise.Resolve(OMX_CommandFlush, __func__);
+ }
+ } else if (cmd == OMX_CommandPortDisable) {
+ mPortDisablePromise.Resolve(OMX_CommandPortDisable, __func__);
+ } else if (cmd == OMX_CommandPortEnable) {
+ mPortEnablePromise.Resolve(OMX_CommandPortEnable, __func__);
+ }
+ break;
+ }
+ case OMX_EventError:
+ {
+ if (cmd == OMX_CommandStateSet) {
+ OmxCommandFailureHolder failure(OMX_ErrorUndefined, OMX_CommandStateSet);
+ mCommandStatePromise.Reject(failure, __func__);
+ } else if (cmd == OMX_CommandFlush) {
+ OmxCommandFailureHolder failure(OMX_ErrorUndefined, OMX_CommandFlush);
+ mFlushPromise.Reject(failure, __func__);
+ } else if (cmd == OMX_CommandPortDisable) {
+ OmxCommandFailureHolder failure(OMX_ErrorUndefined, OMX_CommandPortDisable);
+ mPortDisablePromise.Reject(failure, __func__);
+ } else if (cmd == OMX_CommandPortEnable) {
+ OmxCommandFailureHolder failure(OMX_ErrorUndefined, OMX_CommandPortEnable);
+ mPortEnablePromise.Reject(failure, __func__);
+ } else {
+ return false;
+ }
+ break;
+ }
+ default:
+ {
+ return false;
+ }
+ }
+ return true;
+}
+
+nsresult
+OmxPromiseLayer::AllocateOmxBuffer(OMX_DIRTYPE aType, BUFFERLIST* aBuffers)
+{
+ return mPlatformLayer->AllocateOmxBuffer(aType, aBuffers);
+}
+
+nsresult
+OmxPromiseLayer::ReleaseOmxBuffer(OMX_DIRTYPE aType, BUFFERLIST* aBuffers)
+{
+ return mPlatformLayer->ReleaseOmxBuffer(aType, aBuffers);
+}
+
+OMX_STATETYPE
+OmxPromiseLayer::GetState()
+{
+ OMX_STATETYPE state;
+ OMX_ERRORTYPE err = mPlatformLayer->GetState(&state);
+ return err == OMX_ErrorNone ? state : OMX_StateInvalid;
+}
+
+OMX_ERRORTYPE
+OmxPromiseLayer::GetParameter(OMX_INDEXTYPE aParamIndex,
+ OMX_PTR aComponentParameterStructure,
+ OMX_U32 aComponentParameterSize)
+{
+ return mPlatformLayer->GetParameter(aParamIndex,
+ aComponentParameterStructure,
+ aComponentParameterSize);
+}
+
+OMX_ERRORTYPE
+OmxPromiseLayer::SetParameter(OMX_INDEXTYPE aParamIndex,
+ OMX_PTR aComponentParameterStructure,
+ OMX_U32 aComponentParameterSize)
+{
+ return mPlatformLayer->SetParameter(aParamIndex,
+ aComponentParameterStructure,
+ aComponentParameterSize);
+}
+
+OMX_U32
+OmxPromiseLayer::InputPortIndex()
+{
+ return mPlatformLayer->InputPortIndex();
+}
+
+OMX_U32
+OmxPromiseLayer::OutputPortIndex()
+{
+ return mPlatformLayer->OutputPortIndex();
+}
+
+nsresult
+OmxPromiseLayer::Shutdown()
+{
+ LOG("");
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ MOZ_ASSERT(!GetBufferHolders(OMX_DirInput)->Length());
+ MOZ_ASSERT(!GetBufferHolders(OMX_DirOutput)->Length());
+ return mPlatformLayer->Shutdown();
+}
+
+}
diff --git a/dom/media/platforms/omx/OmxPromiseLayer.h b/dom/media/platforms/omx/OmxPromiseLayer.h
new file mode 100644
index 000000000..ecc6b2a9d
--- /dev/null
+++ b/dom/media/platforms/omx/OmxPromiseLayer.h
@@ -0,0 +1,252 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(OmxPromiseLayer_h_)
+#define OmxPromiseLayer_h_
+
+#include "mozilla/MozPromise.h"
+#include "mozilla/TaskQueue.h"
+#include "nsAutoPtr.h"
+
+#include "OMX_Core.h"
+#include "OMX_Types.h"
+
+namespace mozilla {
+
+namespace layers
+{
+class ImageContainer;
+}
+
+class MediaData;
+class MediaRawData;
+class OmxDataDecoder;
+class OmxPlatformLayer;
+class TrackInfo;
+
+/* This class acts as a middle layer between OmxDataDecoder and the underlying
+ * OmxPlatformLayer.
+ *
+ * This class has two purposes:
+ * 1. Use promises instead of the OpenMax async callback functions.
+ *    For example, OmxCommandPromise is used for OpenMax IL SendCommand.
+ * 2. Manage the buffers exchanged between client and component.
+ *    Because OMX buffers are used across threads, each buffer has its own
+ *    promise, defined in BufferData.
+ *
+ * All functions and members of this class should be used on the same
+ * TaskQueue.
+ */
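+//
+// Illustrative usage (a sketch only, not code from this patch): a caller on the
+// same TaskQueue chains the returned promise, e.g.
+//
+//   omxPromiseLayer->SendCommand(OMX_CommandStateSet, OMX_StateIdle, nullptr)
+//     ->Then(taskQueue, __func__,
+//            [] () { /* command completed */ },
+//            [] () { /* command failed */ });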
+class OmxPromiseLayer {
+protected:
+ virtual ~OmxPromiseLayer() {}
+
+public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(OmxPromiseLayer)
+
+ OmxPromiseLayer(TaskQueue* aTaskQueue,
+ OmxDataDecoder* aDataDecoder,
+ layers::ImageContainer* aImageContainer);
+
+ class BufferData;
+
+ typedef nsTArray<RefPtr<BufferData>> BUFFERLIST;
+
+ class OmxBufferFailureHolder {
+ public:
+ OmxBufferFailureHolder(OMX_ERRORTYPE aError, BufferData* aBuffer)
+ : mError(aError)
+ , mBuffer(aBuffer)
+ {}
+
+ OMX_ERRORTYPE mError;
+ BufferData* mBuffer;
+ };
+
+ typedef MozPromise<BufferData*, OmxBufferFailureHolder, /* IsExclusive = */ false> OmxBufferPromise;
+
+ class OmxCommandFailureHolder {
+ public:
+ OmxCommandFailureHolder(OMX_ERRORTYPE aErrorType,
+ OMX_COMMANDTYPE aCommandType)
+ : mErrorType(aErrorType)
+ , mCommandType(aCommandType)
+ {}
+
+ OMX_ERRORTYPE mErrorType;
+ OMX_COMMANDTYPE mCommandType;
+ };
+
+ typedef MozPromise<OMX_COMMANDTYPE, OmxCommandFailureHolder, /* IsExclusive = */ true> OmxCommandPromise;
+
+ typedef MozPromise<uint32_t, bool, /* IsExclusive = */ true> OmxPortConfigPromise;
+
+ // TODO: maybe a generic promise is good enough for this case?
+ RefPtr<OmxCommandPromise> Init(const TrackInfo* aInfo);
+
+ OMX_ERRORTYPE Config();
+
+ RefPtr<OmxBufferPromise> FillBuffer(BufferData* aData);
+
+ RefPtr<OmxBufferPromise> EmptyBuffer(BufferData* aData);
+
+ RefPtr<OmxCommandPromise> SendCommand(OMX_COMMANDTYPE aCmd,
+ OMX_U32 aParam1,
+ OMX_PTR aCmdData);
+
+ nsresult AllocateOmxBuffer(OMX_DIRTYPE aType, BUFFERLIST* aBuffers);
+
+ nsresult ReleaseOmxBuffer(OMX_DIRTYPE aType, BUFFERLIST* aBuffers);
+
+ OMX_STATETYPE GetState();
+
+ OMX_ERRORTYPE GetParameter(OMX_INDEXTYPE aParamIndex,
+ OMX_PTR aComponentParameterStructure,
+ OMX_U32 aComponentParameterSize);
+
+ OMX_ERRORTYPE SetParameter(OMX_INDEXTYPE nIndex,
+ OMX_PTR aComponentParameterStructure,
+ OMX_U32 aComponentParameterSize);
+
+ OMX_U32 InputPortIndex();
+
+ OMX_U32 OutputPortIndex();
+
+ nsresult Shutdown();
+
+  // BufferData maintains the status of an OMX buffer (OMX_BUFFERHEADERTYPE).
+  // mStatus tracks the buffer's owner, and a promise is attached because the
+  // buffer is used across different threads.
+ class BufferData {
+ protected:
+ virtual ~BufferData() {}
+
+ public:
+ explicit BufferData(OMX_BUFFERHEADERTYPE* aBuffer)
+ : mEos(false)
+ , mStatus(BufferStatus::FREE)
+ , mBuffer(aBuffer)
+ {}
+
+ typedef void* BufferID;
+
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(BufferData)
+
+    // In most cases, the ID of this buffer is the pointer address of mBuffer.
+    // However, on platforms like Gonk, it is a different value.
+ virtual BufferID ID()
+ {
+ return mBuffer;
+ }
+
+    // Return the platform dependent MediaData().
+    // For example, it returns a MediaData backed by a Gralloc texture.
+    // If it returns nullptr, the caller creates the MediaData() the normal way.
+ virtual already_AddRefed<MediaData> GetPlatformMediaData()
+ {
+ return nullptr;
+ }
+
+    // The buffer can be used by several objects, but only one object owns the
+    // buffer at a time.
+ // FREE:
+ // nobody uses it.
+ //
+ // OMX_COMPONENT:
+ // buffer is used by OMX component (OmxPlatformLayer).
+ //
+ // OMX_CLIENT:
+    //   buffer is used by the client, which is waiting for audio/video playback
+ // (OmxDataDecoder)
+ //
+ // OMX_CLIENT_OUTPUT:
+ // used by client to output decoded data (for example, Gecko layer in
+ // this case)
+ //
+ // For output port buffer, the status transition is:
+ // FREE -> OMX_COMPONENT -> OMX_CLIENT -> OMX_CLIENT_OUTPUT -> FREE
+ //
+ // For input port buffer, the status transition is:
+ // FREE -> OMX_COMPONENT -> OMX_CLIENT -> FREE
+ //
+ enum BufferStatus {
+ FREE,
+ OMX_COMPONENT,
+ OMX_CLIENT,
+ OMX_CLIENT_OUTPUT,
+ INVALID
+ };
+
+ bool mEos;
+
+    // The raw data is kept in OmxPromiseLayer after EmptyBuffer and is then attached
+    // to the output decoded buffer in EmptyFillBufferDone. It keeps a record of the
+    // original data from the demuxer, like duration, stream offset, etc.
+ RefPtr<MediaRawData> mRawData;
+
+    // Because OMX buffers are used across threads, each buffer carries a promise
+    // while it is held by the OMX component.
+ MozPromiseHolder<OmxBufferPromise> mPromise;
+ BufferStatus mStatus;
+ OMX_BUFFERHEADERTYPE* mBuffer;
+ };
+
+ void EmptyFillBufferDone(OMX_DIRTYPE aType, BufferData::BufferID aID);
+
+ void EmptyFillBufferDone(OMX_DIRTYPE aType, BufferData* aData);
+
+ already_AddRefed<BufferData>
+ FindBufferById(OMX_DIRTYPE aType, BufferData::BufferID aId);
+
+ already_AddRefed<BufferData>
+ FindAndRemoveBufferHolder(OMX_DIRTYPE aType, BufferData::BufferID aId);
+
+ // Return true if event is handled.
+ bool Event(OMX_EVENTTYPE aEvent, OMX_U32 aData1, OMX_U32 aData2);
+
+protected:
+ struct FlushCommand {
+ OMX_DIRTYPE type;
+ OMX_PTR cmd;
+ };
+
+ BUFFERLIST* GetBufferHolders(OMX_DIRTYPE aType);
+
+ already_AddRefed<MediaRawData> FindAndRemoveRawData(OMX_TICKS aTimecode);
+
+ RefPtr<TaskQueue> mTaskQueue;
+
+ MozPromiseHolder<OmxCommandPromise> mCommandStatePromise;
+
+ MozPromiseHolder<OmxCommandPromise> mPortDisablePromise;
+
+ MozPromiseHolder<OmxCommandPromise> mPortEnablePromise;
+
+ MozPromiseHolder<OmxCommandPromise> mFlushPromise;
+
+ nsTArray<FlushCommand> mFlushCommands;
+
+ nsAutoPtr<OmxPlatformLayer> mPlatformLayer;
+
+private:
+  // Elements are added to these holders in FillBuffer() or EmptyBuffer(), and
+  // removed when the corresponding promise is resolved. Buffers in these lists
+  // should NOT be used by any other component, for example to output audio.
+  // These lists should be empty when the engine is about to shut down.
+  //
+  // Note:
+  //   These buffer lists should not be used by other classes directly.
+ BUFFERLIST mInbufferHolders;
+
+ BUFFERLIST mOutbufferHolders;
+
+ nsTArray<RefPtr<MediaRawData>> mRawDatas;
+};
+
+}
+
+#endif /* OmxPromiseLayer_h_ */
diff --git a/dom/media/platforms/omx/moz.build b/dom/media/platforms/omx/moz.build
new file mode 100644
index 000000000..9f641d937
--- /dev/null
+++ b/dom/media/platforms/omx/moz.build
@@ -0,0 +1,57 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+EXPORTS += [
+ 'OmxDecoderModule.h',
+]
+
+UNIFIED_SOURCES += [
+ 'OmxDataDecoder.cpp',
+ 'OmxDecoderModule.cpp',
+ 'OmxPlatformLayer.cpp',
+ 'OmxPromiseLayer.cpp',
+]
+
+LOCAL_INCLUDES += [
+ '/media/openmax_il/il112',
+]
+
+include('/ipc/chromium/chromium-config.mozbuild')
+
+if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gonk' and (CONFIG['ANDROID_VERSION'] == '19' or CONFIG['ANDROID_VERSION'] == '20'):
+ # Suppress some GCC/clang warnings being treated as errors:
+ # - about attributes on forward declarations for types that are already
+ # defined, which complains about an important MOZ_EXPORT for android::AString
+ # - about multi-character constants which are used in codec-related code
+ if CONFIG['GNU_CC'] or CONFIG['CLANG_CL']:
+ CXXFLAGS += [
+ '-Wno-error=attributes',
+ '-Wno-error=multichar'
+ ]
+ CXXFLAGS += [
+ '-I%s/%s' % (CONFIG['ANDROID_SOURCE'], d) for d in [
+ 'frameworks/base/include/binder',
+ 'frameworks/base/include/utils',
+ ]
+ ]
+ UNIFIED_SOURCES += [
+ 'GonkOmxPlatformLayer.cpp',
+ ]
+ EXTRA_DSO_LDOPTS += [
+ '-libbinder',
+ ]
+
+FINAL_LIBRARY = 'xul'
+
+if CONFIG['GNU_CXX']:
+ CXXFLAGS += ['-Wno-error=shadow']
+
+if CONFIG['_MSC_VER']:
+ # Avoid warnings from third-party code that we can not modify.
+ if CONFIG['CLANG_CL']:
+ CXXFLAGS += ['-Wno-invalid-source-encoding']
+ else:
+ CXXFLAGS += ['-validate-charset-']
diff --git a/dom/media/platforms/wmf/DXVA2Manager.cpp b/dom/media/platforms/wmf/DXVA2Manager.cpp
new file mode 100644
index 000000000..9fdb0fa20
--- /dev/null
+++ b/dom/media/platforms/wmf/DXVA2Manager.cpp
@@ -0,0 +1,955 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "DXVA2Manager.h"
+#include <d3d11.h>
+#include "nsThreadUtils.h"
+#include "ImageContainer.h"
+#include "gfxWindowsPlatform.h"
+#include "D3D9SurfaceImage.h"
+#include "mozilla/gfx/DeviceManagerDx.h"
+#include "mozilla/layers/D3D11ShareHandleImage.h"
+#include "mozilla/layers/ImageBridgeChild.h"
+#include "mozilla/layers/TextureForwarder.h"
+#include "mozilla/Telemetry.h"
+#include "MediaTelemetryConstants.h"
+#include "mfapi.h"
+#include "gfxPrefs.h"
+#include "MFTDecoder.h"
+#include "DriverCrashGuard.h"
+#include "nsPrintfCString.h"
+#include "gfxCrashReporterUtils.h"
+#include "VideoUtils.h"
+
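+// CLSID of the Microsoft Video Processor MFT (XVP).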
+const CLSID CLSID_VideoProcessorMFT =
+{
+ 0x88753b26,
+ 0x5b24,
+ 0x49bd,
+ { 0xb2, 0xe7, 0xc, 0x44, 0x5c, 0x78, 0xc9, 0x82 }
+};
+
+const GUID MF_XVP_PLAYBACK_MODE =
+{
+ 0x3c5d293f,
+ 0xad67,
+ 0x4e29,
+ { 0xaf, 0x12, 0xcf, 0x3e, 0x23, 0x8a, 0xcc, 0xe9 }
+};
+
+DEFINE_GUID(MF_LOW_LATENCY,
+ 0x9c27891a, 0xed7a, 0x40e1, 0x88, 0xe8, 0xb2, 0x27, 0x27, 0xa0, 0x24, 0xee);
+
+// R600, R700, Evergreen and Cayman AMD cards. These support DXVA via UVD3 or earlier, and don't
+// handle 1080p60 well.
+static const DWORD sAMDPreUVD4[] = {
+ 0x9400, 0x9401, 0x9402, 0x9403, 0x9405, 0x940a, 0x940b, 0x940f, 0x94c0, 0x94c1, 0x94c3, 0x94c4, 0x94c5,
+ 0x94c6, 0x94c7, 0x94c8, 0x94c9, 0x94cb, 0x94cc, 0x94cd, 0x9580, 0x9581, 0x9583, 0x9586, 0x9587, 0x9588,
+ 0x9589, 0x958a, 0x958b, 0x958c, 0x958d, 0x958e, 0x958f, 0x9500, 0x9501, 0x9504, 0x9505, 0x9506, 0x9507,
+ 0x9508, 0x9509, 0x950f, 0x9511, 0x9515, 0x9517, 0x9519, 0x95c0, 0x95c2, 0x95c4, 0x95c5, 0x95c6, 0x95c7,
+ 0x95c9, 0x95cc, 0x95cd, 0x95ce, 0x95cf, 0x9590, 0x9591, 0x9593, 0x9595, 0x9596, 0x9597, 0x9598, 0x9599,
+ 0x959b, 0x9610, 0x9611, 0x9612, 0x9613, 0x9614, 0x9615, 0x9616, 0x9710, 0x9711, 0x9712, 0x9713, 0x9714,
+ 0x9715, 0x9440, 0x9441, 0x9442, 0x9443, 0x9444, 0x9446, 0x944a, 0x944b, 0x944c, 0x944e, 0x9450, 0x9452,
+ 0x9456, 0x945a, 0x945b, 0x945e, 0x9460, 0x9462, 0x946a, 0x946b, 0x947a, 0x947b, 0x9480, 0x9487, 0x9488,
+ 0x9489, 0x948a, 0x948f, 0x9490, 0x9491, 0x9495, 0x9498, 0x949c, 0x949e, 0x949f, 0x9540, 0x9541, 0x9542,
+ 0x954e, 0x954f, 0x9552, 0x9553, 0x9555, 0x9557, 0x955f, 0x94a0, 0x94a1, 0x94a3, 0x94b1, 0x94b3, 0x94b4,
+ 0x94b5, 0x94b9, 0x68e0, 0x68e1, 0x68e4, 0x68e5, 0x68e8, 0x68e9, 0x68f1, 0x68f2, 0x68f8, 0x68f9, 0x68fa,
+ 0x68fe, 0x68c0, 0x68c1, 0x68c7, 0x68c8, 0x68c9, 0x68d8, 0x68d9, 0x68da, 0x68de, 0x68a0, 0x68a1, 0x68a8,
+ 0x68a9, 0x68b0, 0x68b8, 0x68b9, 0x68ba, 0x68be, 0x68bf, 0x6880, 0x6888, 0x6889, 0x688a, 0x688c, 0x688d,
+ 0x6898, 0x6899, 0x689b, 0x689e, 0x689c, 0x689d, 0x9802, 0x9803, 0x9804, 0x9805, 0x9806, 0x9807, 0x9808,
+ 0x9809, 0x980a, 0x9640, 0x9641, 0x9647, 0x9648, 0x964a, 0x964b, 0x964c, 0x964e, 0x964f, 0x9642, 0x9643,
+ 0x9644, 0x9645, 0x9649, 0x6720, 0x6721, 0x6722, 0x6723, 0x6724, 0x6725, 0x6726, 0x6727, 0x6728, 0x6729,
+ 0x6738, 0x6739, 0x673e, 0x6740, 0x6741, 0x6742, 0x6743, 0x6744, 0x6745, 0x6746, 0x6747, 0x6748, 0x6749,
+ 0x674a, 0x6750, 0x6751, 0x6758, 0x6759, 0x675b, 0x675d, 0x675f, 0x6840, 0x6841, 0x6842, 0x6843, 0x6849,
+ 0x6850, 0x6858, 0x6859, 0x6760, 0x6761, 0x6762, 0x6763, 0x6764, 0x6765, 0x6766, 0x6767, 0x6768, 0x6770,
+ 0x6771, 0x6772, 0x6778, 0x6779, 0x677b, 0x6700, 0x6701, 0x6702, 0x6703, 0x6704, 0x6705, 0x6706, 0x6707,
+ 0x6708, 0x6709, 0x6718, 0x6719, 0x671c, 0x671d, 0x671f, 0x9900, 0x9901, 0x9903, 0x9904, 0x9905, 0x9906,
+ 0x9907, 0x9908, 0x9909, 0x990a, 0x990b, 0x990c, 0x990d, 0x990e, 0x990f, 0x9910, 0x9913, 0x9917, 0x9918,
+ 0x9919, 0x9990, 0x9991, 0x9992, 0x9993, 0x9994, 0x9995, 0x9996, 0x9997, 0x9998, 0x9999, 0x999a, 0x999b,
+ 0x999c, 0x999d, 0x99a0, 0x99a2, 0x99a4
+};
+
+// The size we use for our synchronization surface.
+// 16x16 is the size recommended by Microsoft (in the D3D9ExDXGISharedSurf sample) that works
+// best to avoid driver bugs.
+static const uint32_t kSyncSurfaceSize = 16;
+
+namespace mozilla {
+
+using layers::Image;
+using layers::ImageContainer;
+using layers::D3D9SurfaceImage;
+using layers::D3D9RecycleAllocator;
+using layers::D3D11ShareHandleImage;
+using layers::D3D11RecycleAllocator;
+
+class D3D9DXVA2Manager : public DXVA2Manager
+{
+public:
+ D3D9DXVA2Manager();
+ virtual ~D3D9DXVA2Manager();
+
+ HRESULT Init(layers::KnowsCompositor* aKnowsCompositor,
+ nsACString& aFailureReason);
+
+ IUnknown* GetDXVADeviceManager() override;
+
+ // Copies a region (aRegion) of the video frame stored in aVideoSample
+ // into an image which is returned by aOutImage.
+ HRESULT CopyToImage(IMFSample* aVideoSample,
+ const nsIntRect& aRegion,
+ Image** aOutImage) override;
+
+ bool SupportsConfig(IMFMediaType* aType, float aFramerate) override;
+
+private:
+ RefPtr<IDirect3D9Ex> mD3D9;
+ RefPtr<IDirect3DDevice9Ex> mDevice;
+ RefPtr<IDirect3DDeviceManager9> mDeviceManager;
+ RefPtr<D3D9RecycleAllocator> mTextureClientAllocator;
+ RefPtr<IDirectXVideoDecoderService> mDecoderService;
+ RefPtr<IDirect3DSurface9> mSyncSurface;
+ GUID mDecoderGUID;
+ UINT32 mResetToken;
+ bool mFirstFrame;
+ bool mIsAMDPreUVD4;
+};
+
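+// Translates the colorimetry and interlacing attributes of an IMFMediaType into
+// a DXVA2_ExtendedFormat, leaving each field as "unknown" when the corresponding
+// attribute is absent.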
+void GetDXVA2ExtendedFormatFromMFMediaType(IMFMediaType *pType,
+ DXVA2_ExtendedFormat *pFormat)
+{
+ // Get the interlace mode.
+ MFVideoInterlaceMode interlace =
+ (MFVideoInterlaceMode)MFGetAttributeUINT32(pType, MF_MT_INTERLACE_MODE, MFVideoInterlace_Unknown);
+
+ if (interlace == MFVideoInterlace_MixedInterlaceOrProgressive) {
+ pFormat->SampleFormat = DXVA2_SampleFieldInterleavedEvenFirst;
+ } else {
+ pFormat->SampleFormat = (UINT)interlace;
+ }
+
+ pFormat->VideoChromaSubsampling =
+ MFGetAttributeUINT32(pType, MF_MT_VIDEO_CHROMA_SITING, MFVideoChromaSubsampling_Unknown);
+ pFormat->NominalRange =
+ MFGetAttributeUINT32(pType, MF_MT_VIDEO_NOMINAL_RANGE, MFNominalRange_Unknown);
+ pFormat->VideoTransferMatrix =
+ MFGetAttributeUINT32(pType, MF_MT_YUV_MATRIX, MFVideoTransferMatrix_Unknown);
+ pFormat->VideoLighting =
+ MFGetAttributeUINT32(pType, MF_MT_VIDEO_LIGHTING, MFVideoLighting_Unknown);
+ pFormat->VideoPrimaries =
+ MFGetAttributeUINT32(pType, MF_MT_VIDEO_PRIMARIES, MFVideoPrimaries_Unknown);
+ pFormat->VideoTransferFunction =
+ MFGetAttributeUINT32(pType, MF_MT_TRANSFER_FUNCTION, MFVideoTransFunc_Unknown);
+}
+
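+// Builds a DXVA2_VideoDesc (D3D format, dimensions, frame rate and extended
+// format) from an IMFMediaType, doubling the output frame rate for interlaced
+// content, so it can be handed to the DXVA decoder service.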
+HRESULT ConvertMFTypeToDXVAType(IMFMediaType *pType, DXVA2_VideoDesc *pDesc)
+{
+ ZeroMemory(pDesc, sizeof(*pDesc));
+
+ // The D3D format is the first DWORD of the subtype GUID.
+ GUID subtype = GUID_NULL;
+ HRESULT hr = pType->GetGUID(MF_MT_SUBTYPE, &subtype);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+ pDesc->Format = (D3DFORMAT)subtype.Data1;
+
+ UINT32 width = 0;
+ UINT32 height = 0;
+ hr = MFGetAttributeSize(pType, MF_MT_FRAME_SIZE, &width, &height);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+ NS_ENSURE_TRUE(width <= MAX_VIDEO_WIDTH, E_FAIL);
+ NS_ENSURE_TRUE(height <= MAX_VIDEO_HEIGHT, E_FAIL);
+ pDesc->SampleWidth = width;
+ pDesc->SampleHeight = height;
+
+ UINT32 fpsNumerator = 0;
+ UINT32 fpsDenominator = 0;
+ hr = MFGetAttributeRatio(pType, MF_MT_FRAME_RATE, &fpsNumerator, &fpsDenominator);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+ pDesc->InputSampleFreq.Numerator = fpsNumerator;
+ pDesc->InputSampleFreq.Denominator = fpsDenominator;
+
+ GetDXVA2ExtendedFormatFromMFMediaType(pType, &pDesc->SampleFormat);
+ pDesc->OutputFrameFreq = pDesc->InputSampleFreq;
+ if ((pDesc->SampleFormat.SampleFormat == DXVA2_SampleFieldInterleavedEvenFirst) ||
+ (pDesc->SampleFormat.SampleFormat == DXVA2_SampleFieldInterleavedOddFirst)) {
+ pDesc->OutputFrameFreq.Numerator *= 2;
+ }
+
+ return S_OK;
+}
+
+static const GUID DXVA2_ModeH264_E = {
+ 0x1b81be68, 0xa0c7, 0x11d3, { 0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5 }
+};
+
+static const GUID DXVA2_Intel_ModeH264_E = {
+ 0x604F8E68, 0x4951, 0x4c54, { 0x88, 0xFE, 0xAB, 0xD2, 0x5C, 0x15, 0xB3, 0xD6 }
+};
+
+// This tests if a DXVA video decoder can be created for the given media type/resolution.
+// It uses the same decoder device (DXVA2_ModeH264_E, aka DXVA2_ModeH264_VLD_NoFGT) as the H264
+// decoder MFT provided by Windows (CLSID_CMSH264DecoderMFT), so we can use it to determine
+// whether the MFT will fall back to software decoding.
+bool
+D3D9DXVA2Manager::SupportsConfig(IMFMediaType* aType, float aFramerate)
+{
+ MOZ_ASSERT(NS_IsMainThread());
+ gfx::D3D9VideoCrashGuard crashGuard;
+ if (crashGuard.Crashed()) {
+ NS_WARNING("DXVA2D3D9 crash detected");
+ return false;
+ }
+
+ DXVA2_VideoDesc desc;
+ HRESULT hr = ConvertMFTypeToDXVAType(aType, &desc);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+
+ // AMD cards with UVD3 or earlier perform poorly trying to decode 1080p60 in hardware,
+ // so use software instead. Pick 45 as an arbitrary upper bound for the framerate we can
+ // handle.
+ if (mIsAMDPreUVD4 &&
+ (desc.SampleWidth >= 1920 || desc.SampleHeight >= 1088) &&
+ aFramerate > 45) {
+ return false;
+ }
+
+ UINT configCount;
+ DXVA2_ConfigPictureDecode* configs = nullptr;
+ hr = mDecoderService->GetDecoderConfigurations(mDecoderGUID, &desc, nullptr, &configCount, &configs);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+
+ RefPtr<IDirect3DSurface9> surface;
+ hr = mDecoderService->CreateSurface(desc.SampleWidth, desc.SampleHeight, 0, (D3DFORMAT)MAKEFOURCC('N', 'V', '1', '2'),
+ D3DPOOL_DEFAULT, 0, DXVA2_VideoDecoderRenderTarget,
+ surface.StartAssignment(), NULL);
+ if (!SUCCEEDED(hr)) {
+ CoTaskMemFree(configs);
+ return false;
+ }
+
+ for (UINT i = 0; i < configCount; i++) {
+ RefPtr<IDirectXVideoDecoder> decoder;
+ IDirect3DSurface9* surfaces = surface;
+ hr = mDecoderService->CreateVideoDecoder(mDecoderGUID, &desc, &configs[i], &surfaces, 1, decoder.StartAssignment());
+ if (SUCCEEDED(hr) && decoder) {
+ CoTaskMemFree(configs);
+ return true;
+ }
+ }
+ CoTaskMemFree(configs);
+ return false;
+}
+
+D3D9DXVA2Manager::D3D9DXVA2Manager()
+ : mResetToken(0)
+ , mFirstFrame(true)
+ , mIsAMDPreUVD4(false)
+{
+ MOZ_COUNT_CTOR(D3D9DXVA2Manager);
+ MOZ_ASSERT(NS_IsMainThread());
+}
+
+D3D9DXVA2Manager::~D3D9DXVA2Manager()
+{
+ MOZ_COUNT_DTOR(D3D9DXVA2Manager);
+ MOZ_ASSERT(NS_IsMainThread());
+}
+
+IUnknown*
+D3D9DXVA2Manager::GetDXVADeviceManager()
+{
+ MutexAutoLock lock(mLock);
+ return mDeviceManager;
+}
+
+HRESULT
+D3D9DXVA2Manager::Init(layers::KnowsCompositor* aKnowsCompositor,
+ nsACString& aFailureReason)
+{
+ MOZ_ASSERT(NS_IsMainThread());
+
+ ScopedGfxFeatureReporter reporter("DXVA2D3D9");
+
+ gfx::D3D9VideoCrashGuard crashGuard;
+ if (crashGuard.Crashed()) {
+ NS_WARNING("DXVA2D3D9 crash detected");
+ aFailureReason.AssignLiteral("DXVA2D3D9 crashes detected in the past");
+ return E_FAIL;
+ }
+
+ // Create D3D9Ex.
+ HMODULE d3d9lib = LoadLibraryW(L"d3d9.dll");
+ NS_ENSURE_TRUE(d3d9lib, E_FAIL);
+ decltype(Direct3DCreate9Ex)* d3d9Create =
+ (decltype(Direct3DCreate9Ex)*) GetProcAddress(d3d9lib, "Direct3DCreate9Ex");
+ if (!d3d9Create) {
+ NS_WARNING("Couldn't find Direct3DCreate9Ex symbol in d3d9.dll");
+ aFailureReason.AssignLiteral("Couldn't find Direct3DCreate9Ex symbol in d3d9.dll");
+ return E_FAIL;
+ }
+ RefPtr<IDirect3D9Ex> d3d9Ex;
+ HRESULT hr = d3d9Create(D3D_SDK_VERSION, getter_AddRefs(d3d9Ex));
+  if (FAILED(hr) || !d3d9Ex) {
+    NS_WARNING("Direct3DCreate9Ex failed");
+    aFailureReason.AssignLiteral("Direct3DCreate9Ex failed");
+ return E_FAIL;
+ }
+
+ // Ensure we can do the YCbCr->RGB conversion in StretchRect.
+ // Fail if we can't.
+ hr = d3d9Ex->CheckDeviceFormatConversion(D3DADAPTER_DEFAULT,
+ D3DDEVTYPE_HAL,
+ (D3DFORMAT)MAKEFOURCC('N','V','1','2'),
+ D3DFMT_X8R8G8B8);
+ if (!SUCCEEDED(hr)) {
+ aFailureReason = nsPrintfCString("CheckDeviceFormatConversion failed with error %X", hr);
+ return hr;
+ }
+
+ // Create D3D9DeviceEx. We pass null HWNDs here even though the documentation
+ // suggests that one of them should not be. At this point in time Chromium
+ // does the same thing for video acceleration.
+ D3DPRESENT_PARAMETERS params = {0};
+ params.BackBufferWidth = 1;
+ params.BackBufferHeight = 1;
+ params.BackBufferFormat = D3DFMT_A8R8G8B8;
+ params.BackBufferCount = 1;
+ params.SwapEffect = D3DSWAPEFFECT_DISCARD;
+ params.hDeviceWindow = nullptr;
+ params.Windowed = TRUE;
+ params.Flags = D3DPRESENTFLAG_VIDEO;
+
+ RefPtr<IDirect3DDevice9Ex> device;
+ hr = d3d9Ex->CreateDeviceEx(D3DADAPTER_DEFAULT,
+ D3DDEVTYPE_HAL,
+ nullptr,
+ D3DCREATE_FPU_PRESERVE |
+ D3DCREATE_MULTITHREADED |
+ D3DCREATE_MIXED_VERTEXPROCESSING,
+ &params,
+ nullptr,
+ getter_AddRefs(device));
+ if (!SUCCEEDED(hr)) {
+ aFailureReason = nsPrintfCString("CreateDeviceEx failed with error %X", hr);
+ return hr;
+ }
+
+ // Ensure we can create queries to synchronize operations between devices.
+ // Without this, when we make a copy of the frame in order to share it with
+ // another device, we can't be sure that the copy has finished before the
+ // other device starts using it.
+ RefPtr<IDirect3DQuery9> query;
+
+ hr = device->CreateQuery(D3DQUERYTYPE_EVENT, getter_AddRefs(query));
+ if (!SUCCEEDED(hr)) {
+ aFailureReason = nsPrintfCString("CreateQuery failed with error %X", hr);
+ return hr;
+ }
+
+ // Create and initialize IDirect3DDeviceManager9.
+ UINT resetToken = 0;
+ RefPtr<IDirect3DDeviceManager9> deviceManager;
+
+ hr = wmf::DXVA2CreateDirect3DDeviceManager9(&resetToken,
+ getter_AddRefs(deviceManager));
+ if (!SUCCEEDED(hr)) {
+ aFailureReason = nsPrintfCString("DXVA2CreateDirect3DDeviceManager9 failed with error %X", hr);
+ return hr;
+ }
+ hr = deviceManager->ResetDevice(device, resetToken);
+ if (!SUCCEEDED(hr)) {
+ aFailureReason = nsPrintfCString("IDirect3DDeviceManager9::ResetDevice failed with error %X", hr);
+ return hr;
+ }
+
+ HANDLE deviceHandle;
+ RefPtr<IDirectXVideoDecoderService> decoderService;
+ hr = deviceManager->OpenDeviceHandle(&deviceHandle);
+ if (!SUCCEEDED(hr)) {
+ aFailureReason = nsPrintfCString("IDirect3DDeviceManager9::OpenDeviceHandle failed with error %X", hr);
+ return hr;
+ }
+
+ hr = deviceManager->GetVideoService(deviceHandle, IID_PPV_ARGS(decoderService.StartAssignment()));
+ deviceManager->CloseDeviceHandle(deviceHandle);
+ if (!SUCCEEDED(hr)) {
+ aFailureReason = nsPrintfCString("IDirectXVideoDecoderServer::GetVideoService failed with error %X", hr);
+ return hr;
+ }
+
+ UINT deviceCount;
+ GUID* decoderDevices = nullptr;
+ hr = decoderService->GetDecoderDeviceGuids(&deviceCount, &decoderDevices);
+ if (!SUCCEEDED(hr)) {
+ aFailureReason = nsPrintfCString("IDirectXVideoDecoderServer::GetDecoderDeviceGuids failed with error %X", hr);
+ return hr;
+ }
+
+ bool found = false;
+ for (UINT i = 0; i < deviceCount; i++) {
+ if (decoderDevices[i] == DXVA2_ModeH264_E ||
+ decoderDevices[i] == DXVA2_Intel_ModeH264_E) {
+ mDecoderGUID = decoderDevices[i];
+ found = true;
+ break;
+ }
+ }
+ CoTaskMemFree(decoderDevices);
+
+ if (!found) {
+ aFailureReason.AssignLiteral("Failed to find an appropriate decoder GUID");
+ return E_FAIL;
+ }
+
+ D3DADAPTER_IDENTIFIER9 adapter;
+ hr = d3d9Ex->GetAdapterIdentifier(D3DADAPTER_DEFAULT, 0, &adapter);
+ if (!SUCCEEDED(hr)) {
+ aFailureReason = nsPrintfCString("IDirect3D9Ex::GetAdapterIdentifier failed with error %X", hr);
+ return hr;
+ }
+
+  if ((adapter.VendorId == 0x1002 || adapter.VendorId == 0x1022) &&
+      !gfxPrefs::PDMWMFSkipBlacklist()) {
+ for (size_t i = 0; i < MOZ_ARRAY_LENGTH(sAMDPreUVD4); i++) {
+ if (adapter.DeviceId == sAMDPreUVD4[i]) {
+ mIsAMDPreUVD4 = true;
+ break;
+ }
+ }
+ }
+
+ RefPtr<IDirect3DSurface9> syncSurf;
+ hr = device->CreateRenderTarget(kSyncSurfaceSize, kSyncSurfaceSize,
+ D3DFMT_X8R8G8B8, D3DMULTISAMPLE_NONE,
+ 0, TRUE, getter_AddRefs(syncSurf), NULL);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ mDecoderService = decoderService;
+
+ mResetToken = resetToken;
+ mD3D9 = d3d9Ex;
+ mDevice = device;
+ mDeviceManager = deviceManager;
+ mSyncSurface = syncSurf;
+
+ if (layers::ImageBridgeChild::GetSingleton()) {
+ // There's no proper KnowsCompositor for ImageBridge currently (and it
+ // implements the interface), so just use that if it's available.
+ mTextureClientAllocator = new D3D9RecycleAllocator(layers::ImageBridgeChild::GetSingleton().get(),
+ mDevice);
+ } else {
+ mTextureClientAllocator = new D3D9RecycleAllocator(aKnowsCompositor,
+ mDevice);
+ }
+ mTextureClientAllocator->SetMaxPoolSize(5);
+
+ Telemetry::Accumulate(Telemetry::MEDIA_DECODER_BACKEND_USED,
+ uint32_t(media::MediaDecoderBackend::WMFDXVA2D3D9));
+
+ reporter.SetSuccessful();
+
+ return S_OK;
+}
+
+HRESULT
+D3D9DXVA2Manager::CopyToImage(IMFSample* aSample,
+ const nsIntRect& aRegion,
+ Image** aOutImage)
+{
+ RefPtr<IMFMediaBuffer> buffer;
+ HRESULT hr = aSample->GetBufferByIndex(0, getter_AddRefs(buffer));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ RefPtr<IDirect3DSurface9> surface;
+ hr = wmf::MFGetService(buffer,
+ MR_BUFFER_SERVICE,
+ IID_IDirect3DSurface9,
+ getter_AddRefs(surface));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ RefPtr<D3D9SurfaceImage> image = new D3D9SurfaceImage();
+ hr = image->AllocateAndCopy(mTextureClientAllocator, surface, aRegion);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ RefPtr<IDirect3DSurface9> sourceSurf = image->GetD3D9Surface();
+
+ // Copy a small rect into our sync surface, and then map it
+ // to block until decoding/color conversion completes.
+ RECT copyRect = { 0, 0, kSyncSurfaceSize, kSyncSurfaceSize };
+ hr = mDevice->StretchRect(sourceSurf, &copyRect, mSyncSurface, &copyRect, D3DTEXF_NONE);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ D3DLOCKED_RECT lockedRect;
+ hr = mSyncSurface->LockRect(&lockedRect, NULL, D3DLOCK_READONLY);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = mSyncSurface->UnlockRect();
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ image.forget(aOutImage);
+ return S_OK;
+}
+
+// Count of the number of DXVAManager's we've created. This is also the
+// number of videos we're decoding with DXVA. Use on main thread only.
+static uint32_t sDXVAVideosCount = 0;
+
+/* static */
+DXVA2Manager*
+DXVA2Manager::CreateD3D9DXVA(layers::KnowsCompositor* aKnowsCompositor,
+ nsACString& aFailureReason)
+{
+ MOZ_ASSERT(NS_IsMainThread());
+ HRESULT hr;
+
+ // DXVA processing takes up a lot of GPU resources, so limit the number of
+ // videos we use DXVA with at any one time.
+ uint32_t dxvaLimit = gfxPrefs::PDMWMFMaxDXVAVideos();
+
+ if (sDXVAVideosCount == dxvaLimit) {
+ aFailureReason.AssignLiteral("Too many DXVA videos playing");
+ return nullptr;
+ }
+
+ nsAutoPtr<D3D9DXVA2Manager> d3d9Manager(new D3D9DXVA2Manager());
+ hr = d3d9Manager->Init(aKnowsCompositor, aFailureReason);
+ if (SUCCEEDED(hr)) {
+ return d3d9Manager.forget();
+ }
+
+ // No hardware accelerated video decoding. :(
+ return nullptr;
+}
+
+class D3D11DXVA2Manager : public DXVA2Manager
+{
+public:
+ D3D11DXVA2Manager();
+ virtual ~D3D11DXVA2Manager();
+
+ HRESULT Init(layers::KnowsCompositor* aKnowsCompositor,
+ nsACString& aFailureReason);
+
+ IUnknown* GetDXVADeviceManager() override;
+
+ // Copies a region (aRegion) of the video frame stored in aVideoSample
+ // into an image which is returned by aOutImage.
+ HRESULT CopyToImage(IMFSample* aVideoSample,
+ const nsIntRect& aRegion,
+ Image** aOutImage) override;
+
+ HRESULT ConfigureForSize(uint32_t aWidth, uint32_t aHeight) override;
+
+ bool IsD3D11() override { return true; }
+
+ bool SupportsConfig(IMFMediaType* aType, float aFramerate) override;
+
+private:
+ HRESULT CreateFormatConverter();
+
+ HRESULT CreateOutputSample(RefPtr<IMFSample>& aSample,
+ ID3D11Texture2D* aTexture);
+
+ RefPtr<ID3D11Device> mDevice;
+ RefPtr<ID3D11DeviceContext> mContext;
+ RefPtr<IMFDXGIDeviceManager> mDXGIDeviceManager;
+ RefPtr<MFTDecoder> mTransform;
+ RefPtr<D3D11RecycleAllocator> mTextureClientAllocator;
+ RefPtr<ID3D11Texture2D> mSyncSurface;
+ GUID mDecoderGUID;
+ uint32_t mWidth;
+ uint32_t mHeight;
+ UINT mDeviceManagerToken;
+ bool mIsAMDPreUVD4;
+};
+
+bool
+D3D11DXVA2Manager::SupportsConfig(IMFMediaType* aType, float aFramerate)
+{
+ MOZ_ASSERT(NS_IsMainThread());
+ gfx::D3D11VideoCrashGuard crashGuard;
+ if (crashGuard.Crashed()) {
+ NS_WARNING("DXVA2D3D9 crash detected");
+ return false;
+ }
+
+ RefPtr<ID3D11VideoDevice> videoDevice;
+ HRESULT hr = mDevice->QueryInterface(static_cast<ID3D11VideoDevice**>(getter_AddRefs(videoDevice)));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+
+ D3D11_VIDEO_DECODER_DESC desc;
+ desc.Guid = mDecoderGUID;
+
+ UINT32 width = 0;
+ UINT32 height = 0;
+ hr = MFGetAttributeSize(aType, MF_MT_FRAME_SIZE, &width, &height);
+  NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+ NS_ENSURE_TRUE(width <= MAX_VIDEO_WIDTH, false);
+ NS_ENSURE_TRUE(height <= MAX_VIDEO_HEIGHT, false);
+ desc.SampleWidth = width;
+ desc.SampleHeight = height;
+
+ desc.OutputFormat = DXGI_FORMAT_NV12;
+
+ // AMD cards with UVD3 or earlier perform poorly trying to decode 1080p60 in hardware,
+ // so use software instead. Pick 45 as an arbitrary upper bound for the framerate we can
+ // handle.
+ if (mIsAMDPreUVD4 &&
+ (desc.SampleWidth >= 1920 || desc.SampleHeight >= 1088) &&
+ aFramerate > 45) {
+ return false;
+ }
+
+ UINT configCount = 0;
+ hr = videoDevice->GetVideoDecoderConfigCount(&desc, &configCount);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+
+ for (UINT i = 0; i < configCount; i++) {
+ D3D11_VIDEO_DECODER_CONFIG config;
+ hr = videoDevice->GetVideoDecoderConfig(&desc, i, &config);
+ if (SUCCEEDED(hr)) {
+ RefPtr<ID3D11VideoDecoder> decoder;
+ hr = videoDevice->CreateVideoDecoder(&desc, &config, decoder.StartAssignment());
+ if (SUCCEEDED(hr) && decoder) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+D3D11DXVA2Manager::D3D11DXVA2Manager()
+ : mWidth(0)
+ , mHeight(0)
+ , mDeviceManagerToken(0)
+ , mIsAMDPreUVD4(false)
+{
+}
+
+D3D11DXVA2Manager::~D3D11DXVA2Manager()
+{
+}
+
+IUnknown*
+D3D11DXVA2Manager::GetDXVADeviceManager()
+{
+ MutexAutoLock lock(mLock);
+ return mDXGIDeviceManager;
+}
+
+HRESULT
+D3D11DXVA2Manager::Init(layers::KnowsCompositor* aKnowsCompositor,
+ nsACString& aFailureReason)
+{
+ HRESULT hr;
+
+ ScopedGfxFeatureReporter reporter("DXVA2D3D11");
+
+ gfx::D3D11VideoCrashGuard crashGuard;
+ if (crashGuard.Crashed()) {
+ NS_WARNING("DXVA2D3D11 crash detected");
+ aFailureReason.AssignLiteral("DXVA2D3D11 crashes detected in the past");
+ return E_FAIL;
+ }
+
+ mDevice = gfx::DeviceManagerDx::Get()->CreateDecoderDevice();
+ if (!mDevice) {
+ aFailureReason.AssignLiteral("Failed to create D3D11 device for decoder");
+ return E_FAIL;
+ }
+
+ mDevice->GetImmediateContext(getter_AddRefs(mContext));
+ if (!mContext) {
+ aFailureReason.AssignLiteral("Failed to get immediate context for d3d11 device");
+ return E_FAIL;
+ }
+
+ hr = wmf::MFCreateDXGIDeviceManager(&mDeviceManagerToken, getter_AddRefs(mDXGIDeviceManager));
+ if (!SUCCEEDED(hr)) {
+ aFailureReason = nsPrintfCString("MFCreateDXGIDeviceManager failed with code %X", hr);
+ return hr;
+ }
+
+ hr = mDXGIDeviceManager->ResetDevice(mDevice, mDeviceManagerToken);
+ if (!SUCCEEDED(hr)) {
+ aFailureReason = nsPrintfCString("IMFDXGIDeviceManager::ResetDevice failed with code %X", hr);
+ return hr;
+ }
+
+ mTransform = new MFTDecoder();
+ hr = mTransform->Create(CLSID_VideoProcessorMFT);
+ if (!SUCCEEDED(hr)) {
+ aFailureReason = nsPrintfCString("MFTDecoder::Create(CLSID_VideoProcessorMFT) failed with code %X", hr);
+ return hr;
+ }
+
+ hr = mTransform->SendMFTMessage(MFT_MESSAGE_SET_D3D_MANAGER, ULONG_PTR(mDXGIDeviceManager.get()));
+ if (!SUCCEEDED(hr)) {
+ aFailureReason = nsPrintfCString("MFTDecoder::SendMFTMessage(MFT_MESSAGE_SET_D3D_MANAGER) failed with code %X", hr);
+ return hr;
+ }
+
+ RefPtr<ID3D11VideoDevice> videoDevice;
+ hr = mDevice->QueryInterface(static_cast<ID3D11VideoDevice**>(getter_AddRefs(videoDevice)));
+ if (!SUCCEEDED(hr)) {
+ aFailureReason = nsPrintfCString("QI to ID3D11VideoDevice failed with code %X", hr);
+ return hr;
+ }
+
+ bool found = false;
+ UINT profileCount = videoDevice->GetVideoDecoderProfileCount();
+ for (UINT i = 0; i < profileCount; i++) {
+ GUID id;
+ hr = videoDevice->GetVideoDecoderProfile(i, &id);
+ if (SUCCEEDED(hr) && (id == DXVA2_ModeH264_E || id == DXVA2_Intel_ModeH264_E)) {
+ mDecoderGUID = id;
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ aFailureReason.AssignLiteral("Failed to find an appropriate decoder GUID");
+ return E_FAIL;
+ }
+
+ BOOL nv12Support = false;
+ hr = videoDevice->CheckVideoDecoderFormat(&mDecoderGUID, DXGI_FORMAT_NV12, &nv12Support);
+ if (!SUCCEEDED(hr)) {
+ aFailureReason = nsPrintfCString("CheckVideoDecoderFormat failed with code %X", hr);
+ return hr;
+ }
+ if (!nv12Support) {
+ aFailureReason.AssignLiteral("Decoder doesn't support NV12 surfaces");
+ return E_FAIL;
+ }
+
+ RefPtr<IDXGIDevice> dxgiDevice;
+ hr = mDevice->QueryInterface(static_cast<IDXGIDevice**>(getter_AddRefs(dxgiDevice)));
+ if (!SUCCEEDED(hr)) {
+ aFailureReason = nsPrintfCString("QI to IDXGIDevice failed with code %X", hr);
+ return hr;
+ }
+
+ RefPtr<IDXGIAdapter> adapter;
+ hr = dxgiDevice->GetAdapter(adapter.StartAssignment());
+ if (!SUCCEEDED(hr)) {
+ aFailureReason = nsPrintfCString("IDXGIDevice::GetAdapter failed with code %X", hr);
+ return hr;
+ }
+
+ DXGI_ADAPTER_DESC adapterDesc;
+ hr = adapter->GetDesc(&adapterDesc);
+ if (!SUCCEEDED(hr)) {
+ aFailureReason = nsPrintfCString("IDXGIAdapter::GetDesc failed with code %X", hr);
+ return hr;
+ }
+
+  if ((adapterDesc.VendorId == 0x1002 || adapterDesc.VendorId == 0x1022) &&
+      !gfxPrefs::PDMWMFSkipBlacklist()) {
+ for (size_t i = 0; i < MOZ_ARRAY_LENGTH(sAMDPreUVD4); i++) {
+ if (adapterDesc.DeviceId == sAMDPreUVD4[i]) {
+ mIsAMDPreUVD4 = true;
+ break;
+ }
+ }
+ }
+
+ D3D11_TEXTURE2D_DESC desc;
+ desc.Width = kSyncSurfaceSize;
+ desc.Height = kSyncSurfaceSize;
+ desc.MipLevels = 1;
+ desc.ArraySize = 1;
+ desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
+ desc.SampleDesc.Count = 1;
+ desc.SampleDesc.Quality = 0;
+ desc.Usage = D3D11_USAGE_STAGING;
+ desc.BindFlags = 0;
+ desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
+ desc.MiscFlags = 0;
+
+ hr = mDevice->CreateTexture2D(&desc, NULL, getter_AddRefs(mSyncSurface));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ if (layers::ImageBridgeChild::GetSingleton()) {
+ // There's no proper KnowsCompositor for ImageBridge currently (and it
+ // implements the interface), so just use that if it's available.
+ mTextureClientAllocator = new D3D11RecycleAllocator(layers::ImageBridgeChild::GetSingleton().get(),
+ mDevice);
+ } else {
+ mTextureClientAllocator = new D3D11RecycleAllocator(aKnowsCompositor,
+ mDevice);
+ }
+ mTextureClientAllocator->SetMaxPoolSize(5);
+
+ Telemetry::Accumulate(Telemetry::MEDIA_DECODER_BACKEND_USED,
+ uint32_t(media::MediaDecoderBackend::WMFDXVA2D3D11));
+
+ reporter.SetSuccessful();
+
+ return S_OK;
+}
+
+HRESULT
+D3D11DXVA2Manager::CreateOutputSample(RefPtr<IMFSample>& aSample, ID3D11Texture2D* aTexture)
+{
+ RefPtr<IMFSample> sample;
+ HRESULT hr = wmf::MFCreateSample(getter_AddRefs(sample));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ RefPtr<IMFMediaBuffer> buffer;
+ hr = wmf::MFCreateDXGISurfaceBuffer(__uuidof(ID3D11Texture2D), aTexture, 0, FALSE, getter_AddRefs(buffer));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ sample->AddBuffer(buffer);
+
+ aSample = sample;
+ return S_OK;
+}
+
+HRESULT
+D3D11DXVA2Manager::CopyToImage(IMFSample* aVideoSample,
+ const nsIntRect& aRegion,
+ Image** aOutImage)
+{
+ NS_ENSURE_TRUE(aVideoSample, E_POINTER);
+ NS_ENSURE_TRUE(aOutImage, E_POINTER);
+
+ // Our video frame is stored in a non-sharable ID3D11Texture2D. We need
+ // to create a copy of that frame as a sharable resource, save its share
+ // handle, and put that handle into the rendering pipeline.
+
+ RefPtr<D3D11ShareHandleImage> image =
+ new D3D11ShareHandleImage(gfx::IntSize(mWidth, mHeight), aRegion);
+ bool ok = image->AllocateTexture(mTextureClientAllocator, mDevice);
+ NS_ENSURE_TRUE(ok, E_FAIL);
+
+ HRESULT hr = mTransform->Input(aVideoSample);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ RefPtr<IMFSample> sample;
+ RefPtr<ID3D11Texture2D> texture = image->GetTexture();
+ hr = CreateOutputSample(sample, texture);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = mTransform->Output(&sample);
+
+ RefPtr<ID3D11DeviceContext> ctx;
+ mDevice->GetImmediateContext(getter_AddRefs(ctx));
+
+ // Copy a small rect into our sync surface, and then map it
+ // to block until decoding/color conversion completes.
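+  // D3D11_BOX fields are ordered {left, top, front, right, bottom, back},
+  // so this box selects a kSyncSurfaceSize x kSyncSurfaceSize region.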
+ D3D11_BOX rect = { 0, 0, 0, kSyncSurfaceSize, kSyncSurfaceSize, 1 };
+ ctx->CopySubresourceRegion(mSyncSurface, 0, 0, 0, 0, texture, 0, &rect);
+
+ D3D11_MAPPED_SUBRESOURCE mapped;
+ hr = ctx->Map(mSyncSurface, 0, D3D11_MAP_READ, 0, &mapped);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ ctx->Unmap(mSyncSurface, 0);
+
+ image.forget(aOutImage);
+
+ return S_OK;
+}
+
+HRESULT ConfigureOutput(IMFMediaType* aOutput, void* aData)
+{
+ HRESULT hr = aOutput->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = aOutput->SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ gfx::IntSize* size = reinterpret_cast<gfx::IntSize*>(aData);
+ hr = MFSetAttributeSize(aOutput, MF_MT_FRAME_SIZE, size->width, size->height);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ return S_OK;
+}
+
+HRESULT
+D3D11DXVA2Manager::ConfigureForSize(uint32_t aWidth, uint32_t aHeight)
+{
+ mWidth = aWidth;
+ mHeight = aHeight;
+
+ RefPtr<IMFMediaType> inputType;
+ HRESULT hr = wmf::MFCreateMediaType(getter_AddRefs(inputType));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = inputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = inputType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_NV12);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = inputType->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = inputType->SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ RefPtr<IMFAttributes> attr = mTransform->GetAttributes();
+
+ hr = attr->SetUINT32(MF_XVP_PLAYBACK_MODE, TRUE);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = attr->SetUINT32(MF_LOW_LATENCY, FALSE);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = MFSetAttributeSize(inputType, MF_MT_FRAME_SIZE, aWidth, aHeight);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ RefPtr<IMFMediaType> outputType;
+ hr = wmf::MFCreateMediaType(getter_AddRefs(outputType));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = outputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = outputType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_ARGB32);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ gfx::IntSize size(mWidth, mHeight);
+ hr = mTransform->SetMediaTypes(inputType, outputType, ConfigureOutput, &size);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ return S_OK;
+}
+
+/* static */
+DXVA2Manager*
+DXVA2Manager::CreateD3D11DXVA(layers::KnowsCompositor* aKnowsCompositor,
+ nsACString& aFailureReason)
+{
+ // DXVA processing takes up a lot of GPU resources, so limit the number of
+ // videos we use DXVA with at any one time.
+ uint32_t dxvaLimit = gfxPrefs::PDMWMFMaxDXVAVideos();
+
+ if (sDXVAVideosCount == dxvaLimit) {
+ aFailureReason.AssignLiteral("Too many DXVA videos playing");
+ return nullptr;
+ }
+
+ nsAutoPtr<D3D11DXVA2Manager> manager(new D3D11DXVA2Manager());
+ HRESULT hr = manager->Init(aKnowsCompositor, aFailureReason);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
+
+ return manager.forget();
+}
+
+DXVA2Manager::DXVA2Manager()
+ : mLock("DXVA2Manager")
+{
+ MOZ_ASSERT(NS_IsMainThread());
+ ++sDXVAVideosCount;
+}
+
+DXVA2Manager::~DXVA2Manager()
+{
+ MOZ_ASSERT(NS_IsMainThread());
+ --sDXVAVideosCount;
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/wmf/DXVA2Manager.h b/dom/media/platforms/wmf/DXVA2Manager.h
new file mode 100644
index 000000000..0bdc02dd4
--- /dev/null
+++ b/dom/media/platforms/wmf/DXVA2Manager.h
@@ -0,0 +1,56 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+#if !defined(DXVA2Manager_h_)
+#define DXVA2Manager_h_
+
+#include "WMF.h"
+#include "nsAutoPtr.h"
+#include "mozilla/Mutex.h"
+#include "nsRect.h"
+
+namespace mozilla {
+
+namespace layers {
+class Image;
+class ImageContainer;
+class KnowsCompositor;
+}
+
+class DXVA2Manager {
+public:
+
+ // Creates and initializes a DXVA2Manager. We can use DXVA2 via either
+ // D3D9Ex or D3D11.
+ static DXVA2Manager* CreateD3D9DXVA(layers::KnowsCompositor* aKnowsCompositor, nsACString& aFailureReason);
+ static DXVA2Manager* CreateD3D11DXVA(layers::KnowsCompositor* aKnowsCompositor, nsACString& aFailureReason);
+
+ // Returns a pointer to the D3D device manager responsible for managing the
+ // device we're using for hardware accelerated video decoding. If we're using
+ // D3D9Ex, this is an IDirect3DDeviceManager9. For D3D11 this is an
+ // IMFDXGIDeviceManager. It is safe to call this on any thread.
+ virtual IUnknown* GetDXVADeviceManager() = 0;
+
+ // Creates an Image for the video frame stored in aVideoSample.
+ virtual HRESULT CopyToImage(IMFSample* aVideoSample,
+ const nsIntRect& aRegion,
+ layers::Image** aOutImage) = 0;
+
+ virtual HRESULT ConfigureForSize(uint32_t aWidth, uint32_t aHeight) { return S_OK; }
+
+ virtual bool IsD3D11() { return false; }
+
+ virtual ~DXVA2Manager();
+
+ virtual bool SupportsConfig(IMFMediaType* aType, float aFramerate) = 0;
+
+protected:
+ Mutex mLock;
+ DXVA2Manager();
+};
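+
+// Illustrative call pattern (a sketch, not part of this header); a caller
+// such as the WMF video decoder might do something along these lines:
+//
+//   nsCString failureReason;
+//   nsAutoPtr<DXVA2Manager> dxva(
+//     DXVA2Manager::CreateD3D11DXVA(aKnowsCompositor, failureReason));
+//   if (!dxva) {
+//     // No hardware-accelerated decoding; failureReason says why.
+//   }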
+
+} // namespace mozilla
+
+#endif // DXVA2Manager_h_
diff --git a/dom/media/platforms/wmf/MFTDecoder.cpp b/dom/media/platforms/wmf/MFTDecoder.cpp
new file mode 100644
index 000000000..e634fcff9
--- /dev/null
+++ b/dom/media/platforms/wmf/MFTDecoder.cpp
@@ -0,0 +1,305 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MFTDecoder.h"
+#include "nsThreadUtils.h"
+#include "WMFUtils.h"
+#include "mozilla/Logging.h"
+
+#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
+
+namespace mozilla {
+
+MFTDecoder::MFTDecoder()
+ : mMFTProvidesOutputSamples(false)
+ , mDiscontinuity(true)
+{
+ memset(&mInputStreamInfo, 0, sizeof(MFT_INPUT_STREAM_INFO));
+ memset(&mOutputStreamInfo, 0, sizeof(MFT_OUTPUT_STREAM_INFO));
+}
+
+MFTDecoder::~MFTDecoder()
+{
+}
+
+HRESULT
+MFTDecoder::Create(const GUID& aMFTClsID)
+{
+ // Create the IMFTransform to do the decoding.
+ HRESULT hr;
+ hr = CoCreateInstance(aMFTClsID,
+ nullptr,
+ CLSCTX_INPROC_SERVER,
+ IID_IMFTransform,
+ reinterpret_cast<void**>(static_cast<IMFTransform**>(getter_AddRefs(mDecoder))));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ return S_OK;
+}
+
+HRESULT
+MFTDecoder::SetMediaTypes(IMFMediaType* aInputType,
+ IMFMediaType* aOutputType,
+ ConfigureOutputCallback aCallback,
+ void* aData)
+{
+ mOutputType = aOutputType;
+
+ // Set the input type to the one the caller gave us...
+ HRESULT hr = mDecoder->SetInputType(0, aInputType, 0);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = SetDecoderOutputType(aCallback, aData);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = mDecoder->GetInputStreamInfo(0, &mInputStreamInfo);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = SendMFTMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ return S_OK;
+}
+
+already_AddRefed<IMFAttributes>
+MFTDecoder::GetAttributes()
+{
+ RefPtr<IMFAttributes> attr;
+ HRESULT hr = mDecoder->GetAttributes(getter_AddRefs(attr));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
+ return attr.forget();
+}
+
+HRESULT
+MFTDecoder::SetDecoderOutputType(ConfigureOutputCallback aCallback, void* aData)
+{
+ NS_ENSURE_TRUE(mDecoder != nullptr, E_POINTER);
+
+  // Iterate over the available output types until we find one compatible
+  // with what we need.
+ HRESULT hr;
+ RefPtr<IMFMediaType> outputType;
+ UINT32 typeIndex = 0;
+ while (SUCCEEDED(mDecoder->GetOutputAvailableType(0, typeIndex++, getter_AddRefs(outputType)))) {
+ BOOL resultMatch;
+ hr = mOutputType->Compare(outputType, MF_ATTRIBUTES_MATCH_OUR_ITEMS, &resultMatch);
+ if (SUCCEEDED(hr) && resultMatch == TRUE) {
+ if (aCallback) {
+ hr = aCallback(outputType, aData);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+ }
+ hr = mDecoder->SetOutputType(0, outputType, 0);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = mDecoder->GetOutputStreamInfo(0, &mOutputStreamInfo);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ mMFTProvidesOutputSamples = IsFlagSet(mOutputStreamInfo.dwFlags, MFT_OUTPUT_STREAM_PROVIDES_SAMPLES);
+
+ return S_OK;
+ }
+ outputType = nullptr;
+ }
+ return E_FAIL;
+}
+
+HRESULT
+MFTDecoder::SendMFTMessage(MFT_MESSAGE_TYPE aMsg, ULONG_PTR aData)
+{
+ NS_ENSURE_TRUE(mDecoder != nullptr, E_POINTER);
+ HRESULT hr = mDecoder->ProcessMessage(aMsg, aData);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+ return S_OK;
+}
+
+HRESULT
+MFTDecoder::CreateInputSample(const uint8_t* aData,
+ uint32_t aDataSize,
+ int64_t aTimestamp,
+ RefPtr<IMFSample>* aOutSample)
+{
+ NS_ENSURE_TRUE(mDecoder != nullptr, E_POINTER);
+
+ HRESULT hr;
+ RefPtr<IMFSample> sample;
+ hr = wmf::MFCreateSample(getter_AddRefs(sample));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ RefPtr<IMFMediaBuffer> buffer;
+ int32_t bufferSize = std::max<uint32_t>(uint32_t(mInputStreamInfo.cbSize), aDataSize);
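+  // MFCreateAlignedMemoryBuffer expects its alignment argument as (N - 1),
+  // e.g. MF_16_BYTE_ALIGNMENT == 0x0f, hence the cbAlignment - 1 below.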
+ UINT32 alignment = (mInputStreamInfo.cbAlignment > 1) ? mInputStreamInfo.cbAlignment - 1 : 0;
+ hr = wmf::MFCreateAlignedMemoryBuffer(bufferSize, alignment, getter_AddRefs(buffer));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ DWORD maxLength = 0;
+ DWORD currentLength = 0;
+ BYTE* dst = nullptr;
+ hr = buffer->Lock(&dst, &maxLength, &currentLength);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ // Copy data into sample's buffer.
+ memcpy(dst, aData, aDataSize);
+
+ hr = buffer->Unlock();
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = buffer->SetCurrentLength(aDataSize);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = sample->AddBuffer(buffer);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = sample->SetSampleTime(UsecsToHNs(aTimestamp));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ *aOutSample = sample.forget();
+
+ return S_OK;
+}
+
+HRESULT
+MFTDecoder::CreateOutputSample(RefPtr<IMFSample>* aOutSample)
+{
+ NS_ENSURE_TRUE(mDecoder != nullptr, E_POINTER);
+
+ HRESULT hr;
+ RefPtr<IMFSample> sample;
+ hr = wmf::MFCreateSample(getter_AddRefs(sample));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ RefPtr<IMFMediaBuffer> buffer;
+ int32_t bufferSize = mOutputStreamInfo.cbSize;
+ UINT32 alignment = (mOutputStreamInfo.cbAlignment > 1) ? mOutputStreamInfo.cbAlignment - 1 : 0;
+ hr = wmf::MFCreateAlignedMemoryBuffer(bufferSize, alignment, getter_AddRefs(buffer));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = sample->AddBuffer(buffer);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ *aOutSample = sample.forget();
+
+ return S_OK;
+}
+
+HRESULT
+MFTDecoder::Output(RefPtr<IMFSample>* aOutput)
+{
+ NS_ENSURE_TRUE(mDecoder != nullptr, E_POINTER);
+
+ HRESULT hr;
+
+ MFT_OUTPUT_DATA_BUFFER output = {0};
+
+ bool providedSample = false;
+ RefPtr<IMFSample> sample;
+ if (*aOutput) {
+ output.pSample = *aOutput;
+ providedSample = true;
+ } else if (!mMFTProvidesOutputSamples) {
+ hr = CreateOutputSample(&sample);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+ output.pSample = sample;
+ }
+
+ DWORD status = 0;
+ hr = mDecoder->ProcessOutput(0, 1, &output, &status);
+ if (output.pEvents) {
+ // We must release this, as per the IMFTransform::ProcessOutput()
+ // MSDN documentation.
+ output.pEvents->Release();
+ output.pEvents = nullptr;
+ }
+
+ if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
+ // Type change, probably geometric aperture change.
+ // Reconfigure decoder output type, so that GetOutputMediaType()
+ // returns the new type, and return the error code to caller.
+ // This is an expected failure, so don't warn on encountering it.
+ hr = SetDecoderOutputType(nullptr, nullptr);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+ // Return the error, so that the caller knows to retry.
+ return MF_E_TRANSFORM_STREAM_CHANGE;
+ }
+
+ if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
+ // Not enough input to produce output. This is an expected failure,
+ // so don't warn on encountering it.
+ return hr;
+ }
+ // Treat other errors as unexpected, and warn.
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ if (!output.pSample) {
+ return S_OK;
+ }
+
+ if (mDiscontinuity) {
+ output.pSample->SetUINT32(MFSampleExtension_Discontinuity, TRUE);
+ mDiscontinuity = false;
+ }
+
+ *aOutput = output.pSample; // AddRefs
+ if (mMFTProvidesOutputSamples && !providedSample) {
+ // If the MFT is providing samples, we must release the sample here.
+ // Typically only the H.264 MFT provides samples when using DXVA,
+ // and it always re-uses the same sample, so if we don't release it
+ // MFT::ProcessOutput() deadlocks waiting for the sample to be released.
+ output.pSample->Release();
+ output.pSample = nullptr;
+ }
+
+ return S_OK;
+}
+
+HRESULT
+MFTDecoder::Input(const uint8_t* aData,
+ uint32_t aDataSize,
+ int64_t aTimestamp)
+{
+ NS_ENSURE_TRUE(mDecoder != nullptr, E_POINTER);
+
+ RefPtr<IMFSample> input;
+ HRESULT hr = CreateInputSample(aData, aDataSize, aTimestamp, &input);
+ NS_ENSURE_TRUE(SUCCEEDED(hr) && input != nullptr, hr);
+
+ return Input(input);
+}
+
+HRESULT
+MFTDecoder::Input(IMFSample* aSample)
+{
+ HRESULT hr = mDecoder->ProcessInput(0, aSample, 0);
+ if (hr == MF_E_NOTACCEPTING) {
+ // MFT *already* has enough data to produce a sample. Retrieve it.
+ return MF_E_NOTACCEPTING;
+ }
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ return S_OK;
+}
+
+HRESULT
+MFTDecoder::Flush()
+{
+ HRESULT hr = SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH, 0);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ mDiscontinuity = true;
+
+ return S_OK;
+}
+
+HRESULT
+MFTDecoder::GetOutputMediaType(RefPtr<IMFMediaType>& aMediaType)
+{
+ NS_ENSURE_TRUE(mDecoder, E_POINTER);
+ return mDecoder->GetOutputCurrentType(0, getter_AddRefs(aMediaType));
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/wmf/MFTDecoder.h b/dom/media/platforms/wmf/MFTDecoder.h
new file mode 100644
index 000000000..91c18f18c
--- /dev/null
+++ b/dom/media/platforms/wmf/MFTDecoder.h
@@ -0,0 +1,111 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(MFTDecoder_h_)
+#define MFTDecoder_h_
+
+#include "WMF.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/ReentrantMonitor.h"
+#include "nsIThread.h"
+
+namespace mozilla {
+
+class MFTDecoder final {
+ ~MFTDecoder();
+
+public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MFTDecoder)
+
+ MFTDecoder();
+
+ // Creates the MFT. First thing to do as part of setup.
+ //
+ // Params:
+ // - aMFTClsID the clsid used by CoCreateInstance to instantiate the
+ // decoder MFT.
+ HRESULT Create(const GUID& aMFTClsID);
+
+  // Sets the input and output media types. Call after Create().
+ //
+ // Params:
+ // - aInputType needs at least major and minor types set.
+ // - aOutputType needs at least major and minor types set.
+ // This is used to select the matching output type out
+ // of all the available output types of the MFT.
+ typedef HRESULT (*ConfigureOutputCallback)(IMFMediaType* aOutputType, void* aData);
+ HRESULT SetMediaTypes(IMFMediaType* aInputType,
+ IMFMediaType* aOutputType,
+ ConfigureOutputCallback aCallback = nullptr,
+ void* aData = nullptr);
+
+ // Returns the MFT's IMFAttributes object.
+ already_AddRefed<IMFAttributes> GetAttributes();
+
+ // Retrieves the media type being output. This may not be valid until
+ // the first sample is decoded.
+ HRESULT GetOutputMediaType(RefPtr<IMFMediaType>& aMediaType);
+
+ // Submits data into the MFT for processing.
+ //
+ // Returns:
+ // - MF_E_NOTACCEPTING if the decoder can't accept input. The data
+ // must be resubmitted after Output() stops producing output.
+ HRESULT Input(const uint8_t* aData,
+ uint32_t aDataSize,
+ int64_t aTimestampUsecs);
+ HRESULT Input(IMFSample* aSample);
+
+ HRESULT CreateInputSample(const uint8_t* aData,
+ uint32_t aDataSize,
+ int64_t aTimestampUsecs,
+ RefPtr<IMFSample>* aOutSample);
+
+ // Retrieves output from the MFT. Call this once Input() returns
+ // MF_E_NOTACCEPTING. Some MFTs with hardware acceleration (the H.264
+ // decoder MFT in particular) can't handle it if clients hold onto
+ // references to the output IMFSample, so don't do that.
+ //
+ // Returns:
+ // - MF_E_TRANSFORM_STREAM_CHANGE if the underlying stream output
+ // type changed. Retrieve the output media type and reconfig client,
+ // else you may misinterpret the MFT's output.
+ // - MF_E_TRANSFORM_NEED_MORE_INPUT if no output can be produced
+ // due to lack of input.
+ // - S_OK if an output frame is produced.
+ HRESULT Output(RefPtr<IMFSample>* aOutput);
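+  //
+  // Illustrative drive loop (a sketch, not code from this class); callers
+  // typically alternate Input() and Output() along these lines:
+  //
+  //   hr = decoder->Input(data, size, timestamp);
+  //   // On MF_E_NOTACCEPTING, drain output below and then resubmit.
+  //   RefPtr<IMFSample> output;
+  //   while (true) {
+  //     hr = decoder->Output(&output);
+  //     if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) break;   // feed more input
+  //     if (hr == MF_E_TRANSFORM_STREAM_CHANGE) continue;  // type updated, retry
+  //     if (FAILED(hr)) return hr;
+  //     // ... consume the output sample ...
+  //   }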
+
+ // Sends a flush message to the MFT. This causes it to discard all
+ // input data. Use before seeking.
+ HRESULT Flush();
+
+ // Sends a message to the MFT.
+ HRESULT SendMFTMessage(MFT_MESSAGE_TYPE aMsg, ULONG_PTR aData);
+
+  HRESULT SetDecoderOutputType(ConfigureOutputCallback aCallback, void* aData);
+
+private:
+ HRESULT CreateOutputSample(RefPtr<IMFSample>* aOutSample);
+
+ MFT_INPUT_STREAM_INFO mInputStreamInfo;
+ MFT_OUTPUT_STREAM_INFO mOutputStreamInfo;
+
+ RefPtr<IMFTransform> mDecoder;
+
+ RefPtr<IMFMediaType> mOutputType;
+
+ // True if the IMFTransform allocates the samples that it returns.
+ bool mMFTProvidesOutputSamples;
+
+ // True if we need to mark the next sample as a discontinuity.
+ bool mDiscontinuity;
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/platforms/wmf/WMF.h b/dom/media/platforms/wmf/WMF.h
new file mode 100644
index 000000000..5ede0d361
--- /dev/null
+++ b/dom/media/platforms/wmf/WMF.h
@@ -0,0 +1,104 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WMF_H_
+#define WMF_H_
+
+#if WINVER < _WIN32_WINNT_WIN7
+#error \
+You must include WMF.h before including mozilla headers, \
+otherwise mozconfig.h will be included \
+and that sets WINVER to WinXP, \
+which makes Windows Media Foundation unavailable.
+#endif
+
+#pragma push_macro("WINVER")
+#undef WINVER
+#define WINVER _WIN32_WINNT_WIN7
+
+#include <windows.h>
+#include <mfapi.h>
+#include <mfidl.h>
+#include <mfreadwrite.h>
+#include <mfobjects.h>
+#include <ks.h>
+#include <stdio.h>
+#include <mferror.h>
+#include <propvarutil.h>
+#include <wmcodecdsp.h>
+#include <d3d9.h>
+#include <dxva2api.h>
+#include <codecapi.h>
+
+// The Windows headers helpfully declare min and max macros, which don't
+// compile in the presence of std::min and std::max and unified builds.
+// So undef them here.
+#ifdef min
+#undef min
+#endif
+#ifdef max
+#undef max
+#endif
+
+// Some SDK versions don't define the AAC decoder CLSID.
+#ifndef CLSID_CMSAACDecMFT
+extern "C" const CLSID CLSID_CMSAACDecMFT;
+#define WMF_MUST_DEFINE_AAC_MFT_CLSID
+#endif
+
+namespace mozilla {
+namespace wmf {
+
+// If successful, loads all required WMF DLLs and calls the WMF MFStartup()
+// function.
+HRESULT MFStartup();
+
+// Calls the WMF MFShutdown() function. Call this once for every time
+// wmf::MFStartup() succeeds. Note: does not unload the WMF DLLs loaded by
+// MFStartup(); leaves them in memory to save I/O at next MFStartup() call.
+HRESULT MFShutdown();
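+
+// Illustrative pairing (a sketch, not part of this header):
+//
+//   if (SUCCEEDED(wmf::MFStartup())) {
+//     // ... use the wrappers below ...
+//     wmf::MFShutdown();
+//   }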
+
+// All functions below are wrappers around the corresponding WMF function,
+// and automatically locate and call the corresponding function in the WMF DLLs.
+
+HRESULT MFCreateMediaType(IMFMediaType **aOutMFType);
+
+HRESULT MFGetStrideForBitmapInfoHeader(DWORD aFormat,
+ DWORD aWidth,
+ LONG *aOutStride);
+
+HRESULT MFGetService(IUnknown *punkObject,
+ REFGUID guidService,
+ REFIID riid,
+ LPVOID *ppvObject);
+
+HRESULT DXVA2CreateDirect3DDeviceManager9(UINT *pResetToken,
+ IDirect3DDeviceManager9 **ppDXVAManager);
+
+
+HRESULT MFCreateDXGIDeviceManager(UINT *pResetToken, IMFDXGIDeviceManager **ppDXVAManager);
+
+HRESULT MFCreateSample(IMFSample **ppIMFSample);
+
+HRESULT MFCreateAlignedMemoryBuffer(DWORD cbMaxLength,
+ DWORD fAlignmentFlags,
+ IMFMediaBuffer **ppBuffer);
+
+HRESULT MFCreateDXGISurfaceBuffer(REFIID riid,
+ IUnknown *punkSurface,
+ UINT uSubresourceIndex,
+                                  BOOL fBottomUpWhenLinear,
+ IMFMediaBuffer **ppBuffer);
+
+} // end namespace wmf
+} // end namespace mozilla
+
+
+
+#pragma pop_macro("WINVER")
+
+#endif
diff --git a/dom/media/platforms/wmf/WMFAudioMFTManager.cpp b/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
new file mode 100644
index 000000000..69b62da51
--- /dev/null
+++ b/dom/media/platforms/wmf/WMFAudioMFTManager.cpp
@@ -0,0 +1,358 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WMFAudioMFTManager.h"
+#include "MediaInfo.h"
+#include "VideoUtils.h"
+#include "WMFUtils.h"
+#include "nsTArray.h"
+#include "TimeUnits.h"
+#include "mozilla/Telemetry.h"
+#include "mozilla/Logging.h"
+
+#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
+
+namespace mozilla {
+
+static void
+AACAudioSpecificConfigToUserData(uint8_t aAACProfileLevelIndication,
+ const uint8_t* aAudioSpecConfig,
+ uint32_t aConfigLength,
+ nsTArray<BYTE>& aOutUserData)
+{
+ MOZ_ASSERT(aOutUserData.IsEmpty());
+
+ // The MF_MT_USER_DATA for AAC is defined here:
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/dd742784%28v=vs.85%29.aspx
+ //
+ // For MFAudioFormat_AAC, MF_MT_USER_DATA contains the portion of
+ // the HEAACWAVEINFO structure that appears after the WAVEFORMATEX
+ // structure (that is, after the wfx member). This is followed by
+ // the AudioSpecificConfig() data, as defined by ISO/IEC 14496-3.
+ // [...]
+ // The length of the AudioSpecificConfig() data is 2 bytes for AAC-LC
+ // or HE-AAC with implicit signaling of SBR/PS. It is more than 2 bytes
+ // for HE-AAC with explicit signaling of SBR/PS.
+ //
+ // The value of audioObjectType as defined in AudioSpecificConfig()
+ // must be 2, indicating AAC-LC. The value of extensionAudioObjectType
+ // must be 5 for SBR or 29 for PS.
+ //
+ // HEAACWAVEINFO structure:
+ // typedef struct heaacwaveinfo_tag {
+ // WAVEFORMATEX wfx;
+ // WORD wPayloadType;
+ // WORD wAudioProfileLevelIndication;
+ // WORD wStructType;
+ // WORD wReserved1;
+ // DWORD dwReserved2;
+ // }
+ const UINT32 heeInfoLen = 4 * sizeof(WORD) + sizeof(DWORD);
+
+ // The HEAACWAVEINFO must have payload and profile set,
+ // the rest can be all 0x00.
+ BYTE heeInfo[heeInfoLen] = {0};
+ WORD* w = (WORD*)heeInfo;
+ w[0] = 0x0; // Payload type raw AAC packet
+ w[1] = aAACProfileLevelIndication;
+
+ aOutUserData.AppendElements(heeInfo, heeInfoLen);
+
+ if (aAACProfileLevelIndication == 2 && aConfigLength > 2) {
+ // The AudioSpecificConfig is TTTTTFFF|FCCCCGGG
+ // (T=ObjectType, F=Frequency, C=Channel, G=GASpecificConfig)
+ // If frequency = 0xf, then the frequency is explicitly defined on 24 bits.
+ int8_t profile = (aAudioSpecConfig[0] & 0xF8) >> 3;
+ int8_t frequency =
+ (aAudioSpecConfig[0] & 0x7) << 1 | (aAudioSpecConfig[1] & 0x80) >> 7;
+ int8_t channels = (aAudioSpecConfig[1] & 0x78) >> 3;
+ int8_t gasc = aAudioSpecConfig[1] & 0x7;
+ if (frequency != 0xf && channels && !gasc) {
+      // We enter this condition if the AudioSpecificConfig should theoretically
+ // be 2 bytes long but it's not.
+ // The WMF AAC decoder will error if unknown extensions are found,
+ // so remove them.
+ aConfigLength = 2;
+ }
+ }
+ aOutUserData.AppendElements(aAudioSpecConfig, aConfigLength);
+}
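+
+// Illustrative example (not code from this file): for a typical 44.1 kHz
+// stereo AAC-LC stream the AudioSpecificConfig is the two bytes 0x12 0x10
+// (object type 2, frequency index 4, channel configuration 2), so the
+// MF_MT_USER_DATA built above is the 12-byte HEAACWAVEINFO tail
+// (wPayloadType = 0, wAudioProfileLevelIndication, the rest zero) followed
+// by 0x12 0x10.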
+
+WMFAudioMFTManager::WMFAudioMFTManager(
+ const AudioInfo& aConfig)
+ : mAudioChannels(aConfig.mChannels)
+ , mAudioRate(aConfig.mRate)
+ , mAudioFrameSum(0)
+ , mMustRecaptureAudioPosition(true)
+{
+ MOZ_COUNT_CTOR(WMFAudioMFTManager);
+
+ if (aConfig.mMimeType.EqualsLiteral("audio/mpeg")) {
+ mStreamType = MP3;
+ } else if (aConfig.mMimeType.EqualsLiteral("audio/mp4a-latm")) {
+ mStreamType = AAC;
+ AACAudioSpecificConfigToUserData(aConfig.mExtendedProfile,
+ aConfig.mCodecSpecificConfig->Elements(),
+ aConfig.mCodecSpecificConfig->Length(),
+ mUserData);
+ } else {
+ mStreamType = Unknown;
+ }
+}
+
+WMFAudioMFTManager::~WMFAudioMFTManager()
+{
+ MOZ_COUNT_DTOR(WMFAudioMFTManager);
+}
+
+const GUID&
+WMFAudioMFTManager::GetMFTGUID()
+{
+ MOZ_ASSERT(mStreamType != Unknown);
+ switch (mStreamType) {
+ case AAC: return CLSID_CMSAACDecMFT;
+ case MP3: return CLSID_CMP3DecMediaObject;
+ default: return GUID_NULL;
+ };
+}
+
+const GUID&
+WMFAudioMFTManager::GetMediaSubtypeGUID()
+{
+ MOZ_ASSERT(mStreamType != Unknown);
+ switch (mStreamType) {
+ case AAC: return MFAudioFormat_AAC;
+ case MP3: return MFAudioFormat_MP3;
+ default: return GUID_NULL;
+ };
+}
+
+bool
+WMFAudioMFTManager::Init()
+{
+ NS_ENSURE_TRUE(mStreamType != Unknown, false);
+
+ RefPtr<MFTDecoder> decoder(new MFTDecoder());
+
+ HRESULT hr = decoder->Create(GetMFTGUID());
+ NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+
+ // Setup input/output media types
+ RefPtr<IMFMediaType> inputType;
+
+ hr = wmf::MFCreateMediaType(getter_AddRefs(inputType));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+
+ hr = inputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Audio);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+
+ hr = inputType->SetGUID(MF_MT_SUBTYPE, GetMediaSubtypeGUID());
+ NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+
+ hr = inputType->SetUINT32(MF_MT_AUDIO_SAMPLES_PER_SECOND, mAudioRate);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+
+ hr = inputType->SetUINT32(MF_MT_AUDIO_NUM_CHANNELS, mAudioChannels);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+
+ if (mStreamType == AAC) {
+ hr = inputType->SetUINT32(MF_MT_AAC_PAYLOAD_TYPE, 0x0); // Raw AAC packet
+ NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+
+ hr = inputType->SetBlob(MF_MT_USER_DATA,
+ mUserData.Elements(),
+ mUserData.Length());
+ NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+ }
+
+ RefPtr<IMFMediaType> outputType;
+ hr = wmf::MFCreateMediaType(getter_AddRefs(outputType));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+
+ hr = outputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Audio);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+
+ hr = outputType->SetGUID(MF_MT_SUBTYPE, MFAudioFormat_PCM);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+
+ hr = outputType->SetUINT32(MF_MT_AUDIO_BITS_PER_SAMPLE, 16);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+
+ hr = decoder->SetMediaTypes(inputType, outputType);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+
+ mDecoder = decoder;
+
+ return true;
+}
+
+HRESULT
+WMFAudioMFTManager::Input(MediaRawData* aSample)
+{
+ return mDecoder->Input(aSample->Data(),
+ uint32_t(aSample->Size()),
+ aSample->mTime);
+}
+
+HRESULT
+WMFAudioMFTManager::UpdateOutputType()
+{
+ HRESULT hr;
+
+ RefPtr<IMFMediaType> type;
+ hr = mDecoder->GetOutputMediaType(type);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = type->GetUINT32(MF_MT_AUDIO_SAMPLES_PER_SECOND, &mAudioRate);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = type->GetUINT32(MF_MT_AUDIO_NUM_CHANNELS, &mAudioChannels);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ AudioConfig::ChannelLayout layout(mAudioChannels);
+ if (!layout.IsValid()) {
+ return E_FAIL;
+ }
+
+ return S_OK;
+}
+
+HRESULT
+WMFAudioMFTManager::Output(int64_t aStreamOffset,
+ RefPtr<MediaData>& aOutData)
+{
+ aOutData = nullptr;
+ RefPtr<IMFSample> sample;
+ HRESULT hr;
+ int typeChangeCount = 0;
+ while (true) {
+ hr = mDecoder->Output(&sample);
+ if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
+ return hr;
+ }
+ if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
+ hr = UpdateOutputType();
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+ // Catch infinite loops, but some decoders perform at least 2 stream
+ // changes on consecutive calls, so be permissive.
+ // 100 is arbitrarily > 2.
+ NS_ENSURE_TRUE(typeChangeCount < 100, MF_E_TRANSFORM_STREAM_CHANGE);
+ ++typeChangeCount;
+ continue;
+ }
+ break;
+ }
+
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ if (!sample) {
+ LOG("Audio MFTDecoder returned success but null output.");
+ nsCOMPtr<nsIRunnable> task = NS_NewRunnableFunction([]() -> void {
+ LOG("Reporting telemetry AUDIO_MFT_OUTPUT_NULL_SAMPLES");
+ Telemetry::Accumulate(Telemetry::ID::AUDIO_MFT_OUTPUT_NULL_SAMPLES, 1);
+ });
+ AbstractThread::MainThread()->Dispatch(task.forget());
+ return E_FAIL;
+ }
+
+ RefPtr<IMFMediaBuffer> buffer;
+ hr = sample->ConvertToContiguousBuffer(getter_AddRefs(buffer));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ BYTE* data = nullptr; // Note: *data will be owned by the IMFMediaBuffer, we don't need to free it.
+ DWORD maxLength = 0, currentLength = 0;
+ hr = buffer->Lock(&data, &maxLength, &currentLength);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ // Sometimes when starting decoding, the AAC decoder gives us samples
+  // with a negative timestamp. AAC usually has preroll (or encoder
+  // delay) encoded into its bitstream, but the amount encoded in the stream
+  // is variable and is not signalled in-bitstream. The MP4 container
+  // sometimes signals the preroll amount, but inconsistently. It looks
+  // like WMF's AAC decoder may take this into account, so strip off
+  // samples with a negative timestamp to get us
+ // to a 0-timestamp start. This seems to maintain A/V sync, so we can run
+ // with this until someone complains...
+
+ // We calculate the timestamp and the duration based on the number of audio
+ // frames we've already played. We don't trust the timestamp stored on the
+ // IMFSample, as sometimes it's wrong, possibly due to buggy encoders?
+
+ // If this sample block comes after a discontinuity (i.e. a gap or seek)
+ // reset the frame counters, and capture the timestamp. Future timestamps
+ // will be offset from this block's timestamp.
+ UINT32 discontinuity = false;
+ sample->GetUINT32(MFSampleExtension_Discontinuity, &discontinuity);
+ if (mMustRecaptureAudioPosition || discontinuity) {
+ // Update the output type, in case this segment has a different
+ // rate. This also triggers on the first sample, which can have a
+ // different rate than is advertised in the container, and sometimes we
+ // don't get a MF_E_TRANSFORM_STREAM_CHANGE when the rate changes.
+ hr = UpdateOutputType();
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ mAudioFrameSum = 0;
+ LONGLONG timestampHns = 0;
+ hr = sample->GetSampleTime(&timestampHns);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+ mAudioTimeOffset = media::TimeUnit::FromMicroseconds(timestampHns / 10);
+ mMustRecaptureAudioPosition = false;
+ }
+ // We can assume PCM 16 output.
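+  // 16-bit PCM is 2 bytes per sample, and samples are interleaved across
+  // channels, so frames = samples / channels.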
+ int32_t numSamples = currentLength / 2;
+ int32_t numFrames = numSamples / mAudioChannels;
+ MOZ_ASSERT(numFrames >= 0);
+ MOZ_ASSERT(numSamples >= 0);
+ if (numFrames == 0) {
+ // All data from this chunk stripped, loop back and try to output the next
+ // frame, if possible.
+ return S_OK;
+ }
+
+ AlignedAudioBuffer audioData(numSamples);
+ if (!audioData) {
+ return E_OUTOFMEMORY;
+ }
+
+ int16_t* pcm = (int16_t*)data;
+ for (int32_t i = 0; i < numSamples; ++i) {
+ audioData[i] = AudioSampleToFloat(pcm[i]);
+ }
+
+ buffer->Unlock();
+
+ media::TimeUnit timestamp =
+ mAudioTimeOffset + FramesToTimeUnit(mAudioFrameSum, mAudioRate);
+ NS_ENSURE_TRUE(timestamp.IsValid(), E_FAIL);
+
+ mAudioFrameSum += numFrames;
+
+ media::TimeUnit duration = FramesToTimeUnit(numFrames, mAudioRate);
+ NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
+
+ aOutData = new AudioData(aStreamOffset,
+ timestamp.ToMicroseconds(),
+ duration.ToMicroseconds(),
+ numFrames,
+ Move(audioData),
+ mAudioChannels,
+ mAudioRate);
+
+ #ifdef LOG_SAMPLE_DECODE
+ LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",
+ timestamp.ToMicroseconds(), duration.ToMicroseconds(), currentLength);
+ #endif
+
+ return S_OK;
+}
+
+void
+WMFAudioMFTManager::Shutdown()
+{
+ mDecoder = nullptr;
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/wmf/WMFAudioMFTManager.h b/dom/media/platforms/wmf/WMFAudioMFTManager.h
new file mode 100644
index 000000000..5bbbc6108
--- /dev/null
+++ b/dom/media/platforms/wmf/WMFAudioMFTManager.h
@@ -0,0 +1,78 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(WMFAudioOutputSource_h_)
+#define WMFAudioOutputSource_h_
+
+#include "WMF.h"
+#include "MFTDecoder.h"
+#include "mozilla/RefPtr.h"
+#include "WMFMediaDataDecoder.h"
+
+extern const GUID CLSID_WebmMfVpxDec;
+
+namespace mozilla {
+
+class WMFAudioMFTManager : public MFTManager {
+public:
+ WMFAudioMFTManager(const AudioInfo& aConfig);
+ ~WMFAudioMFTManager();
+
+ bool Init();
+
+ HRESULT Input(MediaRawData* aSample) override;
+
+  // Note WMF's AAC decoder sometimes outputs negatively timestamped samples,
+  // presumably the preroll samples, and we strip them. We may return
+ // a null aOutput in this case.
+ HRESULT Output(int64_t aStreamOffset,
+ RefPtr<MediaData>& aOutput) override;
+
+ void Shutdown() override;
+
+ TrackInfo::TrackType GetType() override {
+ return TrackInfo::kAudioTrack;
+ }
+
+ const char* GetDescriptionName() const override
+ {
+ return "wmf audio decoder";
+ }
+
+private:
+
+ HRESULT UpdateOutputType();
+
+ uint32_t mAudioChannels;
+ uint32_t mAudioRate;
+ nsTArray<BYTE> mUserData;
+
+ // The offset, at which playback started since the
+ // last discontinuity.
+ media::TimeUnit mAudioTimeOffset;
+ // The number of audio frames that we've played since the last
+ // discontinuity.
+ int64_t mAudioFrameSum;
+
+ enum StreamType {
+ Unknown,
+ AAC,
+ MP3
+ };
+ StreamType mStreamType;
+
+ const GUID& GetMFTGUID();
+ const GUID& GetMediaSubtypeGUID();
+
+ // True if we need to re-initialize mAudioTimeOffset and mAudioFrameSum
+ // from the next audio packet we decode. This happens after a seek, since
+ // WMF doesn't mark a stream as having a discontinuity after a seek(0).
+ bool mMustRecaptureAudioPosition;
+};
+
+} // namespace mozilla
+
+#endif // WMFAudioOutputSource_h_
diff --git a/dom/media/platforms/wmf/WMFDecoderModule.cpp b/dom/media/platforms/wmf/WMFDecoderModule.cpp
new file mode 100644
index 000000000..06bf49fa6
--- /dev/null
+++ b/dom/media/platforms/wmf/WMFDecoderModule.cpp
@@ -0,0 +1,257 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WMF.h"
+#include "WMFDecoderModule.h"
+#include "WMFVideoMFTManager.h"
+#include "WMFAudioMFTManager.h"
+#include "MFTDecoder.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Services.h"
+#include "WMFMediaDataDecoder.h"
+#include "nsAutoPtr.h"
+#include "nsIWindowsRegKey.h"
+#include "nsComponentManagerUtils.h"
+#include "nsServiceManagerUtils.h"
+#include "nsIGfxInfo.h"
+#include "nsWindowsHelpers.h"
+#include "GfxDriverInfo.h"
+#include "mozilla/gfx/gfxVars.h"
+#include "MediaInfo.h"
+#include "MediaPrefs.h"
+#include "prsystem.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/StaticMutex.h"
+#include "mozilla/WindowsVersion.h"
+#include "MP4Decoder.h"
+#include "VPXDecoder.h"
+
+namespace mozilla {
+
+static Atomic<bool> sDXVAEnabled(false);
+
+WMFDecoderModule::WMFDecoderModule()
+ : mWMFInitialized(false)
+{
+}
+
+WMFDecoderModule::~WMFDecoderModule()
+{
+ if (mWMFInitialized) {
+ DebugOnly<HRESULT> hr = wmf::MFShutdown();
+ NS_ASSERTION(SUCCEEDED(hr), "MFShutdown failed");
+ }
+}
+
+/* static */
+void
+WMFDecoderModule::Init()
+{
+ sDXVAEnabled = gfx::gfxVars::CanUseHardwareVideoDecoding();
+}
+
+/* static */
+int
+WMFDecoderModule::GetNumDecoderThreads()
+{
+ int32_t numCores = PR_GetNumberOfProcessors();
+
+ // If we have more than 4 cores, let the decoder decide how many threads.
+ // On an 8 core machine, WMF chooses 4 decoder threads
+ const int WMF_DECODER_DEFAULT = -1;
+ int32_t prefThreadCount = WMF_DECODER_DEFAULT;
+ if (XRE_GetProcessType() != GeckoProcessType_GPU) {
+ prefThreadCount = MediaPrefs::PDMWMFThreadCount();
+ }
+ if (prefThreadCount != WMF_DECODER_DEFAULT) {
+ return std::max(prefThreadCount, 1);
+ } else if (numCores > 4) {
+ return WMF_DECODER_DEFAULT;
+ }
+ return std::max(numCores - 1, 1);
+}
+
+nsresult
+WMFDecoderModule::Startup()
+{
+ mWMFInitialized = SUCCEEDED(wmf::MFStartup());
+ return mWMFInitialized ? NS_OK : NS_ERROR_FAILURE;
+}
+
+already_AddRefed<MediaDataDecoder>
+WMFDecoderModule::CreateVideoDecoder(const CreateDecoderParams& aParams)
+{
+ nsAutoPtr<WMFVideoMFTManager> manager(
+ new WMFVideoMFTManager(aParams.VideoConfig(),
+ aParams.mKnowsCompositor,
+ aParams.mImageContainer,
+ sDXVAEnabled));
+
+ if (!manager->Init()) {
+ return nullptr;
+ }
+
+ RefPtr<MediaDataDecoder> decoder =
+ new WMFMediaDataDecoder(manager.forget(), aParams.mTaskQueue, aParams.mCallback);
+
+ return decoder.forget();
+}
+
+already_AddRefed<MediaDataDecoder>
+WMFDecoderModule::CreateAudioDecoder(const CreateDecoderParams& aParams)
+{
+ nsAutoPtr<WMFAudioMFTManager> manager(new WMFAudioMFTManager(aParams.AudioConfig()));
+
+ if (!manager->Init()) {
+ return nullptr;
+ }
+
+ RefPtr<MediaDataDecoder> decoder =
+ new WMFMediaDataDecoder(manager.forget(), aParams.mTaskQueue, aParams.mCallback);
+ return decoder.forget();
+}
+
+static bool
+CanCreateMFTDecoder(const GUID& aGuid)
+{
+ if (FAILED(wmf::MFStartup())) {
+ return false;
+ }
+ bool hasdecoder = false;
+ {
+ RefPtr<MFTDecoder> decoder(new MFTDecoder());
+ hasdecoder = SUCCEEDED(decoder->Create(aGuid));
+ }
+ wmf::MFShutdown();
+ return hasdecoder;
+}
+
+template<const GUID& aGuid>
+static bool
+CanCreateWMFDecoder()
+{
+ static StaticMutex sMutex;
+ StaticMutexAutoLock lock(sMutex);
+ static Maybe<bool> result;
+ if (result.isNothing()) {
+ result.emplace(CanCreateMFTDecoder(aGuid));
+ }
+ return result.value();
+}
+
+static bool
+IsH264DecoderBlacklisted()
+{
+#ifdef BLACKLIST_CRASHY_H264_DECODERS
+ WCHAR systemPath[MAX_PATH + 1];
+ if (!ConstructSystem32Path(L"msmpeg2vdec.dll", systemPath, MAX_PATH + 1)) {
+ // Cannot build path -> Assume it's not the blacklisted DLL.
+ return false;
+ }
+
+ DWORD zero;
+ DWORD infoSize = GetFileVersionInfoSizeW(systemPath, &zero);
+ if (infoSize == 0) {
+ // Can't get file info -> Assume we don't have the blacklisted DLL.
+ return false;
+ }
+ auto infoData = MakeUnique<unsigned char[]>(infoSize);
+ VS_FIXEDFILEINFO *vInfo;
+ UINT vInfoLen;
+ if (GetFileVersionInfoW(systemPath, 0, infoSize, infoData.get()) &&
+ VerQueryValueW(infoData.get(), L"\\", (LPVOID*)&vInfo, &vInfoLen))
+ {
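+    // dwFileVersionMS packs the major.minor fields and dwFileVersionLS packs
+    // build.revision, so 12.0.9200.16426 is ((12 << 16) | 0) and
+    // ((9200 << 16) | 16426) respectively.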
+ if ((vInfo->dwFileVersionMS == ((12u << 16) | 0u))
+ && ((vInfo->dwFileVersionLS == ((9200u << 16) | 16426u))
+ || (vInfo->dwFileVersionLS == ((9200u << 16) | 17037u)))) {
+ // 12.0.9200.16426 & .17037 are blacklisted on Win64, see bug 1242343.
+ return true;
+ }
+ }
+#endif // BLACKLIST_CRASHY_H264_DECODERS
+ return false;
+}
+
+/* static */ bool
+WMFDecoderModule::HasH264()
+{
+ if (IsH264DecoderBlacklisted()) {
+ return false;
+ }
+ return CanCreateWMFDecoder<CLSID_CMSH264DecoderMFT>();
+}
+
+/* static */ bool
+WMFDecoderModule::HasAAC()
+{
+ return CanCreateWMFDecoder<CLSID_CMSAACDecMFT>();
+}
+
+bool
+WMFDecoderModule::SupportsMimeType(const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const
+{
+ UniquePtr<TrackInfo> trackInfo = CreateTrackInfoWithMIMEType(aMimeType);
+ if (!trackInfo) {
+ return false;
+ }
+ return Supports(*trackInfo, aDiagnostics);
+}
+
+bool
+WMFDecoderModule::Supports(const TrackInfo& aTrackInfo,
+ DecoderDoctorDiagnostics* aDiagnostics) const
+{
+ if ((aTrackInfo.mMimeType.EqualsLiteral("audio/mp4a-latm") ||
+ aTrackInfo.mMimeType.EqualsLiteral("audio/mp4")) &&
+ WMFDecoderModule::HasAAC()) {
+ return true;
+ }
+ if (MP4Decoder::IsH264(aTrackInfo.mMimeType) && WMFDecoderModule::HasH264()) {
+ const VideoInfo* videoInfo = aTrackInfo.GetAsVideoInfo();
+ MOZ_ASSERT(videoInfo);
+ // Check Windows format constraints, based on:
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/dd797815(v=vs.85).aspx
+ if (IsWin8OrLater()) {
+ // Windows >7 supports at most 4096x2304.
+ if (videoInfo->mImage.width > 4096 || videoInfo->mImage.height > 2304) {
+ return false;
+ }
+ } else {
+ // Windows <=7 supports at most 1920x1088.
+ if (videoInfo->mImage.width > 1920 || videoInfo->mImage.height > 1088) {
+ return false;
+ }
+ }
+ return true;
+ }
+ if (aTrackInfo.mMimeType.EqualsLiteral("audio/mpeg") &&
+ CanCreateWMFDecoder<CLSID_CMP3DecMediaObject>()) {
+ return true;
+ }
+ if (MediaPrefs::PDMWMFVP9DecoderEnabled() && sDXVAEnabled) {
+ if ((VPXDecoder::IsVP8(aTrackInfo.mMimeType) ||
+ VPXDecoder::IsVP9(aTrackInfo.mMimeType)) &&
+ CanCreateWMFDecoder<CLSID_WebmMfVpxDec>()) {
+ return true;
+ }
+ }
+
+ // Some unsupported codec.
+ return false;
+}
+
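+// The WMF H.264 MFT consumes Annex B (start-code delimited) NAL units,
+// whereas MP4 carries AVCC (length-prefixed) samples, so H.264 input needs
+// Annex B conversion; everything else is passed through untouched.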
+PlatformDecoderModule::ConversionRequired
+WMFDecoderModule::DecoderNeedsConversion(const TrackInfo& aConfig) const
+{
+ if (aConfig.IsVideo() && MP4Decoder::IsH264(aConfig.mMimeType)) {
+ return ConversionRequired::kNeedAnnexB;
+ } else {
+ return ConversionRequired::kNeedNone;
+ }
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/wmf/WMFDecoderModule.h b/dom/media/platforms/wmf/WMFDecoderModule.h
new file mode 100644
index 000000000..cd7b8c660
--- /dev/null
+++ b/dom/media/platforms/wmf/WMFDecoderModule.h
@@ -0,0 +1,56 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(WMFPlatformDecoderModule_h_)
+#define WMFPlatformDecoderModule_h_
+
+#include "PlatformDecoderModule.h"
+
+namespace mozilla {
+
+class WMFDecoderModule : public PlatformDecoderModule {
+public:
+ WMFDecoderModule();
+ virtual ~WMFDecoderModule();
+
+ // Initializes the module, loads required dynamic libraries, etc.
+ nsresult Startup() override;
+
+ already_AddRefed<MediaDataDecoder>
+ CreateVideoDecoder(const CreateDecoderParams& aParams) override;
+
+ already_AddRefed<MediaDataDecoder>
+ CreateAudioDecoder(const CreateDecoderParams& aParams) override;
+
+ bool SupportsMimeType(const nsACString& aMimeType,
+ DecoderDoctorDiagnostics* aDiagnostics) const override;
+ bool Supports(const TrackInfo& aTrackInfo,
+ DecoderDoctorDiagnostics* aDiagnostics) const override;
+
+ ConversionRequired
+ DecoderNeedsConversion(const TrackInfo& aConfig) const override;
+
+ // Called on main thread.
+ static void Init();
+
+  // Called from any thread; Init() must have been called first.
+ static int GetNumDecoderThreads();
+
+ // Accessors that report whether we have the required MFTs available
+ // on the system to play various codecs. Windows Vista doesn't have the
+ // H.264/AAC decoders if the "Platform Update Supplement for Windows Vista"
+  // is not installed, and Windows N and KN variants also require a "Media
+ // Feature Pack" to be installed. Windows XP doesn't have WMF.
+ static bool HasAAC();
+ static bool HasH264();
+
+private:
+ bool mWMFInitialized;
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp b/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp
new file mode 100644
index 000000000..d2c13eac7
--- /dev/null
+++ b/dom/media/platforms/wmf/WMFMediaDataDecoder.cpp
@@ -0,0 +1,227 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WMFMediaDataDecoder.h"
+#include "VideoUtils.h"
+#include "WMFUtils.h"
+#include "nsTArray.h"
+#include "mozilla/Telemetry.h"
+
+#include "mozilla/Logging.h"
+#include "mozilla/SyncRunnable.h"
+
+#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
+
+namespace mozilla {
+
+WMFMediaDataDecoder::WMFMediaDataDecoder(MFTManager* aMFTManager,
+ TaskQueue* aTaskQueue,
+ MediaDataDecoderCallback* aCallback)
+ : mTaskQueue(aTaskQueue)
+ , mCallback(aCallback)
+ , mMFTManager(aMFTManager)
+ , mIsFlushing(false)
+ , mIsShutDown(false)
+{
+}
+
+WMFMediaDataDecoder::~WMFMediaDataDecoder()
+{
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+WMFMediaDataDecoder::Init()
+{
+ MOZ_ASSERT(!mIsShutDown);
+ return InitPromise::CreateAndResolve(mMFTManager->GetType(), __func__);
+}
+
+// A single telemetry sample is reported for each MediaDataDecoder object
+// that has detected an error or produced output successfully.
+static void
+SendTelemetry(unsigned long hr)
+{
+ // Collapse the error codes into a range of 0-0xff that can be viewed in
+ // telemetry histograms. For most MF_E_* errors, unique samples are used,
+ // retaining the least significant 7 or 8 bits. Other error codes are
+ // bucketed.
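+  // For example, an hr of 0xc00d36b4 falls in the first MF_E_* range and is
+  // recorded as sample 0xb4.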
+ uint32_t sample;
+ if (SUCCEEDED(hr)) {
+ sample = 0;
+ } else if (hr < 0xc00d36b0) {
+ sample = 1; // low bucket
+ } else if (hr < 0xc00d3700) {
+ sample = hr & 0xffU; // MF_E_*
+ } else if (hr <= 0xc00d3705) {
+ sample = 0x80 + (hr & 0xfU); // more MF_E_*
+ } else if (hr < 0xc00d6d60) {
+ sample = 2; // mid bucket
+ } else if (hr <= 0xc00d6d78) {
+ sample = hr & 0xffU; // MF_E_TRANSFORM_*
+ } else {
+ sample = 3; // high bucket
+ }
+
+ nsCOMPtr<nsIRunnable> runnable = NS_NewRunnableFunction(
+ [sample] {
+ Telemetry::Accumulate(Telemetry::MEDIA_WMF_DECODE_ERROR, sample);
+ });
+ NS_DispatchToMainThread(runnable);
+}
+
+void
+WMFMediaDataDecoder::Shutdown()
+{
+ MOZ_DIAGNOSTIC_ASSERT(!mIsShutDown);
+
+ if (mTaskQueue) {
+ mTaskQueue->Dispatch(NewRunnableMethod(this, &WMFMediaDataDecoder::ProcessShutdown));
+ } else {
+ ProcessShutdown();
+ }
+ mIsShutDown = true;
+}
+
+void
+WMFMediaDataDecoder::ProcessShutdown()
+{
+ if (mMFTManager) {
+ mMFTManager->Shutdown();
+ mMFTManager = nullptr;
+ if (!mRecordedError && mHasSuccessfulOutput) {
+ SendTelemetry(S_OK);
+ }
+ }
+}
+
+// Inserts data into the decoder's pipeline.
+void
+WMFMediaDataDecoder::Input(MediaRawData* aSample)
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ MOZ_DIAGNOSTIC_ASSERT(!mIsShutDown);
+
+ nsCOMPtr<nsIRunnable> runnable =
+ NewRunnableMethod<RefPtr<MediaRawData>>(
+ this,
+ &WMFMediaDataDecoder::ProcessDecode,
+ RefPtr<MediaRawData>(aSample));
+ mTaskQueue->Dispatch(runnable.forget());
+}
+
+void
+WMFMediaDataDecoder::ProcessDecode(MediaRawData* aSample)
+{
+ if (mIsFlushing) {
+    // Skip this sample; it will be released when the runnable is destroyed.
+ return;
+ }
+
+ HRESULT hr = mMFTManager->Input(aSample);
+ if (FAILED(hr)) {
+ NS_WARNING("MFTManager rejected sample");
+ mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("MFTManager::Input:%x", hr)));
+ if (!mRecordedError) {
+ SendTelemetry(hr);
+ mRecordedError = true;
+ }
+ return;
+ }
+
+ mLastStreamOffset = aSample->mOffset;
+
+ ProcessOutput();
+}
+
+void
+WMFMediaDataDecoder::ProcessOutput()
+{
+ RefPtr<MediaData> output;
+ HRESULT hr = S_OK;
+ while (SUCCEEDED(hr = mMFTManager->Output(mLastStreamOffset, output)) &&
+ output) {
+ mHasSuccessfulOutput = true;
+ mCallback->Output(output);
+ }
+ if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
+ mCallback->InputExhausted();
+ } else if (FAILED(hr)) {
+ NS_WARNING("WMFMediaDataDecoder failed to output data");
+ mCallback->Error(MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("MFTManager::Output:%x", hr)));
+ if (!mRecordedError) {
+ SendTelemetry(hr);
+ mRecordedError = true;
+ }
+ }
+}
+
+void
+WMFMediaDataDecoder::ProcessFlush()
+{
+ if (mMFTManager) {
+ mMFTManager->Flush();
+ }
+}
+
+void
+WMFMediaDataDecoder::Flush()
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ MOZ_DIAGNOSTIC_ASSERT(!mIsShutDown);
+
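+  // While mIsFlushing is set, ProcessDecode() tasks already queued on
+  // mTaskQueue drop their samples, so the synchronous flush below effectively
+  // discards any pending input.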
+ mIsFlushing = true;
+ nsCOMPtr<nsIRunnable> runnable =
+ NewRunnableMethod(this, &WMFMediaDataDecoder::ProcessFlush);
+ SyncRunnable::DispatchToThread(mTaskQueue, runnable);
+ mIsFlushing = false;
+}
+
+void
+WMFMediaDataDecoder::ProcessDrain()
+{
+ if (!mIsFlushing && mMFTManager) {
+ // Order the decoder to drain...
+ mMFTManager->Drain();
+ // Then extract all available output.
+ ProcessOutput();
+ }
+ mCallback->DrainComplete();
+}
+
+void
+WMFMediaDataDecoder::Drain()
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ MOZ_DIAGNOSTIC_ASSERT(!mIsShutDown);
+
+ mTaskQueue->Dispatch(NewRunnableMethod(this, &WMFMediaDataDecoder::ProcessDrain));
+}
+
+bool
+WMFMediaDataDecoder::IsHardwareAccelerated(nsACString& aFailureReason) const {
+ MOZ_ASSERT(!mIsShutDown);
+
+ return mMFTManager && mMFTManager->IsHardwareAccelerated(aFailureReason);
+}
+
+void
+WMFMediaDataDecoder::SetSeekThreshold(const media::TimeUnit& aTime)
+{
+ MOZ_ASSERT(mCallback->OnReaderTaskQueue());
+ MOZ_DIAGNOSTIC_ASSERT(!mIsShutDown);
+
+ RefPtr<WMFMediaDataDecoder> self = this;
+ nsCOMPtr<nsIRunnable> runnable =
+ NS_NewRunnableFunction([self, aTime]() {
+ media::TimeUnit threshold = aTime;
+ self->mMFTManager->SetSeekThreshold(threshold);
+ });
+ mTaskQueue->Dispatch(runnable.forget());
+}
+
+} // namespace mozilla
diff --git a/dom/media/platforms/wmf/WMFMediaDataDecoder.h b/dom/media/platforms/wmf/WMFMediaDataDecoder.h
new file mode 100644
index 000000000..a4dd49f56
--- /dev/null
+++ b/dom/media/platforms/wmf/WMFMediaDataDecoder.h
@@ -0,0 +1,147 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(WMFMediaDataDecoder_h_)
+#define WMFMediaDataDecoder_h_
+
+
+#include "WMF.h"
+#include "MFTDecoder.h"
+#include "mozilla/RefPtr.h"
+#include "nsAutoPtr.h"
+#include "PlatformDecoderModule.h"
+
+namespace mozilla {
+
+// Encapsulates the initialization of the MFTDecoder appropriate for decoding
+// a given stream, and the process of converting the IMFSample produced
+// by the MFT into a MediaData object.
+class MFTManager {
+public:
+ virtual ~MFTManager() {}
+
+ // Submit a compressed sample for decoding.
+ // This should forward to the MFTDecoder after performing
+ // any required sample formatting.
+ virtual HRESULT Input(MediaRawData* aSample) = 0;
+
+ // Produces decoded output, if possible. Blocks until output can be produced,
+  // or until no more output can be produced.
+ // Returns S_OK on success, or MF_E_TRANSFORM_NEED_MORE_INPUT if there's not
+ // enough data to produce more output. If this returns a failure code other
+ // than MF_E_TRANSFORM_NEED_MORE_INPUT, an error will be reported to the
+ // MP4Reader.
+ virtual HRESULT Output(int64_t aStreamOffset,
+ RefPtr<MediaData>& aOutput) = 0;
+
+ virtual void Flush()
+ {
+ mDecoder->Flush();
+ mSeekTargetThreshold.reset();
+ }
+
+ virtual void Drain()
+ {
+ if (FAILED(mDecoder->SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN, 0))) {
+ NS_WARNING("Failed to send DRAIN command to MFT");
+ }
+ }
+
+ // Destroys all resources.
+ virtual void Shutdown() = 0;
+
+ virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const { return false; }
+
+ virtual TrackInfo::TrackType GetType() = 0;
+
+ virtual const char* GetDescriptionName() const = 0;
+
+ virtual void SetSeekThreshold(const media::TimeUnit& aTime) {
+ mSeekTargetThreshold = Some(aTime);
+ }
+
+protected:
+ // IMFTransform wrapper that performs the decoding.
+ RefPtr<MFTDecoder> mDecoder;
+
+ Maybe<media::TimeUnit> mSeekTargetThreshold;
+};
+
+// Decodes audio and video using Windows Media Foundation. Samples are decoded
+// using the MFTDecoder created by the MFTManager. This class implements
+// the higher-level logic that drives mapping the MFT to the async
+// MediaDataDecoder interface. The specifics of decoding the exact stream
+// type are handled by MFTManager and the MFTDecoder it creates.
+class WMFMediaDataDecoder : public MediaDataDecoder {
+public:
+ WMFMediaDataDecoder(MFTManager* aOutputSource,
+ TaskQueue* aTaskQueue,
+ MediaDataDecoderCallback* aCallback);
+ ~WMFMediaDataDecoder();
+
+ RefPtr<MediaDataDecoder::InitPromise> Init() override;
+
+ void Input(MediaRawData* aSample);
+
+ void Flush() override;
+
+ void Drain() override;
+
+ void Shutdown() override;
+
+ bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
+
+ const char* GetDescriptionName() const override
+ {
+ return mMFTManager ? mMFTManager->GetDescriptionName() : "";
+ }
+
+ virtual void SetSeekThreshold(const media::TimeUnit& aTime) override;
+
+private:
+
+ // Called on the task queue. Inserts the sample into the decoder, and
+ // extracts output if available.
+ void ProcessDecode(MediaRawData* aSample);
+
+ // Called on the task queue. Extracts output if available, and delivers
+ // it to the reader. Called after ProcessDecode() and ProcessDrain().
+ void ProcessOutput();
+
+ // Called on the task queue. Orders the MFT to flush. There is no output to
+ // extract.
+ void ProcessFlush();
+
+ // Called on the task queue. Orders the MFT to drain, and then extracts
+ // all available output.
+ void ProcessDrain();
+
+ void ProcessShutdown();
+
+ const RefPtr<TaskQueue> mTaskQueue;
+ MediaDataDecoderCallback* mCallback;
+
+ nsAutoPtr<MFTManager> mMFTManager;
+
+ // The last offset into the media resource that was passed into Input().
+ // This is used to approximate the decoder's position in the media resource.
+ int64_t mLastStreamOffset;
+
+ // Set on reader/decode thread calling Flush() to indicate that output is
+ // not required and so input samples on mTaskQueue need not be processed.
+ // Cleared on mTaskQueue.
+ Atomic<bool> mIsFlushing;
+
+ bool mIsShutDown;
+
+ // For telemetry
+ bool mHasSuccessfulOutput = false;
+ bool mRecordedError = false;
+};
+
+} // namespace mozilla
+
+#endif // WMFMediaDataDecoder_h_
diff --git a/dom/media/platforms/wmf/WMFUtils.cpp b/dom/media/platforms/wmf/WMFUtils.cpp
new file mode 100644
index 000000000..8aec8a8af
--- /dev/null
+++ b/dom/media/platforms/wmf/WMFUtils.cpp
@@ -0,0 +1,311 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WMFUtils.h"
+#include <stdint.h>
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/WindowsVersion.h"
+#include "mozilla/Logging.h"
+#include "nsThreadUtils.h"
+#include "nsWindowsHelpers.h"
+#include "mozilla/CheckedInt.h"
+#include "VideoUtils.h"
+#include <initguid.h>
+#include "nsTArray.h"
+
+#ifdef WMF_MUST_DEFINE_AAC_MFT_CLSID
+// Some SDK versions don't define the AAC decoder CLSID.
+// {32D186A7-218F-4C75-8876-DD77273A8999}
+DEFINE_GUID(CLSID_CMSAACDecMFT, 0x32D186A7, 0x218F, 0x4C75, 0x88, 0x76, 0xDD, 0x77, 0x27, 0x3A, 0x89, 0x99);
+#endif
+
+namespace mozilla {
+
+HRESULT
+HNsToFrames(int64_t aHNs, uint32_t aRate, int64_t* aOutFrames)
+{
+ MOZ_ASSERT(aOutFrames);
+ const int64_t HNS_PER_S = USECS_PER_S * 10;
+ CheckedInt<int64_t> i = aHNs;
+ i *= aRate;
+ i /= HNS_PER_S;
+ NS_ENSURE_TRUE(i.isValid(), E_FAIL);
+ *aOutFrames = i.value();
+ return S_OK;
+}
+
+HRESULT
+GetDefaultStride(IMFMediaType *aType, uint32_t aWidth, uint32_t* aOutStride)
+{
+ // Try to get the default stride from the media type.
+ HRESULT hr = aType->GetUINT32(MF_MT_DEFAULT_STRIDE, aOutStride);
+ if (SUCCEEDED(hr)) {
+ return S_OK;
+ }
+
+ // Stride attribute not set, calculate it.
+ GUID subtype = GUID_NULL;
+
+ hr = aType->GetGUID(MF_MT_SUBTYPE, &subtype);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = wmf::MFGetStrideForBitmapInfoHeader(subtype.Data1, aWidth, (LONG*)(aOutStride));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ return hr;
+}
+
+int32_t
+MFOffsetToInt32(const MFOffset& aOffset)
+{
+ return int32_t(aOffset.value + (aOffset.fract / 65536.0f));
+}
+
+media::TimeUnit
+GetSampleDuration(IMFSample* aSample)
+{
+ NS_ENSURE_TRUE(aSample, media::TimeUnit::Invalid());
+ int64_t duration = 0;
+ aSample->GetSampleDuration(&duration);
+ return media::TimeUnit::FromMicroseconds(HNsToUsecs(duration));
+}
+
+media::TimeUnit
+GetSampleTime(IMFSample* aSample)
+{
+ NS_ENSURE_TRUE(aSample, media::TimeUnit::Invalid());
+ LONGLONG timestampHns = 0;
+ HRESULT hr = aSample->GetSampleTime(&timestampHns);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), media::TimeUnit::Invalid());
+ return media::TimeUnit::FromMicroseconds(HNsToUsecs(timestampHns));
+}
+
+// Gets the sub-region of the video frame that should be displayed.
+// See: http://msdn.microsoft.com/en-us/library/windows/desktop/bb530115(v=vs.85).aspx
+HRESULT
+GetPictureRegion(IMFMediaType* aMediaType, nsIntRect& aOutPictureRegion)
+{
+ // Determine if "pan and scan" is enabled for this media. If it is, we
+ // only display a region of the video frame, not the entire frame.
+ BOOL panScan = MFGetAttributeUINT32(aMediaType, MF_MT_PAN_SCAN_ENABLED, FALSE);
+
+ // If pan and scan mode is enabled. Try to get the display region.
+ HRESULT hr = E_FAIL;
+ MFVideoArea videoArea;
+ memset(&videoArea, 0, sizeof(MFVideoArea));
+ if (panScan) {
+ hr = aMediaType->GetBlob(MF_MT_PAN_SCAN_APERTURE,
+ (UINT8*)&videoArea,
+ sizeof(MFVideoArea),
+ nullptr);
+ }
+
+ // If we're not in pan-and-scan mode, or the pan-and-scan region is not set,
+  // check for a minimum display aperture.
+ if (!panScan || hr == MF_E_ATTRIBUTENOTFOUND) {
+ hr = aMediaType->GetBlob(MF_MT_MINIMUM_DISPLAY_APERTURE,
+ (UINT8*)&videoArea,
+ sizeof(MFVideoArea),
+ nullptr);
+ }
+
+ if (hr == MF_E_ATTRIBUTENOTFOUND) {
+    // Minimum display aperture is not set; for "backward compatibility with
+    // some components", check for a geometric aperture.
+ hr = aMediaType->GetBlob(MF_MT_GEOMETRIC_APERTURE,
+ (UINT8*)&videoArea,
+ sizeof(MFVideoArea),
+ nullptr);
+ }
+
+ if (SUCCEEDED(hr)) {
+ // The media specified a picture region, return it.
+ aOutPictureRegion = nsIntRect(MFOffsetToInt32(videoArea.OffsetX),
+ MFOffsetToInt32(videoArea.OffsetY),
+ videoArea.Area.cx,
+ videoArea.Area.cy);
+ return S_OK;
+ }
+
+ // No picture region defined, fall back to using the entire video area.
+ UINT32 width = 0, height = 0;
+ hr = MFGetAttributeSize(aMediaType, MF_MT_FRAME_SIZE, &width, &height);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+ NS_ENSURE_TRUE(width <= MAX_VIDEO_WIDTH, E_FAIL);
+ NS_ENSURE_TRUE(height <= MAX_VIDEO_HEIGHT, E_FAIL);
+
+ aOutPictureRegion = nsIntRect(0, 0, width, height);
+ return S_OK;
+}
+
+namespace wmf {
+
+static const wchar_t* sDLLs[] = {
+ L"mfplat.dll",
+ L"mf.dll",
+ L"dxva2.dll",
+ L"evr.dll",
+};
+
+HRESULT
+LoadDLLs()
+{
+ static bool sDLLsLoaded = false;
+ static bool sFailedToLoadDlls = false;
+
+ if (sDLLsLoaded) {
+ return S_OK;
+ }
+ if (sFailedToLoadDlls) {
+ return E_FAIL;
+ }
+
+ // Try to load all the required DLLs. If we fail to load any dll,
+ // unload the dlls we succeeded in loading.
+ nsTArray<const wchar_t*> loadedDlls;
+ for (const wchar_t* dll : sDLLs) {
+ if (!LoadLibrarySystem32(dll)) {
+ NS_WARNING("Failed to load WMF DLLs");
+ for (const wchar_t* loadedDll : loadedDlls) {
+ FreeLibrary(GetModuleHandleW(loadedDll));
+ }
+ sFailedToLoadDlls = true;
+ return E_FAIL;
+ }
+ loadedDlls.AppendElement(dll);
+ }
+ sDLLsLoaded = true;
+
+ return S_OK;
+}
+
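+// The wrappers below resolve each WMF entry point lazily via GetProcAddress
+// on the already-loaded module, so a missing export is only detected the
+// first time the corresponding wrapper is called.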
+#define ENSURE_FUNCTION_PTR_HELPER(FunctionType, FunctionName, DLL) \
+ static FunctionType FunctionName##Ptr = nullptr; \
+ if (!FunctionName##Ptr) { \
+ FunctionName##Ptr = (FunctionType) GetProcAddress(GetModuleHandleW(L ## #DLL), #FunctionName); \
+ if (!FunctionName##Ptr) { \
+ NS_WARNING("Failed to get GetProcAddress of " #FunctionName " from " #DLL); \
+ return E_FAIL; \
+ } \
+ }
+
+#define ENSURE_FUNCTION_PTR(FunctionName, DLL) \
+ ENSURE_FUNCTION_PTR_HELPER(decltype(::FunctionName)*, FunctionName, DLL) \
+
+#define ENSURE_FUNCTION_PTR_(FunctionName, DLL) \
+ ENSURE_FUNCTION_PTR_HELPER(FunctionName##Ptr_t, FunctionName, DLL) \
+
+#define DECL_FUNCTION_PTR(FunctionName, ...) \
+ typedef HRESULT (STDMETHODCALLTYPE * FunctionName##Ptr_t)(__VA_ARGS__)
+
+HRESULT
+MFStartup()
+{
+ if (!IsVistaOrLater() || IsWin7AndPre2000Compatible()) {
+ // *Only* use WMF on Vista and later, as if Firefox is run in Windows 95
+ // compatibility mode on Windows 7 (it does happen!) we may crash trying
+ // to startup WMF. So we need to detect the OS version here, as in
+ // compatibility mode IsVistaOrLater() and friends behave as if we're on
+ // the emulated version of Windows. See bug 1279171.
+ // Using GetVersionEx API which takes compatibility mode into account.
+ return E_FAIL;
+ }
+
+ HRESULT hr = LoadDLLs();
+ if (FAILED(hr)) {
+ return hr;
+ }
+
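+  // MFStartup's version argument packs the MF SDK version in the high word
+  // (0x0001 for the Vista-era SDK, 0x0002 for Windows 7 and later) and
+  // MF_API_VERSION in the low word.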
+ const int MF_VISTA_VERSION = (0x0001 << 16 | MF_API_VERSION);
+ const int MF_WIN7_VERSION = (0x0002 << 16 | MF_API_VERSION);
+
+ // decltype is unusable for functions having default parameters
+ DECL_FUNCTION_PTR(MFStartup, ULONG, DWORD);
+ ENSURE_FUNCTION_PTR_(MFStartup, Mfplat.dll)
+ if (!IsWin7OrLater())
+ return MFStartupPtr(MF_VISTA_VERSION, MFSTARTUP_FULL);
+ else
+ return MFStartupPtr(MF_WIN7_VERSION, MFSTARTUP_FULL);
+}
+
+HRESULT
+MFShutdown()
+{
+ ENSURE_FUNCTION_PTR(MFShutdown, Mfplat.dll)
+ return (MFShutdownPtr)();
+}
+
+HRESULT
+MFCreateMediaType(IMFMediaType **aOutMFType)
+{
+ ENSURE_FUNCTION_PTR(MFCreateMediaType, Mfplat.dll)
+ return (MFCreateMediaTypePtr)(aOutMFType);
+}
+
+
+HRESULT
+MFGetStrideForBitmapInfoHeader(DWORD aFormat,
+ DWORD aWidth,
+ LONG *aOutStride)
+{
+ ENSURE_FUNCTION_PTR(MFGetStrideForBitmapInfoHeader, evr.dll)
+ return (MFGetStrideForBitmapInfoHeaderPtr)(aFormat, aWidth, aOutStride);
+}
+
+HRESULT MFGetService(IUnknown *punkObject,
+ REFGUID guidService,
+ REFIID riid,
+ LPVOID *ppvObject)
+{
+ ENSURE_FUNCTION_PTR(MFGetService, mf.dll)
+ return (MFGetServicePtr)(punkObject, guidService, riid, ppvObject);
+}
+
+HRESULT
+DXVA2CreateDirect3DDeviceManager9(UINT *pResetToken,
+ IDirect3DDeviceManager9 **ppDXVAManager)
+{
+ ENSURE_FUNCTION_PTR(DXVA2CreateDirect3DDeviceManager9, dxva2.dll)
+ return (DXVA2CreateDirect3DDeviceManager9Ptr)(pResetToken, ppDXVAManager);
+}
+
+HRESULT
+MFCreateSample(IMFSample **ppIMFSample)
+{
+ ENSURE_FUNCTION_PTR(MFCreateSample, mfplat.dll)
+ return (MFCreateSamplePtr)(ppIMFSample);
+}
+
+HRESULT
+MFCreateAlignedMemoryBuffer(DWORD cbMaxLength,
+ DWORD fAlignmentFlags,
+ IMFMediaBuffer **ppBuffer)
+{
+ ENSURE_FUNCTION_PTR(MFCreateAlignedMemoryBuffer, mfplat.dll)
+ return (MFCreateAlignedMemoryBufferPtr)(cbMaxLength, fAlignmentFlags, ppBuffer);
+}
+
+HRESULT
+MFCreateDXGIDeviceManager(UINT *pResetToken, IMFDXGIDeviceManager **ppDXVAManager)
+{
+ ENSURE_FUNCTION_PTR(MFCreateDXGIDeviceManager, mfplat.dll)
+ return (MFCreateDXGIDeviceManagerPtr)(pResetToken, ppDXVAManager);
+}
+
+HRESULT
+MFCreateDXGISurfaceBuffer(REFIID riid,
+ IUnknown *punkSurface,
+ UINT uSubresourceIndex,
+                          BOOL fBottomUpWhenLinear,
+ IMFMediaBuffer **ppBuffer)
+{
+ ENSURE_FUNCTION_PTR(MFCreateDXGISurfaceBuffer, mfplat.dll)
+  return (MFCreateDXGISurfaceBufferPtr)(riid, punkSurface, uSubresourceIndex, fBottomUpWhenLinear, ppBuffer);
+}
+
+} // end namespace wmf
+} // end namespace mozilla
diff --git a/dom/media/platforms/wmf/WMFUtils.h b/dom/media/platforms/wmf/WMFUtils.h
new file mode 100644
index 000000000..ac8779774
--- /dev/null
+++ b/dom/media/platforms/wmf/WMFUtils.h
@@ -0,0 +1,67 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WMFUtils_h
+#define WMFUtils_h
+
+#include "WMF.h"
+#include "nsString.h"
+#include "nsRect.h"
+#include "TimeUnits.h"
+#include "VideoUtils.h"
+
+// Various utilities shared by WMF backend files.
+
+namespace mozilla {
+
+// Converts from microseconds to hundreds of nanoseconds.
+// We use microseconds for our timestamps, whereas WMF uses
+// hundreds of nanoseconds.
+inline int64_t
+UsecsToHNs(int64_t aUsecs) {
+ return aUsecs * 10;
+}
+
+// Converts from hundreds of nanoseconds to microseconds.
+// We use microseconds for our timestamps, whereas WMF uses
+// hundreds of nanoseconds.
+inline int64_t
+HNsToUsecs(int64_t hNanoSecs) {
+ return hNanoSecs / 10;
+}
+
+HRESULT
+HNsToFrames(int64_t aHNs, uint32_t aRate, int64_t* aOutFrames);
+
+HRESULT
+GetDefaultStride(IMFMediaType *aType, uint32_t aWidth, uint32_t* aOutStride);
+
+int32_t
+MFOffsetToInt32(const MFOffset& aOffset);
+
+// Gets the sub-region of the video frame that should be displayed.
+// See: http://msdn.microsoft.com/en-us/library/windows/desktop/bb530115(v=vs.85).aspx
+HRESULT
+GetPictureRegion(IMFMediaType* aMediaType, nsIntRect& aOutPictureRegion);
+
+// Returns the duration of a IMFSample in TimeUnit.
+// Returns media::TimeUnit::Invalid() on failure.
+media::TimeUnit
+GetSampleDuration(IMFSample* aSample);
+
+// Returns the presentation time of a IMFSample in TimeUnit.
+// Returns media::TimeUnit::Invalid() on failure.
+media::TimeUnit
+GetSampleTime(IMFSample* aSample);
+
+inline bool
+IsFlagSet(DWORD flags, DWORD pattern) {
+ return (flags & pattern) == pattern;
+}
+
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
new file mode 100644
index 000000000..291bc5b74
--- /dev/null
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -0,0 +1,1016 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <algorithm>
+#include <winsdkver.h>
+#include <psapi.h>
+#include "WMFVideoMFTManager.h"
+#include "MediaDecoderReader.h"
+#include "gfxPrefs.h"
+#include "WMFUtils.h"
+#include "ImageContainer.h"
+#include "VideoUtils.h"
+#include "DXVA2Manager.h"
+#include "nsThreadUtils.h"
+#include "Layers.h"
+#include "mozilla/ClearOnShutdown.h"
+#include "mozilla/layers/LayersTypes.h"
+#include "MediaInfo.h"
+#include "mozilla/Logging.h"
+#include "nsWindowsHelpers.h"
+#include "gfx2DGlue.h"
+#include "gfxWindowsPlatform.h"
+#include "IMFYCbCrImage.h"
+#include "mozilla/WindowsVersion.h"
+#include "mozilla/Telemetry.h"
+#include "nsPrintfCString.h"
+#include "MediaTelemetryConstants.h"
+#include "GMPUtils.h" // For SplitAt. TODO: Move SplitAt to a central place.
+#include "MP4Decoder.h"
+#include "VPXDecoder.h"
+#include "mozilla/SyncRunnable.h"
+
+#define LOG(...) MOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, (__VA_ARGS__))
+
+using mozilla::layers::Image;
+using mozilla::layers::IMFYCbCrImage;
+using mozilla::layers::LayerManager;
+using mozilla::layers::LayersBackend;
+
+#if WINVER_MAXVER < 0x0A00
+// Windows 10+ SDK has VP80 and VP90 defines
+const GUID MFVideoFormat_VP80 =
+{
+ 0x30385056,
+ 0x0000,
+ 0x0010,
+ {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+};
+
+const GUID MFVideoFormat_VP90 =
+{
+ 0x30395056,
+ 0x0000,
+ 0x0010,
+ {0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+};
+#endif
+
+const CLSID CLSID_WebmMfVpxDec =
+{
+ 0xe3aaf548,
+ 0xc9a4,
+ 0x4c6e,
+ { 0x23, 0x4d, 0x5a, 0xda, 0x37, 0x4b, 0x00, 0x00 }
+};
+
+namespace mozilla {
+
+LayersBackend
+GetCompositorBackendType(layers::KnowsCompositor* aKnowsCompositor)
+{
+ if (aKnowsCompositor) {
+ return aKnowsCompositor->GetCompositorBackendType();
+ }
+ return LayersBackend::LAYERS_NONE;
+}
+
+WMFVideoMFTManager::WMFVideoMFTManager(
+ const VideoInfo& aConfig,
+ layers::KnowsCompositor* aKnowsCompositor,
+ layers::ImageContainer* aImageContainer,
+ bool aDXVAEnabled)
+ : mVideoInfo(aConfig)
+ , mVideoStride(0)
+ , mImageSize(aConfig.mImage)
+ , mImageContainer(aImageContainer)
+ , mDXVAEnabled(aDXVAEnabled)
+ , mKnowsCompositor(aKnowsCompositor)
+ , mNullOutputCount(0)
+ , mGotValidOutputAfterNullOutput(false)
+ , mGotExcessiveNullOutput(false)
+ , mIsValid(true)
+ // mVideoStride, mVideoWidth, mVideoHeight, mUseHwAccel are initialized in
+ // Init().
+{
+ MOZ_COUNT_CTOR(WMFVideoMFTManager);
+
+ // Need additional checks/params to check vp8/vp9
+ if (MP4Decoder::IsH264(aConfig.mMimeType)) {
+ mStreamType = H264;
+ } else if (VPXDecoder::IsVP8(aConfig.mMimeType)) {
+ mStreamType = VP8;
+ } else if (VPXDecoder::IsVP9(aConfig.mMimeType)) {
+ mStreamType = VP9;
+ } else {
+ mStreamType = Unknown;
+ }
+}
+
+WMFVideoMFTManager::~WMFVideoMFTManager()
+{
+ MOZ_COUNT_DTOR(WMFVideoMFTManager);
+ // Ensure DXVA/D3D9 related objects are released on the main thread.
+ if (mDXVA2Manager) {
+ DeleteOnMainThread(mDXVA2Manager);
+ }
+
+ // Record whether the video decoder successfully decoded, or output null
+ // samples but did/didn't recover.
+ uint32_t telemetry = (mNullOutputCount == 0) ? 0 :
+ (mGotValidOutputAfterNullOutput && mGotExcessiveNullOutput) ? 1 :
+ mGotExcessiveNullOutput ? 2 :
+ mGotValidOutputAfterNullOutput ? 3 :
+ 4;
+
+ nsCOMPtr<nsIRunnable> task = NS_NewRunnableFunction([=]() -> void {
+ LOG(nsPrintfCString("Reporting telemetry VIDEO_MFT_OUTPUT_NULL_SAMPLES=%d", telemetry).get());
+ Telemetry::Accumulate(Telemetry::ID::VIDEO_MFT_OUTPUT_NULL_SAMPLES, telemetry);
+ });
+ AbstractThread::MainThread()->Dispatch(task.forget());
+}
+
+const GUID&
+WMFVideoMFTManager::GetMFTGUID()
+{
+ MOZ_ASSERT(mStreamType != Unknown);
+ switch (mStreamType) {
+ case H264: return CLSID_CMSH264DecoderMFT;
+ case VP8: return CLSID_WebmMfVpxDec;
+ case VP9: return CLSID_WebmMfVpxDec;
+ default: return GUID_NULL;
+ };
+}
+
+const GUID&
+WMFVideoMFTManager::GetMediaSubtypeGUID()
+{
+ MOZ_ASSERT(mStreamType != Unknown);
+ switch (mStreamType) {
+ case H264: return MFVideoFormat_H264;
+ case VP8: return MFVideoFormat_VP80;
+ case VP9: return MFVideoFormat_VP90;
+ default: return GUID_NULL;
+ };
+}
+
+struct D3DDLLBlacklistingCache
+{
+ // Blacklist pref value last seen.
+ nsCString mBlacklistPref;
+ // Non-empty if a blacklisted DLL was found.
+ nsCString mBlacklistedDLL;
+};
+StaticAutoPtr<D3DDLLBlacklistingCache> sD3D11BlacklistingCache;
+StaticAutoPtr<D3DDLLBlacklistingCache> sD3D9BlacklistingCache;
+
+// If a blacklisted DLL is found, return its information, otherwise "".
+static const nsCString&
+FindDXVABlacklistedDLL(StaticAutoPtr<D3DDLLBlacklistingCache>& aDLLBlacklistingCache,
+ const nsCString& aBlacklist,
+ const char* aDLLBlacklistPrefName)
+{
+ NS_ASSERTION(NS_IsMainThread(), "Must be on main thread.");
+
+ if (!aDLLBlacklistingCache) {
+ // First time here, create persistent data that will be reused in all
+ // D3D11-blacklisting checks.
+ aDLLBlacklistingCache = new D3DDLLBlacklistingCache();
+ ClearOnShutdown(&aDLLBlacklistingCache);
+ }
+
+ if (aBlacklist.IsEmpty()) {
+ // Empty blacklist -> No blacklisting.
+ aDLLBlacklistingCache->mBlacklistPref.SetLength(0);
+ aDLLBlacklistingCache->mBlacklistedDLL.SetLength(0);
+ return aDLLBlacklistingCache->mBlacklistedDLL;
+ }
+
+ // Detect changes in pref.
+ if (aDLLBlacklistingCache->mBlacklistPref.Equals(aBlacklist)) {
+ // Same blacklist -> Return same result (i.e., don't check DLLs again).
+ return aDLLBlacklistingCache->mBlacklistedDLL;
+ }
+ // Adopt new pref now, so we don't work on it again.
+ aDLLBlacklistingCache->mBlacklistPref = aBlacklist;
+
+ HANDLE hProcess = GetCurrentProcess();
+ mozilla::UniquePtr<HMODULE[]> hMods;
+ unsigned int modulesNum = 0;
+ if (hProcess != NULL) {
+ DWORD modulesSize;
+ EnumProcessModules(hProcess, nullptr, 0, &modulesSize);
+ modulesNum = modulesSize / sizeof(HMODULE);
+ hMods = mozilla::MakeUnique<HMODULE[]>(modulesNum);
+ EnumProcessModules(hProcess, hMods.get(), modulesNum * sizeof(HMODULE), &modulesSize);
+ }
+
+ // media.wmf.disable-d3d*-for-dlls format: (whitespace is trimmed)
+ // "dll1.dll: 1.2.3.4[, more versions...][; more dlls...]"
+ nsTArray<nsCString> dlls;
+ SplitAt(";", aBlacklist, dlls);
+ for (const auto& dll : dlls) {
+ nsTArray<nsCString> nameAndVersions;
+ SplitAt(":", dll, nameAndVersions);
+ if (nameAndVersions.Length() != 2) {
+ NS_WARNING(nsPrintfCString("Skipping incorrect '%s' dll:versions format",
+ aDLLBlacklistPrefName).get());
+ continue;
+ }
+
+ nameAndVersions[0].CompressWhitespace();
+ NS_ConvertUTF8toUTF16 name(nameAndVersions[0]);
+
+ for (unsigned int i = 0; i <= modulesNum; i++) {
+ WCHAR dllPath[MAX_PATH + 1];
+
+ if (i < modulesNum) {
+ if (!GetModuleFileNameEx(hProcess, hMods[i], dllPath, sizeof(dllPath) / sizeof(WCHAR))) {
+ continue;
+ }
+
+ nsCOMPtr<nsIFile> file;
+ if (NS_WARN_IF(NS_FAILED(NS_NewLocalFile(nsDependentString(dllPath), false, getter_AddRefs(file))))) {
+ continue;
+ }
+
+ nsAutoString leafName;
+ if (NS_WARN_IF(NS_FAILED(file->GetLeafName(leafName)))) {
+ continue;
+ }
+
+ if (_wcsicmp(leafName.get(), name.get())) {
+ continue;
+ }
+ } else {
+ if (!ConstructSystem32Path(name.get(), dllPath, MAX_PATH + 1)) {
+ // Cannot build path -> Assume it's not the blacklisted DLL.
+ continue;
+ }
+ }
+
+ DWORD zero;
+ DWORD infoSize = GetFileVersionInfoSizeW(dllPath, &zero);
+ if (infoSize == 0) {
+ // Can't get file info -> Assume we don't have the blacklisted DLL.
+ continue;
+ }
+      // vInfo is a pointer into infoData, which is why we keep infoData
+      // outside of the versions loop below.
+ auto infoData = MakeUnique<unsigned char[]>(infoSize);
+ VS_FIXEDFILEINFO *vInfo;
+ UINT vInfoLen;
+ if (!GetFileVersionInfoW(dllPath, 0, infoSize, infoData.get())
+ || !VerQueryValueW(infoData.get(), L"\\", (LPVOID*)&vInfo, &vInfoLen)
+ || !vInfo) {
+ // Can't find version -> Assume it's not blacklisted.
+ continue;
+ }
+
+ nsTArray<nsCString> versions;
+ SplitAt(",", nameAndVersions[1], versions);
+ for (const auto& version : versions) {
+ nsTArray<nsCString> numberStrings;
+ SplitAt(".", version, numberStrings);
+ if (numberStrings.Length() != 4) {
+ NS_WARNING(nsPrintfCString("Skipping incorrect '%s' a.b.c.d version format",
+ aDLLBlacklistPrefName).get());
+ continue;
+ }
+ DWORD numbers[4];
+ nsresult errorCode = NS_OK;
+ for (int i = 0; i < 4; ++i) {
+ numberStrings[i].CompressWhitespace();
+ numbers[i] = DWORD(numberStrings[i].ToInteger(&errorCode));
+ if (NS_FAILED(errorCode)) {
+ break;
+ }
+ if (numbers[i] > UINT16_MAX) {
+ errorCode = NS_ERROR_FAILURE;
+ break;
+ }
+ }
+
+ if (NS_FAILED(errorCode)) {
+ NS_WARNING(nsPrintfCString("Skipping incorrect '%s' a.b.c.d version format",
+ aDLLBlacklistPrefName).get());
+ continue;
+ }
+
+ if (vInfo->dwFileVersionMS == ((numbers[0] << 16) | numbers[1])
+ && vInfo->dwFileVersionLS == ((numbers[2] << 16) | numbers[3])) {
+ // Blacklisted! Record bad DLL.
+ aDLLBlacklistingCache->mBlacklistedDLL.SetLength(0);
+ aDLLBlacklistingCache->mBlacklistedDLL.AppendPrintf(
+ "%s (%lu.%lu.%lu.%lu)",
+ nameAndVersions[0].get(), numbers[0], numbers[1], numbers[2], numbers[3]);
+ return aDLLBlacklistingCache->mBlacklistedDLL;
+ }
+ }
+ }
+ }
+
+ // No blacklisted DLL.
+ aDLLBlacklistingCache->mBlacklistedDLL.SetLength(0);
+ return aDLLBlacklistingCache->mBlacklistedDLL;
+}
+
+static const nsCString&
+FindD3D11BlacklistedDLL() {
+ return FindDXVABlacklistedDLL(sD3D11BlacklistingCache,
+ gfx::gfxVars::PDMWMFDisableD3D11Dlls(),
+ "media.wmf.disable-d3d11-for-dlls");
+}
+
+static const nsCString&
+FindD3D9BlacklistedDLL() {
+ return FindDXVABlacklistedDLL(sD3D9BlacklistingCache,
+ gfx::gfxVars::PDMWMFDisableD3D9Dlls(),
+ "media.wmf.disable-d3d9-for-dlls");
+}
+
+class CreateDXVAManagerEvent : public Runnable {
+public:
+ CreateDXVAManagerEvent(LayersBackend aBackend,
+ layers::KnowsCompositor* aKnowsCompositor,
+ nsCString& aFailureReason)
+ : mBackend(aBackend)
+ , mKnowsCompositor(aKnowsCompositor)
+ , mFailureReason(aFailureReason)
+ {}
+
+ NS_IMETHOD Run() override {
+ NS_ASSERTION(NS_IsMainThread(), "Must be on main thread.");
+ nsACString* failureReason = &mFailureReason;
+ nsCString secondFailureReason;
+ if (mBackend == LayersBackend::LAYERS_D3D11 &&
+ gfxPrefs::PDMWMFAllowD3D11() && IsWin8OrLater()) {
+ const nsCString& blacklistedDLL = FindD3D11BlacklistedDLL();
+ if (!blacklistedDLL.IsEmpty()) {
+ failureReason->AppendPrintf("D3D11 blacklisted with DLL %s",
+ blacklistedDLL.get());
+ } else {
+ mDXVA2Manager = DXVA2Manager::CreateD3D11DXVA(mKnowsCompositor, *failureReason);
+ if (mDXVA2Manager) {
+ return NS_OK;
+ }
+ }
+ // Try again with d3d9, but record the failure reason
+ // into a new var to avoid overwriting the d3d11 failure.
+ failureReason = &secondFailureReason;
+ mFailureReason.Append(NS_LITERAL_CSTRING("; "));
+ }
+
+ const nsCString& blacklistedDLL = FindD3D9BlacklistedDLL();
+ if (!blacklistedDLL.IsEmpty()) {
+ mFailureReason.AppendPrintf("D3D9 blacklisted with DLL %s",
+ blacklistedDLL.get());
+ } else {
+ mDXVA2Manager = DXVA2Manager::CreateD3D9DXVA(mKnowsCompositor, *failureReason);
+ // Make sure we include the messages from both attempts (if applicable).
+ mFailureReason.Append(secondFailureReason);
+ }
+ return NS_OK;
+ }
+ nsAutoPtr<DXVA2Manager> mDXVA2Manager;
+ layers::LayersBackend mBackend;
+ KnowsCompositor* mKnowsCompositor;
+ nsACString& mFailureReason;
+};
+
+bool
+WMFVideoMFTManager::InitializeDXVA(bool aForceD3D9)
+{
+ // If we use DXVA but aren't running with a D3D layer manager then the
+ // readback of decoded video frames from GPU to CPU memory grinds painting
+ // to a halt, and makes playback performance *worse*.
+ if (!mDXVAEnabled) {
+ mDXVAFailureReason.AssignLiteral("Hardware video decoding disabled or blacklisted");
+ return false;
+ }
+ MOZ_ASSERT(!mDXVA2Manager);
+ LayersBackend backend = GetCompositorBackendType(mKnowsCompositor);
+ if (backend != LayersBackend::LAYERS_D3D9 &&
+ backend != LayersBackend::LAYERS_D3D11) {
+ mDXVAFailureReason.AssignLiteral("Unsupported layers backend");
+ return false;
+ }
+
+ // The DXVA manager must be created on the main thread.
+ RefPtr<CreateDXVAManagerEvent> event =
+ new CreateDXVAManagerEvent(aForceD3D9 ? LayersBackend::LAYERS_D3D9
+ : backend,
+ mKnowsCompositor,
+ mDXVAFailureReason);
+
+ if (NS_IsMainThread()) {
+ event->Run();
+ } else {
+ // This logic needs to run on the main thread
+ nsCOMPtr<nsIThread> mainThread = do_GetMainThread();
+ mozilla::SyncRunnable::DispatchToThread(mainThread, event);
+ }
+ mDXVA2Manager = event->mDXVA2Manager;
+
+ return mDXVA2Manager != nullptr;
+}
+
+bool
+WMFVideoMFTManager::ValidateVideoInfo()
+{
+  // The WMF H.264 decoder is documented to have a minimum resolution of
+  // 48x48 pixels. We've observed the decoder working for output smaller than
+ // that, but on some output it hangs in IMFTransform::ProcessOutput(), so
+ // we just reject streams which are less than the documented minimum.
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/dd797815(v=vs.85).aspx
+ static const int32_t MIN_H264_FRAME_DIMENSION = 48;
+ if (mStreamType == H264 &&
+ (mVideoInfo.mImage.width < MIN_H264_FRAME_DIMENSION ||
+ mVideoInfo.mImage.height < MIN_H264_FRAME_DIMENSION)) {
+ LogToBrowserConsole(NS_LITERAL_STRING(
+ "Can't decode H.264 stream with width or height less than 48 pixels."));
+ mIsValid = false;
+ }
+
+ return mIsValid;
+}
+
+bool
+WMFVideoMFTManager::Init()
+{
+ if (!ValidateVideoInfo()) {
+ return false;
+ }
+
+ bool success = InitInternal(/* aForceD3D9 = */ false);
+
+ if (success && mDXVA2Manager) {
+ // If we had some failures but eventually made it work,
+ // make sure we preserve the messages.
+ if (mDXVA2Manager->IsD3D11()) {
+ mDXVAFailureReason.Append(NS_LITERAL_CSTRING("Using D3D11 API"));
+ } else {
+ mDXVAFailureReason.Append(NS_LITERAL_CSTRING("Using D3D9 API"));
+ }
+ }
+
+ return success;
+}
+
+bool
+WMFVideoMFTManager::InitInternal(bool aForceD3D9)
+{
+ mUseHwAccel = false; // default value; changed if D3D setup succeeds.
+ bool useDxva = InitializeDXVA(aForceD3D9);
+
+ RefPtr<MFTDecoder> decoder(new MFTDecoder());
+
+ HRESULT hr = decoder->Create(GetMFTGUID());
+ NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+
+ RefPtr<IMFAttributes> attr(decoder->GetAttributes());
+ UINT32 aware = 0;
+ if (attr) {
+ attr->GetUINT32(MF_SA_D3D_AWARE, &aware);
+ attr->SetUINT32(CODECAPI_AVDecNumWorkerThreads,
+ WMFDecoderModule::GetNumDecoderThreads());
+ if (gfxPrefs::PDMWMFLowLatencyEnabled()) {
+ hr = attr->SetUINT32(CODECAPI_AVLowLatencyMode, TRUE);
+ if (SUCCEEDED(hr)) {
+ LOG("Enabling Low Latency Mode");
+ } else {
+ LOG("Couldn't enable Low Latency Mode");
+ }
+ }
+ }
+
+ if (useDxva) {
+ if (aware) {
+ // TODO: Test if I need this anywhere... Maybe on Vista?
+ //hr = attr->SetUINT32(CODECAPI_AVDecVideoAcceleration_H264, TRUE);
+ //NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+ MOZ_ASSERT(mDXVA2Manager);
+ ULONG_PTR manager = ULONG_PTR(mDXVA2Manager->GetDXVADeviceManager());
+ hr = decoder->SendMFTMessage(MFT_MESSAGE_SET_D3D_MANAGER, manager);
+ if (SUCCEEDED(hr)) {
+ mUseHwAccel = true;
+ } else {
+ DeleteOnMainThread(mDXVA2Manager);
+ mDXVAFailureReason = nsPrintfCString("MFT_MESSAGE_SET_D3D_MANAGER failed with code %X", hr);
+ }
+ }
+ else {
+ mDXVAFailureReason.AssignLiteral("Decoder returned false for MF_SA_D3D_AWARE");
+ }
+ }
+
+ if (!mUseHwAccel) {
+ // Use VP8/9 MFT only if HW acceleration is available
+ if (mStreamType == VP9 || mStreamType == VP8) {
+ return false;
+ }
+ Telemetry::Accumulate(Telemetry::MEDIA_DECODER_BACKEND_USED,
+ uint32_t(media::MediaDecoderBackend::WMFSoftware));
+ }
+
+ mDecoder = decoder;
+ hr = SetDecoderMediaTypes();
+ NS_ENSURE_TRUE(SUCCEEDED(hr), false);
+
+ LOG("Video Decoder initialized, Using DXVA: %s", (mUseHwAccel ? "Yes" : "No"));
+
+ return true;
+}
+
+HRESULT
+WMFVideoMFTManager::SetDecoderMediaTypes()
+{
+ // Setup the input/output media types.
+ RefPtr<IMFMediaType> inputType;
+ HRESULT hr = wmf::MFCreateMediaType(getter_AddRefs(inputType));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = inputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = inputType->SetGUID(MF_MT_SUBTYPE, GetMediaSubtypeGUID());
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = inputType->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_MixedInterlaceOrProgressive);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ // MSFT MFT needs this frame size set for VP9?
+ if (mStreamType == VP9 || mStreamType == VP8) {
+ hr = inputType->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = MFSetAttributeSize(inputType, MF_MT_FRAME_SIZE, mVideoInfo.ImageRect().width, mVideoInfo.ImageRect().height);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+ }
+
+ RefPtr<IMFMediaType> outputType;
+ hr = wmf::MFCreateMediaType(getter_AddRefs(outputType));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ hr = outputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ GUID outputSubType = mUseHwAccel ? MFVideoFormat_NV12 : MFVideoFormat_YV12;
+ hr = outputType->SetGUID(MF_MT_SUBTYPE, outputSubType);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ return mDecoder->SetMediaTypes(inputType, outputType);
+}
+
+HRESULT
+WMFVideoMFTManager::Input(MediaRawData* aSample)
+{
+ if (!mIsValid) {
+ return E_FAIL;
+ }
+
+ if (!mDecoder) {
+ // This can happen during shutdown.
+ return E_FAIL;
+ }
+
+ HRESULT hr = mDecoder->CreateInputSample(aSample->Data(),
+ uint32_t(aSample->Size()),
+ aSample->mTime,
+ &mLastInput);
+ NS_ENSURE_TRUE(SUCCEEDED(hr) && mLastInput != nullptr, hr);
+
+ mLastDuration = aSample->mDuration;
+ mLastTime = aSample->mTime;
+ mSamplesCount++;
+
+ // Forward sample data to the decoder.
+ return mDecoder->Input(mLastInput);
+}
+
+class SupportsConfigEvent : public Runnable {
+public:
+ SupportsConfigEvent(DXVA2Manager* aDXVA2Manager, IMFMediaType* aMediaType, float aFramerate)
+ : mDXVA2Manager(aDXVA2Manager)
+ , mMediaType(aMediaType)
+ , mFramerate(aFramerate)
+ , mSupportsConfig(false)
+ {}
+
+ NS_IMETHOD Run() {
+ MOZ_ASSERT(NS_IsMainThread(), "Must be on main thread.");
+ mSupportsConfig = mDXVA2Manager->SupportsConfig(mMediaType, mFramerate);
+ return NS_OK;
+ }
+ DXVA2Manager* mDXVA2Manager;
+ IMFMediaType* mMediaType;
+ float mFramerate;
+ bool mSupportsConfig;
+};
+
+// The MFTransform we use for decoding h264 video will silently fall
+// back to software decoding (even if we've negotiated DXVA) if the GPU
+// doesn't support decoding the given resolution. It will then upload
+// the software decoded frames into d3d textures to preserve behaviour.
+//
+// Unfortunately this seems to cause corruption (see bug 1193547) and is
+// slow because the upload is done into a non-shareable texture and requires
+// us to copy it.
+//
+// This code tests if the given resolution can be supported directly on the GPU,
+// and makes sure we only ask the MFT for DXVA if it can be supported properly.
+//
+// Ideally we'd know the framerate during initialization and would also ensure
+// that new decoders are created if the resolution changes. Then we could move
+// this check into Init and consolidate the main thread blocking code.
+bool
+WMFVideoMFTManager::CanUseDXVA(IMFMediaType* aType)
+{
+ MOZ_ASSERT(mDXVA2Manager);
+ // SupportsConfig only checks for valid h264 decoders currently.
+ if (mStreamType != H264) {
+ return true;
+ }
+
+  // Assume the current sample's duration is representative of the
+  // entire video.
+ float framerate = 1000000.0 / mLastDuration;
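+  // mLastDuration is in microseconds, so this works out to frames per second
+  // (e.g. a frame duration of 33333 us gives ~30 fps).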
+
+ // The supports config check must be done on the main thread since we have
+ // a crash guard protecting it.
+ RefPtr<SupportsConfigEvent> event =
+ new SupportsConfigEvent(mDXVA2Manager, aType, framerate);
+
+ if (NS_IsMainThread()) {
+ event->Run();
+ } else {
+ // This logic needs to run on the main thread
+ nsCOMPtr<nsIThread> mainThread = do_GetMainThread();
+ mozilla::SyncRunnable::DispatchToThread(mainThread, event);
+ }
+
+ return event->mSupportsConfig;
+}
+
+HRESULT
+WMFVideoMFTManager::ConfigureVideoFrameGeometry()
+{
+ RefPtr<IMFMediaType> mediaType;
+ HRESULT hr = mDecoder->GetOutputMediaType(mediaType);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ // If we enabled/disabled DXVA in response to a resolution
+ // change then we need to renegotiate our media types,
+ // and resubmit our previous frame (since the MFT appears
+ // to lose it otherwise).
+ if (mUseHwAccel && !CanUseDXVA(mediaType)) {
+ mDXVAEnabled = false;
+ if (!Init()) {
+ return E_FAIL;
+ }
+
+ mDecoder->Input(mLastInput);
+ return S_OK;
+ }
+
+ // Verify that the video subtype is what we expect it to be.
+ // When using hardware acceleration/DXVA2 the video format should
+ // be NV12, which is DXVA2's preferred format. For software decoding
+ // we use YV12, as that's easier for us to stick into our rendering
+ // pipeline than NV12. NV12 has interleaved UV samples, whereas YV12
+ // is a planar format.
+ GUID videoFormat;
+ hr = mediaType->GetGUID(MF_MT_SUBTYPE, &videoFormat);
+ NS_ENSURE_TRUE(videoFormat == MFVideoFormat_NV12 || !mUseHwAccel, E_FAIL);
+ NS_ENSURE_TRUE(videoFormat == MFVideoFormat_YV12 || mUseHwAccel, E_FAIL);
+
+ nsIntRect pictureRegion;
+ hr = GetPictureRegion(mediaType, pictureRegion);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+ UINT32 width = pictureRegion.width;
+ UINT32 height = pictureRegion.height;
+ mImageSize = nsIntSize(width, height);
+ // Calculate and validate the picture region and frame dimensions after
+ // scaling by the pixel aspect ratio.
+ pictureRegion = mVideoInfo.ScaledImageRect(width, height);
+ if (!IsValidVideoRegion(mImageSize, pictureRegion, mVideoInfo.mDisplay)) {
+ // Video track's frame sizes will overflow. Ignore the video track.
+ return E_FAIL;
+ }
+
+ if (mDXVA2Manager) {
+ hr = mDXVA2Manager->ConfigureForSize(width, height);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+ }
+
+ // Success! Save state.
+ GetDefaultStride(mediaType, width, &mVideoStride);
+
+ LOG("WMFVideoMFTManager frame geometry frame=(%u,%u) stride=%u picture=(%d, %d, %d, %d) display=(%d,%d)",
+ width, height,
+ mVideoStride,
+ pictureRegion.x, pictureRegion.y, pictureRegion.width, pictureRegion.height,
+ mVideoInfo.mDisplay.width, mVideoInfo.mDisplay.height);
+
+ return S_OK;
+}
+
+HRESULT
+WMFVideoMFTManager::CreateBasicVideoFrame(IMFSample* aSample,
+ int64_t aStreamOffset,
+ VideoData** aOutVideoData)
+{
+ NS_ENSURE_TRUE(aSample, E_POINTER);
+ NS_ENSURE_TRUE(aOutVideoData, E_POINTER);
+
+ *aOutVideoData = nullptr;
+
+ HRESULT hr;
+ RefPtr<IMFMediaBuffer> buffer;
+
+  // Must convert to a contiguous buffer to use the IMF2DBuffer interface.
+ hr = aSample->ConvertToContiguousBuffer(getter_AddRefs(buffer));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+
+  // Try to use the IMF2DBuffer interface if available, otherwise fall back
+ // to the IMFMediaBuffer interface. Apparently IMF2DBuffer is more efficient,
+ // but only some systems (Windows 8?) support it.
+ BYTE* data = nullptr;
+ LONG stride = 0;
+ RefPtr<IMF2DBuffer> twoDBuffer;
+ hr = buffer->QueryInterface(static_cast<IMF2DBuffer**>(getter_AddRefs(twoDBuffer)));
+ if (SUCCEEDED(hr)) {
+ hr = twoDBuffer->Lock2D(&data, &stride);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+ } else {
+ hr = buffer->Lock(&data, nullptr, nullptr);
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+ stride = mVideoStride;
+ }
+
+ // YV12, planar format: [YYYY....][VVVV....][UUUU....]
+ // i.e., Y, then V, then U.
+ VideoData::YCbCrBuffer b;
+
+ uint32_t videoWidth = mImageSize.width;
+ uint32_t videoHeight = mImageSize.height;
+
+ // Y (Y') plane
+ b.mPlanes[0].mData = data;
+ b.mPlanes[0].mStride = stride;
+ b.mPlanes[0].mHeight = videoHeight;
+ b.mPlanes[0].mWidth = videoWidth;
+ b.mPlanes[0].mOffset = 0;
+ b.mPlanes[0].mSkip = 0;
+
+ // The V and U planes are stored 16-row-aligned, so we need to add padding
+ // to the row heights to ensure the Y'CbCr planes are referenced properly.
+ uint32_t padding = 0;
+ if (videoHeight % 16 != 0) {
+ padding = 16 - (videoHeight % 16);
+ }
+ uint32_t y_size = stride * (videoHeight + padding);
+ uint32_t v_size = stride * (videoHeight + padding) / 4;
+ uint32_t halfStride = (stride + 1) / 2;
+ uint32_t halfHeight = (videoHeight + 1) / 2;
+ uint32_t halfWidth = (videoWidth + 1) / 2;
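+  // For example, a 1920x1080 frame with a 1920-byte stride is padded to 1088
+  // rows, so the Y plane occupies bytes [0, 2088960), the V plane starts at
+  // offset 2088960 and the U plane at offset 2611200.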
+
+ // U plane (Cb)
+ b.mPlanes[1].mData = data + y_size + v_size;
+ b.mPlanes[1].mStride = halfStride;
+ b.mPlanes[1].mHeight = halfHeight;
+ b.mPlanes[1].mWidth = halfWidth;
+ b.mPlanes[1].mOffset = 0;
+ b.mPlanes[1].mSkip = 0;
+
+ // V plane (Cr)
+ b.mPlanes[2].mData = data + y_size;
+ b.mPlanes[2].mStride = halfStride;
+ b.mPlanes[2].mHeight = halfHeight;
+ b.mPlanes[2].mWidth = halfWidth;
+ b.mPlanes[2].mOffset = 0;
+ b.mPlanes[2].mSkip = 0;
+
+ media::TimeUnit pts = GetSampleTime(aSample);
+ NS_ENSURE_TRUE(pts.IsValid(), E_FAIL);
+ media::TimeUnit duration = GetSampleDuration(aSample);
+ NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
+ nsIntRect pictureRegion = mVideoInfo.ScaledImageRect(videoWidth, videoHeight);
+
+ LayersBackend backend = GetCompositorBackendType(mKnowsCompositor);
+ if (backend != LayersBackend::LAYERS_D3D9 &&
+ backend != LayersBackend::LAYERS_D3D11) {
+ RefPtr<VideoData> v =
+ VideoData::CreateAndCopyData(mVideoInfo,
+ mImageContainer,
+ aStreamOffset,
+ pts.ToMicroseconds(),
+ duration.ToMicroseconds(),
+ b,
+ false,
+ -1,
+ pictureRegion);
+ if (twoDBuffer) {
+ twoDBuffer->Unlock2D();
+ } else {
+ buffer->Unlock();
+ }
+ v.forget(aOutVideoData);
+ return S_OK;
+ }
+
+ RefPtr<layers::PlanarYCbCrImage> image =
+ new IMFYCbCrImage(buffer, twoDBuffer);
+
+ VideoData::SetVideoDataToImage(image,
+ mVideoInfo,
+ b,
+ pictureRegion,
+ false);
+
+ RefPtr<VideoData> v =
+ VideoData::CreateFromImage(mVideoInfo,
+ aStreamOffset,
+ pts.ToMicroseconds(),
+ duration.ToMicroseconds(),
+ image.forget(),
+ false,
+ -1,
+ pictureRegion);
+
+ v.forget(aOutVideoData);
+ return S_OK;
+}
+
+HRESULT
+WMFVideoMFTManager::CreateD3DVideoFrame(IMFSample* aSample,
+ int64_t aStreamOffset,
+ VideoData** aOutVideoData)
+{
+ NS_ENSURE_TRUE(aSample, E_POINTER);
+ NS_ENSURE_TRUE(aOutVideoData, E_POINTER);
+ NS_ENSURE_TRUE(mDXVA2Manager, E_ABORT);
+ NS_ENSURE_TRUE(mUseHwAccel, E_ABORT);
+
+ *aOutVideoData = nullptr;
+ HRESULT hr;
+
+ nsIntRect pictureRegion =
+ mVideoInfo.ScaledImageRect(mImageSize.width, mImageSize.height);
+ RefPtr<Image> image;
+ hr = mDXVA2Manager->CopyToImage(aSample,
+ pictureRegion,
+ getter_AddRefs(image));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+ NS_ENSURE_TRUE(image, E_FAIL);
+
+ media::TimeUnit pts = GetSampleTime(aSample);
+ NS_ENSURE_TRUE(pts.IsValid(), E_FAIL);
+ media::TimeUnit duration = GetSampleDuration(aSample);
+ NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
+ RefPtr<VideoData> v = VideoData::CreateFromImage(mVideoInfo,
+ aStreamOffset,
+ pts.ToMicroseconds(),
+ duration.ToMicroseconds(),
+ image.forget(),
+ false,
+ -1,
+ pictureRegion);
+
+ NS_ENSURE_TRUE(v, E_FAIL);
+ v.forget(aOutVideoData);
+
+ return S_OK;
+}
+
+// Blocks until a decoded sample is produced by the decoder.
+HRESULT
+WMFVideoMFTManager::Output(int64_t aStreamOffset,
+ RefPtr<MediaData>& aOutData)
+{
+ RefPtr<IMFSample> sample;
+ HRESULT hr;
+ aOutData = nullptr;
+ int typeChangeCount = 0;
+ bool wasDraining = mDraining;
+ int64_t sampleCount = mSamplesCount;
+ if (wasDraining) {
+ mSamplesCount = 0;
+ mDraining = false;
+ }
+
+ media::TimeUnit pts;
+ media::TimeUnit duration;
+
+ // Loop until we decode a sample, or an unexpected error that we can't
+ // handle occurs.
+ while (true) {
+ hr = mDecoder->Output(&sample);
+ if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
+ return MF_E_TRANSFORM_NEED_MORE_INPUT;
+ }
+ if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
+      // Video stream output type change. Probably a geometric aperture
+      // change. Reconfigure the video geometry, so that we output frames of
+      // the correct size.
+ MOZ_ASSERT(!sample);
+ hr = ConfigureVideoFrameGeometry();
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+ // Catch infinite loops, but some decoders perform at least 2 stream
+ // changes on consecutive calls, so be permissive.
+ // 100 is arbitrarily > 2.
+ NS_ENSURE_TRUE(typeChangeCount < 100, MF_E_TRANSFORM_STREAM_CHANGE);
+ // Loop back and try decoding again...
+ ++typeChangeCount;
+ continue;
+ }
+ if (SUCCEEDED(hr)) {
+ if (!sample) {
+ LOG("Video MFTDecoder returned success but no output!");
+ // On some machines/input the MFT returns success but doesn't output
+ // a video frame. If we detect this, try again, but only up to a
+ // point; after 250 failures, give up. Note we count all failures
+ // over the life of the decoder, as we may end up exiting with a
+ // NEED_MORE_INPUT and coming back to hit the same error. So just
+ // counting with a local variable (like typeChangeCount does) may
+ // not work in this situation.
+ ++mNullOutputCount;
+ if (mNullOutputCount > 250) {
+          LOG("Video MFTDecoder returned success but no output too many times; giving up");
+ mGotExcessiveNullOutput = true;
+ return E_FAIL;
+ }
+ continue;
+ }
+ pts = GetSampleTime(sample);
+ duration = GetSampleDuration(sample);
+ if (!pts.IsValid() || !duration.IsValid()) {
+ return E_FAIL;
+ }
+ if (wasDraining && sampleCount == 1 && pts == media::TimeUnit()) {
+ // WMF is unable to calculate a duration if only a single sample
+ // was parsed. Additionally, the pts always comes out at 0 under those
+ // circumstances.
+        // Since we have only fed the decoder a single frame, the pts and
+        // duration are known: they are those of the last input sample.
+ pts = media::TimeUnit::FromMicroseconds(mLastTime);
+ duration = media::TimeUnit::FromMicroseconds(mLastDuration);
+ }
+ if (mSeekTargetThreshold.isSome()) {
+ if ((pts + duration) < mSeekTargetThreshold.ref()) {
+          LOG("Dropping video frame whose pts is smaller than the seek target.");
+ // It is necessary to clear the pointer to release the previous output
+ // buffer.
+ sample = nullptr;
+ continue;
+ }
+ mSeekTargetThreshold.reset();
+ }
+ break;
+ }
+    // Else unexpected error: warn and bail.
+ NS_WARNING("WMFVideoMFTManager::Output() unexpected error");
+ return hr;
+ }
+
+ RefPtr<VideoData> frame;
+ if (mUseHwAccel) {
+ hr = CreateD3DVideoFrame(sample, aStreamOffset, getter_AddRefs(frame));
+ } else {
+ hr = CreateBasicVideoFrame(sample, aStreamOffset, getter_AddRefs(frame));
+ }
+  // Frame should be non-null only when we succeeded.
+ MOZ_ASSERT((frame != nullptr) == SUCCEEDED(hr));
+ NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
+ NS_ENSURE_TRUE(frame, E_FAIL);
+
+ aOutData = frame;
+ // Set the potentially corrected pts and duration.
+ aOutData->mTime = pts.ToMicroseconds();
+ aOutData->mDuration = duration.ToMicroseconds();
+
+ if (mNullOutputCount) {
+ mGotValidOutputAfterNullOutput = true;
+ }
+
+ return S_OK;
+}
+
+void
+WMFVideoMFTManager::Shutdown()
+{
+ mDecoder = nullptr;
+ DeleteOnMainThread(mDXVA2Manager);
+}
+
+bool
+WMFVideoMFTManager::IsHardwareAccelerated(nsACString& aFailureReason) const
+{
+ aFailureReason = mDXVAFailureReason;
+ return mDecoder && mUseHwAccel;
+}
+
+} // namespace mozilla
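
For reference, the contract that WMFVideoMFTManager::Output() implements above (MF_E_TRANSFORM_NEED_MORE_INPUT when the MFT has nothing ready, S_OK plus a frame otherwise) is what the owning decoder loops on after each input. The sketch below is illustrative only and uses a hypothetical helper name; it is not part of this patch.

HRESULT DrainAllOutput(MFTManager* aManager,
                       int64_t aStreamOffset,
                       MediaDataDecoderCallback* aCallback)
{
  while (true) {
    RefPtr<MediaData> output;
    HRESULT hr = aManager->Output(aStreamOffset, output);
    if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
      // Not an error: the MFT simply needs more input before it can produce
      // another frame.
      return S_OK;
    }
    if (FAILED(hr)) {
      // Any other failure is fatal for this decode call.
      return hr;
    }
    // A successfully decoded frame; hand it to the callback.
    aCallback->Output(output);
  }
}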
diff --git a/dom/media/platforms/wmf/WMFVideoMFTManager.h b/dom/media/platforms/wmf/WMFVideoMFTManager.h
new file mode 100644
index 000000000..b8dfa6336
--- /dev/null
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.h
@@ -0,0 +1,125 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(WMFVideoMFTManager_h_)
+#define WMFVideoMFTManager_h_
+
+#include "WMF.h"
+#include "MFTDecoder.h"
+#include "nsAutoPtr.h"
+#include "nsRect.h"
+#include "WMFMediaDataDecoder.h"
+#include "mozilla/RefPtr.h"
+
+namespace mozilla {
+
+class DXVA2Manager;
+
+class WMFVideoMFTManager : public MFTManager {
+public:
+ WMFVideoMFTManager(const VideoInfo& aConfig,
+ layers::KnowsCompositor* aKnowsCompositor,
+ layers::ImageContainer* aImageContainer,
+ bool aDXVAEnabled);
+ ~WMFVideoMFTManager();
+
+ bool Init();
+
+ HRESULT Input(MediaRawData* aSample) override;
+
+ HRESULT Output(int64_t aStreamOffset, RefPtr<MediaData>& aOutput) override;
+
+ void Shutdown() override;
+
+ bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
+
+ TrackInfo::TrackType GetType() override {
+ return TrackInfo::kVideoTrack;
+ }
+
+ const char* GetDescriptionName() const override
+ {
+ nsCString failureReason;
+ return IsHardwareAccelerated(failureReason)
+ ? "wmf hardware video decoder" : "wmf software video decoder";
+ }
+
+ void Flush() override
+ {
+ MFTManager::Flush();
+ mDraining = false;
+ mSamplesCount = 0;
+ }
+
+ void Drain() override
+ {
+ MFTManager::Drain();
+ mDraining = true;
+ }
+
+private:
+
+ bool ValidateVideoInfo();
+
+ bool InitializeDXVA(bool aForceD3D9);
+
+ bool InitInternal(bool aForceD3D9);
+
+ HRESULT ConfigureVideoFrameGeometry();
+
+ HRESULT CreateBasicVideoFrame(IMFSample* aSample,
+ int64_t aStreamOffset,
+ VideoData** aOutVideoData);
+
+ HRESULT CreateD3DVideoFrame(IMFSample* aSample,
+ int64_t aStreamOffset,
+ VideoData** aOutVideoData);
+
+ HRESULT SetDecoderMediaTypes();
+
+ bool CanUseDXVA(IMFMediaType* aType);
+
+ // Video frame geometry.
+ VideoInfo mVideoInfo;
+ uint32_t mVideoStride;
+ nsIntSize mImageSize;
+
+ RefPtr<layers::ImageContainer> mImageContainer;
+ RefPtr<layers::KnowsCompositor> mKnowsCompositor;
+ nsAutoPtr<DXVA2Manager> mDXVA2Manager;
+
+ RefPtr<IMFSample> mLastInput;
+ float mLastDuration;
+ int64_t mLastTime = 0;
+ bool mDraining = false;
+ int64_t mSamplesCount = 0;
+
+ bool mDXVAEnabled;
+ bool mUseHwAccel;
+
+ nsCString mDXVAFailureReason;
+
+ enum StreamType {
+ Unknown,
+ H264,
+ VP8,
+ VP9
+ };
+
+ StreamType mStreamType;
+
+ const GUID& GetMFTGUID();
+ const GUID& GetMediaSubtypeGUID();
+
+ uint32_t mNullOutputCount;
+ bool mGotValidOutputAfterNullOutput;
+ bool mGotExcessiveNullOutput;
+ bool mIsValid;
+};
+
+} // namespace mozilla
+
+#endif // WMFVideoMFTManager_h_
diff --git a/dom/media/platforms/wmf/moz.build b/dom/media/platforms/wmf/moz.build
new file mode 100644
index 000000000..fa966bea2
--- /dev/null
+++ b/dom/media/platforms/wmf/moz.build
@@ -0,0 +1,34 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+EXPORTS += [
+ 'DXVA2Manager.h',
+ 'MFTDecoder.h',
+ 'WMF.h',
+ 'WMFAudioMFTManager.h',
+ 'WMFDecoderModule.h',
+ 'WMFMediaDataDecoder.h',
+ 'WMFUtils.h',
+ 'WMFVideoMFTManager.h',
+]
+UNIFIED_SOURCES += [
+ 'DXVA2Manager.cpp',
+ 'MFTDecoder.cpp',
+ 'WMFAudioMFTManager.cpp',
+ 'WMFDecoderModule.cpp',
+ 'WMFMediaDataDecoder.cpp',
+ 'WMFVideoMFTManager.cpp',
+]
+
+SOURCES += [
+ 'WMFUtils.cpp',
+]
+
+include('/ipc/chromium/chromium-config.mozbuild')
+
+FINAL_LIBRARY = 'xul'
+
+CXXFLAGS += CONFIG['MOZ_CAIRO_CFLAGS']
diff --git a/dom/media/platforms/wrappers/FuzzingWrapper.cpp b/dom/media/platforms/wrappers/FuzzingWrapper.cpp
new file mode 100644
index 000000000..7df020f46
--- /dev/null
+++ b/dom/media/platforms/wrappers/FuzzingWrapper.cpp
@@ -0,0 +1,328 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "FuzzingWrapper.h"
+
+mozilla::LogModule* GetFuzzingWrapperLog() {
+ static mozilla::LazyLogModule log("MediaFuzzingWrapper");
+ return log;
+}
+#define DFW_LOGD(arg, ...) MOZ_LOG(GetFuzzingWrapperLog(), mozilla::LogLevel::Debug, ("DecoderFuzzingWrapper(%p)::%s: " arg, this, __func__, ##__VA_ARGS__))
+#define DFW_LOGV(arg, ...) MOZ_LOG(GetFuzzingWrapperLog(), mozilla::LogLevel::Verbose, ("DecoderFuzzingWrapper(%p)::%s: " arg, this, __func__, ##__VA_ARGS__))
+#define CFW_LOGD(arg, ...) MOZ_LOG(GetFuzzingWrapperLog(), mozilla::LogLevel::Debug, ("DecoderCallbackFuzzingWrapper(%p)::%s: " arg, this, __func__, ##__VA_ARGS__))
+#define CFW_LOGV(arg, ...) MOZ_LOG(GetFuzzingWrapperLog(), mozilla::LogLevel::Verbose, ("DecoderCallbackFuzzingWrapper(%p)::%s: " arg, this, __func__, ##__VA_ARGS__))
+
+namespace mozilla {
+
+DecoderFuzzingWrapper::DecoderFuzzingWrapper(
+ already_AddRefed<MediaDataDecoder> aDecoder,
+ already_AddRefed<DecoderCallbackFuzzingWrapper> aCallbackWrapper)
+ : mDecoder(aDecoder)
+ , mCallbackWrapper(aCallbackWrapper)
+{
+ DFW_LOGV("aDecoder=%p aCallbackWrapper=%p", mDecoder.get(), mCallbackWrapper.get());
+}
+
+DecoderFuzzingWrapper::~DecoderFuzzingWrapper()
+{
+ DFW_LOGV("");
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+DecoderFuzzingWrapper::Init()
+{
+ DFW_LOGV("");
+ MOZ_ASSERT(mDecoder);
+ return mDecoder->Init();
+}
+
+void
+DecoderFuzzingWrapper::Input(MediaRawData* aData)
+{
+ DFW_LOGV("aData.mTime=%lld", aData->mTime);
+ MOZ_ASSERT(mDecoder);
+ mDecoder->Input(aData);
+}
+
+void
+DecoderFuzzingWrapper::Flush()
+{
+ DFW_LOGV("Calling mDecoder[%p]->Flush()", mDecoder.get());
+ MOZ_ASSERT(mDecoder);
+  // Flush may output some frames (though that is unlikely).
+  // Flush may block a bit; it's ok if we output some frames in the meantime.
+ mDecoder->Flush();
+ DFW_LOGV("mDecoder[%p]->Flush()", mDecoder.get());
+ // Clear any delayed output we may have.
+ mCallbackWrapper->ClearDelayedOutput();
+}
+
+void
+DecoderFuzzingWrapper::Drain()
+{
+ DFW_LOGV("");
+ MOZ_ASSERT(mDecoder);
+  // Note: The decoder should call back DrainComplete(); we'll drain the
+  // delayed output (if any) at that point.
+ mDecoder->Drain();
+}
+
+void
+DecoderFuzzingWrapper::Shutdown()
+{
+ DFW_LOGV("");
+ MOZ_ASSERT(mDecoder);
+ // Both shutdowns below may block a bit.
+ mDecoder->Shutdown();
+ mCallbackWrapper->Shutdown();
+}
+
+bool
+DecoderFuzzingWrapper::IsHardwareAccelerated(nsACString& aFailureReason) const
+{
+ DFW_LOGV("");
+ MOZ_ASSERT(mDecoder);
+ return mDecoder->IsHardwareAccelerated(aFailureReason);
+}
+
+DecoderCallbackFuzzingWrapper::DecoderCallbackFuzzingWrapper(MediaDataDecoderCallback* aCallback)
+ : mCallback(aCallback)
+ , mDontDelayInputExhausted(false)
+ , mDraining(false)
+ , mTaskQueue(new TaskQueue(SharedThreadPool::Get(NS_LITERAL_CSTRING("MediaFuzzingWrapper"), 1)))
+{
+ CFW_LOGV("aCallback=%p", aCallback);
+}
+
+DecoderCallbackFuzzingWrapper::~DecoderCallbackFuzzingWrapper()
+{
+ CFW_LOGV("");
+}
+
+void
+DecoderCallbackFuzzingWrapper::SetVideoOutputMinimumInterval(
+ TimeDuration aFrameOutputMinimumInterval)
+{
+ CFW_LOGD("aFrameOutputMinimumInterval=%fms",
+ aFrameOutputMinimumInterval.ToMilliseconds());
+ mFrameOutputMinimumInterval = aFrameOutputMinimumInterval;
+}
+
+void
+DecoderCallbackFuzzingWrapper::SetDontDelayInputExhausted(
+ bool aDontDelayInputExhausted)
+{
+ CFW_LOGD("aDontDelayInputExhausted=%d",
+ aDontDelayInputExhausted);
+ mDontDelayInputExhausted = aDontDelayInputExhausted;
+}
+
+void
+DecoderCallbackFuzzingWrapper::Output(MediaData* aData)
+{
+ if (!mTaskQueue->IsCurrentThreadIn()) {
+ nsCOMPtr<nsIRunnable> task =
+ NewRunnableMethod<StorensRefPtrPassByPtr<MediaData>>(
+ this, &DecoderCallbackFuzzingWrapper::Output, aData);
+ mTaskQueue->Dispatch(task.forget());
+ return;
+ }
+ CFW_LOGV("aData.mTime=%lld", aData->mTime);
+ MOZ_ASSERT(mCallback);
+ if (mFrameOutputMinimumInterval) {
+ if (!mPreviousOutput.IsNull()) {
+ if (!mDelayedOutput.empty()) {
+        // We already have some delayed frames; just add this one to the queue.
+ mDelayedOutput.push_back(MakePair<RefPtr<MediaData>, bool>(aData, false));
+ CFW_LOGD("delaying output of sample@%lld, total queued:%d",
+ aData->mTime, int(mDelayedOutput.size()));
+ return;
+ }
+ if (TimeStamp::Now() < mPreviousOutput + mFrameOutputMinimumInterval) {
+ // Frame arriving too soon after the previous one, start queuing.
+ mDelayedOutput.push_back(MakePair<RefPtr<MediaData>, bool>(aData, false));
+ CFW_LOGD("delaying output of sample@%lld, first queued", aData->mTime);
+ if (!mDelayedOutputTimer) {
+ mDelayedOutputTimer = new MediaTimer();
+ }
+ ScheduleOutputDelayedFrame();
+ return;
+ }
+ }
+ // If we're here, we're going to actually output a frame -> Record time.
+ mPreviousOutput = TimeStamp::Now();
+ }
+
+ // Passing the data straight through, no need to dispatch to another queue,
+ // callback should deal with that.
+ mCallback->Output(aData);
+}
+
+void
+DecoderCallbackFuzzingWrapper::Error(const MediaResult& aError)
+{
+ if (!mTaskQueue->IsCurrentThreadIn()) {
+ mTaskQueue->Dispatch(NewRunnableMethod<MediaResult>(
+ this, &DecoderCallbackFuzzingWrapper::Error, aError));
+ return;
+ }
+ CFW_LOGV("");
+ MOZ_ASSERT(mCallback);
+ ClearDelayedOutput();
+ mCallback->Error(aError);
+}
+
+void
+DecoderCallbackFuzzingWrapper::InputExhausted()
+{
+ if (!mTaskQueue->IsCurrentThreadIn()) {
+ mTaskQueue->Dispatch(NewRunnableMethod(this, &DecoderCallbackFuzzingWrapper::InputExhausted));
+ return;
+ }
+ if (!mDontDelayInputExhausted && !mDelayedOutput.empty()) {
+ MediaDataAndInputExhausted& last = mDelayedOutput.back();
+ CFW_LOGD("InputExhausted delayed until after output of sample@%lld",
+ last.first()->mTime);
+ last.second() = true;
+ return;
+ }
+ CFW_LOGV("");
+ MOZ_ASSERT(mCallback);
+ mCallback->InputExhausted();
+}
+
+void
+DecoderCallbackFuzzingWrapper::DrainComplete()
+{
+ if (!mTaskQueue->IsCurrentThreadIn()) {
+ mTaskQueue->Dispatch(NewRunnableMethod(this, &DecoderCallbackFuzzingWrapper::DrainComplete));
+ return;
+ }
+ MOZ_ASSERT(mCallback);
+ if (mDelayedOutput.empty()) {
+ // No queued output -> Draining is complete now.
+ CFW_LOGV("No delayed output -> DrainComplete now");
+ mCallback->DrainComplete();
+ } else {
+ // Queued output waiting -> Make sure we call DrainComplete when it's empty.
+ CFW_LOGD("Delayed output -> DrainComplete later");
+ mDraining = true;
+ }
+}
+
+void
+DecoderCallbackFuzzingWrapper::ReleaseMediaResources()
+{
+ if (!mTaskQueue->IsCurrentThreadIn()) {
+ mTaskQueue->Dispatch(NewRunnableMethod(this, &DecoderCallbackFuzzingWrapper::ReleaseMediaResources));
+ return;
+ }
+ CFW_LOGV("");
+ MOZ_ASSERT(mCallback);
+ mCallback->ReleaseMediaResources();
+}
+
+bool
+DecoderCallbackFuzzingWrapper::OnReaderTaskQueue()
+{
+ CFW_LOGV("");
+ MOZ_ASSERT(mCallback);
+ return mCallback->OnReaderTaskQueue();
+}
+
+void
+DecoderCallbackFuzzingWrapper::ScheduleOutputDelayedFrame()
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ if (mDelayedOutputRequest.Exists()) {
+ // A delayed output is already scheduled, no need for more than one timer.
+ return;
+ }
+ RefPtr<DecoderCallbackFuzzingWrapper> self = this;
+ mDelayedOutputRequest.Begin(
+ mDelayedOutputTimer->WaitUntil(
+ mPreviousOutput + mFrameOutputMinimumInterval,
+ __func__)
+ ->Then(mTaskQueue, __func__,
+ [self] () -> void {
+ if (self->mDelayedOutputRequest.Exists()) {
+ self->mDelayedOutputRequest.Complete();
+ self->OutputDelayedFrame();
+ }
+ },
+ [self] () -> void {
+ if (self->mDelayedOutputRequest.Exists()) {
+ self->mDelayedOutputRequest.Complete();
+ self->ClearDelayedOutput();
+ }
+ }));
+}
+
+void
+DecoderCallbackFuzzingWrapper::OutputDelayedFrame()
+{
+ MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
+ if (mDelayedOutput.empty()) {
+ if (mDraining) {
+ // No more output, and we were draining -> Send DrainComplete.
+ mDraining = false;
+ mCallback->DrainComplete();
+ }
+ return;
+ }
+ MediaDataAndInputExhausted& data = mDelayedOutput.front();
+ CFW_LOGD("Outputting delayed sample@%lld, remaining:%d",
+ data.first()->mTime, int(mDelayedOutput.size() - 1));
+ mPreviousOutput = TimeStamp::Now();
+ mCallback->Output(data.first());
+ if (data.second()) {
+ CFW_LOGD("InputExhausted after delayed sample@%lld", data.first()->mTime);
+ mCallback->InputExhausted();
+ }
+ mDelayedOutput.pop_front();
+ if (!mDelayedOutput.empty()) {
+ // More output -> Send it later.
+ ScheduleOutputDelayedFrame();
+ } else if (mDraining) {
+ // No more output, and we were draining -> Send DrainComplete.
+ CFW_LOGD("DrainComplete");
+ mDraining = false;
+ mCallback->DrainComplete();
+ }
+}
+
+void
+DecoderCallbackFuzzingWrapper::ClearDelayedOutput()
+{
+ if (!mTaskQueue->IsCurrentThreadIn()) {
+    CFW_LOGV("(dispatching self)");
+ mTaskQueue->Dispatch(NewRunnableMethod(this, &DecoderCallbackFuzzingWrapper::ClearDelayedOutput));
+ return;
+ }
+  CFW_LOGV("");
+  // If a timer hasn't lapsed yet, its 'Then' request must be disconnected
+  // before destroying the timer and its attached WaitUntil() promise.
+ mDelayedOutputRequest.DisconnectIfExists();
+ mDelayedOutputTimer = nullptr;
+ mDelayedOutput.clear();
+}
+
+void
+DecoderCallbackFuzzingWrapper::Shutdown()
+{
+ CFW_LOGV("Clear delayed output (if any) before shutting down mTaskQueue");
+ ClearDelayedOutput();
+ // Await idle here, so that 'ClearDelayedOutput' runs to completion before
+ // the task queue is shutdown (and tasks can't be queued anymore).
+ mTaskQueue->AwaitIdle();
+
+ CFW_LOGV("Shutting down mTaskQueue");
+ mTaskQueue->BeginShutdown();
+ mTaskQueue->AwaitIdle();
+ CFW_LOGV("mTaskQueue shut down");
+}
+
+} // namespace mozilla
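
The throttling implemented in DecoderCallbackFuzzingWrapper::Output() above is driven by two knobs declared in the header. A minimal, hypothetical configuration sketch follows; aRealCallback is assumed to exist at the call site, and only the wrapper class comes from this patch.

RefPtr<DecoderCallbackFuzzingWrapper> callbackWrapper =
  new DecoderCallbackFuzzingWrapper(aRealCallback);
// Emit at most one video frame every 40ms (~25 fps); faster output is queued
// and re-emitted from the MediaTimer used by ScheduleOutputDelayedFrame().
callbackWrapper->SetVideoOutputMinimumInterval(
  TimeDuration::FromMilliseconds(40));
// Keep InputExhausted notifications ordered after their delayed frames.
callbackWrapper->SetDontDelayInputExhausted(false);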
diff --git a/dom/media/platforms/wrappers/FuzzingWrapper.h b/dom/media/platforms/wrappers/FuzzingWrapper.h
new file mode 100644
index 000000000..c2b737520
--- /dev/null
+++ b/dom/media/platforms/wrappers/FuzzingWrapper.h
@@ -0,0 +1,124 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(FuzzingWrapper_h_)
+#define FuzzingWrapper_h_
+
+#include "mozilla/Pair.h"
+#include "PlatformDecoderModule.h"
+
+#include <deque>
+
+namespace mozilla {
+
+// Fuzzing wrapper for media decoders.
+//
+// DecoderFuzzingWrapper owns the DecoderCallbackFuzzingWrapper, and inserts
+// itself between the reader and the decoder.
+// DecoderCallbackFuzzingWrapper inserts itself between a decoder and its
+// callback.
+// Together they are used to introduce some fuzzing (e.g., delaying output).
+//
+// Normally:
+// ====================================>
+// reader decoder
+// <------------------------------------
+//
+// With fuzzing:
+// ======> DecoderFuzzingWrapper ======>
+// reader v decoder
+// <-- DecoderCallbackFuzzingWrapper <--
+//
+// Creation order should be:
+// 1. Create DecoderCallbackFuzzingWrapper, give the expected callback target.
+// 2. Create actual decoder, give DecoderCallbackFuzzingWrapper as callback.
+// 3. Create DecoderFuzzingWrapper, give decoder and DecoderCallbackFuzzingWrapper.
+// DecoderFuzzingWrapper is what the reader sees as decoder, it owns the
+// real decoder and the DecoderCallbackFuzzingWrapper.
+
+class DecoderCallbackFuzzingWrapper : public MediaDataDecoderCallback
+{
+public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DecoderCallbackFuzzingWrapper)
+
+ explicit DecoderCallbackFuzzingWrapper(MediaDataDecoderCallback* aCallback);
+
+ // Enforce a minimum interval between output frames (i.e., limit frame rate).
+ // Of course, if the decoder is even slower, this won't have any effect.
+ void SetVideoOutputMinimumInterval(TimeDuration aFrameOutputMinimumInterval);
+  // If false (the default) and frames are being delayed, any InputExhausted is
+  // also delayed and sent after the corresponding delayed frame.
+  // If true, InputExhausted notifications are passed through immediately; this
+  // could result in lots of frames being decoded and queued for delayed output!
+ void SetDontDelayInputExhausted(bool aDontDelayInputExhausted);
+
+private:
+ virtual ~DecoderCallbackFuzzingWrapper();
+
+ // MediaDataDecoderCallback implementation.
+ void Output(MediaData* aData) override;
+ void Error(const MediaResult& aError) override;
+ void InputExhausted() override;
+ void DrainComplete() override;
+ void ReleaseMediaResources() override;
+ bool OnReaderTaskQueue() override;
+
+ MediaDataDecoderCallback* mCallback;
+
+ // Settings for minimum frame output interval & InputExhausted,
+ // should be set during init and then only read on mTaskQueue.
+ TimeDuration mFrameOutputMinimumInterval;
+ bool mDontDelayInputExhausted;
+ // Members for minimum frame output interval & InputExhausted,
+ // should only be accessed on mTaskQueue.
+ TimeStamp mPreviousOutput;
+ // First member is the frame to be delayed.
+  // Second member is true if an 'InputExhausted' arrived after that frame, in
+  // which case an InputExhausted will be sent after finally outputting the frame.
+ typedef Pair<RefPtr<MediaData>, bool> MediaDataAndInputExhausted;
+ std::deque<MediaDataAndInputExhausted> mDelayedOutput;
+ RefPtr<MediaTimer> mDelayedOutputTimer;
+ MozPromiseRequestHolder<MediaTimerPromise> mDelayedOutputRequest;
+ // If draining, a 'DrainComplete' will be sent after all delayed frames have
+ // been output.
+ bool mDraining;
+ // All callbacks are redirected through this task queue, both to avoid locking
+ // and to have a consistent sequencing of callbacks.
+ RefPtr<TaskQueue> mTaskQueue;
+ void ScheduleOutputDelayedFrame();
+ void OutputDelayedFrame();
+public: // public for the benefit of DecoderFuzzingWrapper.
+ void ClearDelayedOutput();
+ void Shutdown();
+};
+
+class DecoderFuzzingWrapper : public MediaDataDecoder
+{
+public:
+ DecoderFuzzingWrapper(already_AddRefed<MediaDataDecoder> aDecoder,
+ already_AddRefed<DecoderCallbackFuzzingWrapper> aCallbackWrapper);
+
+ // MediaDataDecoder implementation.
+ RefPtr<InitPromise> Init() override;
+ void Input(MediaRawData* aSample) override;
+ void Flush() override;
+ void Drain() override;
+ void Shutdown() override;
+ bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
+ const char* GetDescriptionName() const override
+ {
+ return mDecoder->GetDescriptionName();
+ }
+
+private:
+ virtual ~DecoderFuzzingWrapper();
+ RefPtr<MediaDataDecoder> mDecoder;
+ RefPtr<DecoderCallbackFuzzingWrapper> mCallbackWrapper;
+};
+
+} // namespace mozilla
+
+#endif
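
A hypothetical wiring sketch following the creation order documented in the comment above; aReaderCallback and CreateRealDecoder() stand in for whatever the caller already has, and only the two wrapper classes come from this patch.

RefPtr<DecoderCallbackFuzzingWrapper> callbackWrapper =
  new DecoderCallbackFuzzingWrapper(aReaderCallback);      // step 1
RefPtr<MediaDataDecoder> realDecoder =
  CreateRealDecoder(callbackWrapper.get());                // step 2
RefPtr<MediaDataDecoder> decoder =
  new DecoderFuzzingWrapper(realDecoder.forget(),
                            callbackWrapper.forget());     // step 3
// The reader now uses 'decoder' exactly as it would use the real decoder.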
diff --git a/dom/media/platforms/wrappers/H264Converter.cpp b/dom/media/platforms/wrappers/H264Converter.cpp
new file mode 100644
index 000000000..0edbfc10c
--- /dev/null
+++ b/dom/media/platforms/wrappers/H264Converter.cpp
@@ -0,0 +1,311 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/TaskQueue.h"
+
+#include "H264Converter.h"
+#include "ImageContainer.h"
+#include "MediaInfo.h"
+#include "mp4_demuxer/AnnexB.h"
+#include "mp4_demuxer/H264.h"
+
+namespace mozilla
+{
+
+H264Converter::H264Converter(PlatformDecoderModule* aPDM,
+ const CreateDecoderParams& aParams)
+ : mPDM(aPDM)
+ , mCurrentConfig(aParams.VideoConfig())
+ , mKnowsCompositor(aParams.mKnowsCompositor)
+ , mImageContainer(aParams.mImageContainer)
+ , mTaskQueue(aParams.mTaskQueue)
+ , mCallback(aParams.mCallback)
+ , mDecoder(nullptr)
+ , mGMPCrashHelper(aParams.mCrashHelper)
+ , mNeedAVCC(aPDM->DecoderNeedsConversion(aParams.mConfig)
+ == PlatformDecoderModule::ConversionRequired::kNeedAVCC)
+ , mLastError(NS_OK)
+{
+ CreateDecoder(aParams.mDiagnostics);
+}
+
+H264Converter::~H264Converter()
+{
+}
+
+RefPtr<MediaDataDecoder::InitPromise>
+H264Converter::Init()
+{
+ if (mDecoder) {
+ return mDecoder->Init();
+ }
+
+ // We haven't been able to initialize a decoder due to a missing SPS/PPS.
+ return MediaDataDecoder::InitPromise::CreateAndResolve(
+ TrackType::kVideoTrack, __func__);
+}
+
+void
+H264Converter::Input(MediaRawData* aSample)
+{
+ if (!mp4_demuxer::AnnexB::ConvertSampleToAVCC(aSample)) {
+ // We need AVCC content to be able to later parse the SPS.
+ // This is a no-op if the data is already AVCC.
+ mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("ConvertSampleToAVCC")));
+ return;
+ }
+
+ if (mInitPromiseRequest.Exists()) {
+ if (mNeedKeyframe) {
+ if (!aSample->mKeyframe) {
+        // Frames were dropped; we need a new keyframe.
+ mCallback->InputExhausted();
+ return;
+ }
+ mNeedKeyframe = false;
+ }
+ mMediaRawSamples.AppendElement(aSample);
+ return;
+ }
+
+ nsresult rv;
+ if (!mDecoder) {
+    // It is not possible to create an AVCC H264 decoder without an SPS.
+    // As such, creation will fail if the extra_data just extracted doesn't
+    // contain an SPS.
+ rv = CreateDecoderAndInit(aSample);
+ if (rv == NS_ERROR_NOT_INITIALIZED) {
+ // We are missing the required SPS to create the decoder.
+ // Ignore for the time being, the MediaRawData will be dropped.
+ mCallback->InputExhausted();
+ return;
+ }
+ } else {
+ rv = CheckForSPSChange(aSample);
+ if (rv == NS_ERROR_NOT_INITIALIZED) {
+ // The decoder is pending initialization.
+ mCallback->InputExhausted();
+ return;
+ }
+ }
+ if (NS_FAILED(rv)) {
+ mCallback->Error(
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Unable to create H264 decoder")));
+ return;
+ }
+
+ if (mNeedKeyframe && !aSample->mKeyframe) {
+ mCallback->InputExhausted();
+ return;
+ }
+
+ if (!mNeedAVCC &&
+ !mp4_demuxer::AnnexB::ConvertSampleToAnnexB(aSample, mNeedKeyframe)) {
+ mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("ConvertSampleToAnnexB")));
+ return;
+ }
+
+ mNeedKeyframe = false;
+
+ aSample->mExtraData = mCurrentConfig.mExtraData;
+
+ mDecoder->Input(aSample);
+}
+
+void
+H264Converter::Flush()
+{
+ mNeedKeyframe = true;
+ if (mDecoder) {
+ mDecoder->Flush();
+ }
+}
+
+void
+H264Converter::Drain()
+{
+ mNeedKeyframe = true;
+ if (mDecoder) {
+ mDecoder->Drain();
+ return;
+ }
+ mCallback->DrainComplete();
+}
+
+void
+H264Converter::Shutdown()
+{
+ if (mDecoder) {
+ mDecoder->Shutdown();
+ mInitPromiseRequest.DisconnectIfExists();
+ mDecoder = nullptr;
+ }
+}
+
+bool
+H264Converter::IsHardwareAccelerated(nsACString& aFailureReason) const
+{
+ if (mDecoder) {
+ return mDecoder->IsHardwareAccelerated(aFailureReason);
+ }
+ return MediaDataDecoder::IsHardwareAccelerated(aFailureReason);
+}
+
+void
+H264Converter::SetSeekThreshold(const media::TimeUnit& aTime)
+{
+ if (mDecoder) {
+ mDecoder->SetSeekThreshold(aTime);
+ } else {
+ MediaDataDecoder::SetSeekThreshold(aTime);
+ }
+}
+
+nsresult
+H264Converter::CreateDecoder(DecoderDoctorDiagnostics* aDiagnostics)
+{
+ if (!mp4_demuxer::AnnexB::HasSPS(mCurrentConfig.mExtraData)) {
+ // nothing found yet, will try again later
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+ UpdateConfigFromExtraData(mCurrentConfig.mExtraData);
+
+ mp4_demuxer::SPSData spsdata;
+ if (mp4_demuxer::H264::DecodeSPSFromExtraData(mCurrentConfig.mExtraData, spsdata)) {
+ // Do some format check here.
+ // WMF H.264 Video Decoder and Apple ATDecoder do not support YUV444 format.
+ if (spsdata.profile_idc == 244 /* Hi444PP */ ||
+ spsdata.chroma_format_idc == PDMFactory::kYUV444) {
+ mLastError = NS_ERROR_FAILURE;
+ if (aDiagnostics) {
+ aDiagnostics->SetVideoNotSupported();
+ }
+ return NS_ERROR_FAILURE;
+ }
+ } else {
+ // SPS was invalid.
+ mLastError = NS_ERROR_FAILURE;
+ return NS_ERROR_FAILURE;
+ }
+
+ mDecoder = mPDM->CreateVideoDecoder({
+ mCurrentConfig,
+ mTaskQueue,
+ mCallback,
+ aDiagnostics,
+ mImageContainer,
+ mKnowsCompositor,
+ mGMPCrashHelper
+ });
+
+ if (!mDecoder) {
+ mLastError = NS_ERROR_FAILURE;
+ return NS_ERROR_FAILURE;
+ }
+
+ mNeedKeyframe = true;
+
+ return NS_OK;
+}
+
+nsresult
+H264Converter::CreateDecoderAndInit(MediaRawData* aSample)
+{
+ RefPtr<MediaByteBuffer> extra_data =
+ mp4_demuxer::AnnexB::ExtractExtraData(aSample);
+ if (!mp4_demuxer::AnnexB::HasSPS(extra_data)) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+ UpdateConfigFromExtraData(extra_data);
+
+ nsresult rv = CreateDecoder(/* DecoderDoctorDiagnostics* */ nullptr);
+
+ if (NS_SUCCEEDED(rv)) {
+ // Queue the incoming sample.
+ mMediaRawSamples.AppendElement(aSample);
+
+ mInitPromiseRequest.Begin(mDecoder->Init()
+ ->Then(AbstractThread::GetCurrent()->AsTaskQueue(), __func__, this,
+ &H264Converter::OnDecoderInitDone,
+ &H264Converter::OnDecoderInitFailed));
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+ return rv;
+}
+
+void
+H264Converter::OnDecoderInitDone(const TrackType aTrackType)
+{
+ mInitPromiseRequest.Complete();
+ bool gotInput = false;
+ for (uint32_t i = 0 ; i < mMediaRawSamples.Length(); i++) {
+ const RefPtr<MediaRawData>& sample = mMediaRawSamples[i];
+ if (mNeedKeyframe) {
+ if (!sample->mKeyframe) {
+ continue;
+ }
+ mNeedKeyframe = false;
+ }
+ if (!mNeedAVCC &&
+ !mp4_demuxer::AnnexB::ConvertSampleToAnnexB(sample, mNeedKeyframe)) {
+ mCallback->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY,
+ RESULT_DETAIL("ConvertSampleToAnnexB")));
+ mMediaRawSamples.Clear();
+ return;
+ }
+    mDecoder->Input(sample);
+    gotInput = true;
+ }
+ if (!gotInput) {
+ mCallback->InputExhausted();
+ }
+ mMediaRawSamples.Clear();
+}
+
+void
+H264Converter::OnDecoderInitFailed(MediaResult aError)
+{
+ mInitPromiseRequest.Complete();
+ mCallback->Error(
+ MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+ RESULT_DETAIL("Unable to initialize H264 decoder")));
+}
+
+nsresult
+H264Converter::CheckForSPSChange(MediaRawData* aSample)
+{
+ RefPtr<MediaByteBuffer> extra_data =
+ mp4_demuxer::AnnexB::ExtractExtraData(aSample);
+ if (!mp4_demuxer::AnnexB::HasSPS(extra_data) ||
+ mp4_demuxer::AnnexB::CompareExtraData(extra_data,
+ mCurrentConfig.mExtraData)) {
+ return NS_OK;
+ }
+ // The SPS has changed, signal to flush the current decoder and create a
+ // new one.
+ mDecoder->Flush();
+ Shutdown();
+ return CreateDecoderAndInit(aSample);
+}
+
+void
+H264Converter::UpdateConfigFromExtraData(MediaByteBuffer* aExtraData)
+{
+ mp4_demuxer::SPSData spsdata;
+ if (mp4_demuxer::H264::DecodeSPSFromExtraData(aExtraData, spsdata) &&
+ spsdata.pic_width > 0 && spsdata.pic_height > 0) {
+ mp4_demuxer::H264::EnsureSPSIsSane(spsdata);
+ mCurrentConfig.mImage.width = spsdata.pic_width;
+ mCurrentConfig.mImage.height = spsdata.pic_height;
+ mCurrentConfig.mDisplay.width = spsdata.display_width;
+ mCurrentConfig.mDisplay.height = spsdata.display_height;
+ }
+ mCurrentConfig.mExtraData = aExtraData;
+}
+
+} // namespace mozilla
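
The SPS-change handling in CheckForSPSChange() above boils down to extracting any in-band extra data and comparing it against the current configuration. Below is a standalone sketch of that check, using only the mp4_demuxer::AnnexB helpers already called in this file; the helper function itself is hypothetical and not part of this patch.

static bool
SPSChanged(MediaRawData* aSample, const MediaByteBuffer* aCurrentExtraData)
{
  RefPtr<MediaByteBuffer> extraData =
    mp4_demuxer::AnnexB::ExtractExtraData(aSample);
  if (!mp4_demuxer::AnnexB::HasSPS(extraData)) {
    // No in-band SPS; keep using the current configuration.
    return false;
  }
  // CompareExtraData() returns true when both blobs describe the same stream
  // configuration, i.e. no decoder re-creation is needed.
  return !mp4_demuxer::AnnexB::CompareExtraData(extraData, aCurrentExtraData);
}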
diff --git a/dom/media/platforms/wrappers/H264Converter.h b/dom/media/platforms/wrappers/H264Converter.h
new file mode 100644
index 000000000..6905b1c74
--- /dev/null
+++ b/dom/media/platforms/wrappers/H264Converter.h
@@ -0,0 +1,74 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_H264Converter_h
+#define mozilla_H264Converter_h
+
+#include "PlatformDecoderModule.h"
+
+namespace mozilla {
+
+// H264Converter is a MediaDataDecoder wrapper used to ensure that
+// only AVCC or AnnexB is fed to the underlying MediaDataDecoder.
+// The H264Converter allows playback of content where the SPS NAL may not be
+// provided in the init segment (e.g. AVC3 or Annex B).
+// H264Converter will monitor the input data, and will delay creation of the
+// MediaDataDecoder until SPS and PPS NALs have been extracted.
+
+class H264Converter : public MediaDataDecoder {
+public:
+
+ H264Converter(PlatformDecoderModule* aPDM,
+ const CreateDecoderParams& aParams);
+ virtual ~H264Converter();
+
+ RefPtr<InitPromise> Init() override;
+ void Input(MediaRawData* aSample) override;
+ void Flush() override;
+ void Drain() override;
+ void Shutdown() override;
+ bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
+ const char* GetDescriptionName() const override
+ {
+ if (mDecoder) {
+ return mDecoder->GetDescriptionName();
+ }
+ return "H264Converter decoder (pending)";
+ }
+ void SetSeekThreshold(const media::TimeUnit& aTime) override;
+
+ nsresult GetLastError() const { return mLastError; }
+
+private:
+  // Will create the required MediaDataDecoder if we need AVCC and have an SPS NAL.
+  // Returns NS_ERROR_FAILURE if the error is permanent and can't be recovered,
+  // and will set mLastError accordingly.
+ nsresult CreateDecoder(DecoderDoctorDiagnostics* aDiagnostics);
+ nsresult CreateDecoderAndInit(MediaRawData* aSample);
+ nsresult CheckForSPSChange(MediaRawData* aSample);
+ void UpdateConfigFromExtraData(MediaByteBuffer* aExtraData);
+
+ void OnDecoderInitDone(const TrackType aTrackType);
+ void OnDecoderInitFailed(MediaResult aError);
+
+ RefPtr<PlatformDecoderModule> mPDM;
+ VideoInfo mCurrentConfig;
+ RefPtr<layers::KnowsCompositor> mKnowsCompositor;
+ RefPtr<layers::ImageContainer> mImageContainer;
+ const RefPtr<TaskQueue> mTaskQueue;
+ nsTArray<RefPtr<MediaRawData>> mMediaRawSamples;
+ MediaDataDecoderCallback* mCallback;
+ RefPtr<MediaDataDecoder> mDecoder;
+ MozPromiseRequestHolder<InitPromise> mInitPromiseRequest;
+ RefPtr<GMPCrashHelper> mGMPCrashHelper;
+ bool mNeedAVCC;
+ nsresult mLastError;
+ bool mNeedKeyframe = true;
+};
+
+} // namespace mozilla
+
+#endif // mozilla_H264Converter_h
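
A hypothetical caller-side sketch: a factory function returning already_AddRefed<MediaDataDecoder> might wrap a platform module's H.264 decoder with H264Converter and use GetLastError() to detect the permanent failures described above. aPDM and aParams are assumed to exist at the call site; only H264Converter itself comes from this patch.

RefPtr<H264Converter> converter = new H264Converter(aPDM, aParams);
if (NS_FAILED(converter->GetLastError())) {
  // The constructor already tried to create the underlying decoder (an SPS
  // was available) and hit a permanent error; don't hand out the converter.
  return nullptr;
}
return converter.forget();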