Diffstat (limited to 'dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp')
 dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp | 75 ++++++++++++++++++----
 1 file changed, 65 insertions(+), 10 deletions(-)
diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
index aec1e9136..f3101e44c 100644
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -174,12 +174,15 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample, bool* aGotFrame)
uint8_t* inputData = const_cast<uint8_t*>(aSample->Data());
size_t inputSize = aSample->Size();
-#if LIBAVCODEC_VERSION_MAJOR >= 54
+#if LIBAVCODEC_VERSION_MAJOR >= 54 && LIBAVCODEC_VERSION_MAJOR < 58
if (inputSize && mCodecParser && (mCodecID == AV_CODEC_ID_VP8
-#if LIBAVCODEC_VERSION_MAJOR >= 55
+#if LIBAVCODEC_VERSION_MAJOR >= 55 && LIBAVCODEC_VERSION_MAJOR < 58
|| mCodecID == AV_CODEC_ID_VP9
#endif
- )) {
+ ))
+#endif
+#if LIBAVCODEC_VERSION_MAJOR >= 54
+ {
while (inputSize) {
uint8_t* data = inputData;
int size = inputSize;
@@ -224,6 +227,48 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
packet.flags = aSample->mKeyframe ? AV_PKT_FLAG_KEY : 0;
packet.pos = aSample->mOffset;
+#if LIBAVCODEC_VERSION_MAJOR >= 58
+ packet.duration = aSample->mDuration;
+ int res = mLib->avcodec_send_packet(mCodecContext, &packet);
+ if (res < 0) {
+ // In theory, avcodec_send_packet could return AVERROR(EAGAIN) should its
+ // internal buffers be full. In practice this can't happen, as we only feed
+ // one packet at a time and call avcodec_receive_frame right after.
+ FFMPEG_LOG("avcodec_send_packet error: %d", res);
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("avcodec_send_packet error: %d", res));
+ }
+
+ if (aGotFrame) {
+ *aGotFrame = false;
+ }
+ do {
+ if (!PrepareFrame()) {
+ NS_WARNING("FFmpeg h264 decoder failed to allocate frame.");
+ return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
+ }
+ res = mLib->avcodec_receive_frame(mCodecContext, mFrame);
+ if (res == int(AVERROR_EOF)) {
+ return NS_ERROR_DOM_MEDIA_END_OF_STREAM;
+ }
+ if (res == AVERROR(EAGAIN)) {
+ return NS_OK;
+ }
+ if (res < 0) {
+ FFMPEG_LOG("avcodec_receive_frame error: %d", res);
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("avcodec_receive_frame error: %d", res));
+ }
+ MediaResult rv = CreateImage(mFrame->pkt_pos, mFrame->pkt_pts,
+ mFrame->pkt_duration);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ if (aGotFrame) {
+ *aGotFrame = true;
+ }
+ } while (true);
+#else
// LibAV provides no API to retrieve the decoded sample's duration.
// (FFmpeg >= 1.0 provides av_frame_get_pkt_duration)
// As such we instead use a map using the dts as key that we will retrieve
@@ -276,8 +321,21 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
// against the map becoming extremely big.
mDurationMap.Clear();
}
+
+ MediaResult rv = CreateImage(aSample->mOffset, pts, duration);
+ if (NS_SUCCEEDED(rv) && aGotFrame) {
+ *aGotFrame = true;
+ }
+ return rv;
+#endif
+}
+
+MediaResult
+FFmpegVideoDecoder<LIBAV_VER>::CreateImage(int64_t aOffset, int64_t aPts,
+ int64_t aDuration)
+{
FFMPEG_LOG("Got one frame output with pts=%lld dts=%lld duration=%lld opaque=%lld",
- pts, mFrame->pkt_dts, duration, mCodecContext->reordered_opaque);
+ aPts, mFrame->pkt_dts, aDuration, mCodecContext->reordered_opaque);
VideoData::YCbCrBuffer b;
b.mPlanes[0].mData = mFrame->data[0];
@@ -317,9 +375,9 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
RefPtr<VideoData> v =
VideoData::CreateAndCopyData(mInfo,
mImageContainer,
- aSample->mOffset,
- pts,
- duration,
+ aOffset,
+ aPts,
+ aDuration,
b,
!!mFrame->key_frame,
-1,
@@ -331,9 +389,6 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
RESULT_DETAIL("image allocation error"));
}
mCallback->Output(v);
- if (aGotFrame) {
- *aGotFrame = true;
- }
return NS_OK;
}
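
For context, the new LIBAVCODEC_VERSION_MAJOR >= 58 branch above follows FFmpeg's send/receive decoding model: one avcodec_send_packet() call per input sample, then avcodec_receive_frame() in a loop until it reports AVERROR(EAGAIN) or AVERROR_EOF. The following is a minimal standalone sketch of that pattern, not Gecko code; DecodeOnePacket and HandleDecodedFrame are hypothetical names, and the MediaResult/NS_* error mapping used in the patch is omitted.

// decode_loop_sketch.cpp -- illustrative only, not part of the patch.
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>
}

// Send one compressed packet, then drain every frame the decoder has ready.
// Returns 0 on success (including "needs more input"), or a negative AVERROR.
static int DecodeOnePacket(AVCodecContext* ctx, const AVPacket* pkt,
                           void (*HandleDecodedFrame)(const AVFrame*)) {
  int res = avcodec_send_packet(ctx, pkt);
  if (res < 0) {
    return res;  // Hard error; with one packet in flight EAGAIN is not expected.
  }

  AVFrame* frame = av_frame_alloc();
  if (!frame) {
    return AVERROR(ENOMEM);
  }

  while (true) {
    res = avcodec_receive_frame(ctx, frame);
    if (res == AVERROR(EAGAIN) || res == AVERROR_EOF) {
      // EAGAIN: decoder wants more input. EOF: fully drained after a flush.
      res = 0;
      break;
    }
    if (res < 0) {
      break;  // Genuine decode error.
    }
    HandleDecodedFrame(frame);  // In the patch this step maps to CreateImage().
    av_frame_unref(frame);
  }

  av_frame_free(&frame);
  return res;
}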
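
The pre-58 path keeps the existing workaround for libraries without av_frame_get_pkt_duration(): each input sample's duration is stored keyed by its dts and looked up again when the corresponding frame is produced, since the decoder carries the dts through frame reordering. A rough sketch of that bookkeeping, assuming a plain std::map rather than Gecko's actual mDurationMap type:

#include <cstdint>
#include <map>

// Illustrative stand-in for the decoder's duration map; not the Gecko class.
class DurationMapSketch {
public:
  // Before decoding: remember the input sample's duration by its dts.
  void Insert(int64_t aDts, int64_t aDuration) { mMap[aDts] = aDuration; }

  // When a frame is produced: recover (and drop) the matching duration.
  bool Find(int64_t aDts, int64_t& aDuration) {
    auto it = mMap.find(aDts);
    if (it == mMap.end()) {
      return false;  // Unknown dts; caller falls back to an estimated duration.
    }
    aDuration = it->second;
    mMap.erase(it);
    return true;
  }

  // Dropped wholesale on error/flush so it cannot grow without bound.
  void Clear() { mMap.clear(); }

private:
  std::map<int64_t, int64_t> mMap;  // dts -> duration of the matching input sample.
};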