Diffstat (limited to 'media/libvpx/vp8/decoder')
-rw-r--r-- | media/libvpx/vp8/decoder/dboolhuff.c | 76
-rw-r--r-- | media/libvpx/vp8/decoder/dboolhuff.h | 141
-rw-r--r-- | media/libvpx/vp8/decoder/decodeframe.c | 1397
-rw-r--r-- | media/libvpx/vp8/decoder/decodemv.c | 670
-rw-r--r-- | media/libvpx/vp8/decoder/decodemv.h | 26
-rw-r--r-- | media/libvpx/vp8/decoder/decoderthreading.h | 30
-rw-r--r-- | media/libvpx/vp8/decoder/detokenize.c | 245
-rw-r--r-- | media/libvpx/vp8/decoder/detokenize.h | 27
-rw-r--r-- | media/libvpx/vp8/decoder/ec_types.h | 58
-rw-r--r-- | media/libvpx/vp8/decoder/error_concealment.c | 596
-rw-r--r-- | media/libvpx/vp8/decoder/error_concealment.h | 49
-rw-r--r-- | media/libvpx/vp8/decoder/onyxd_if.c | 505
-rw-r--r-- | media/libvpx/vp8/decoder/onyxd_int.h | 159
-rw-r--r-- | media/libvpx/vp8/decoder/threading.c | 915
-rw-r--r-- | media/libvpx/vp8/decoder/treereader.h | 48
15 files changed, 4942 insertions, 0 deletions
diff --git a/media/libvpx/vp8/decoder/dboolhuff.c b/media/libvpx/vp8/decoder/dboolhuff.c new file mode 100644 index 000000000..b874d4c46 --- /dev/null +++ b/media/libvpx/vp8/decoder/dboolhuff.c @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +#include "dboolhuff.h" +#include "vp8/common/common.h" + +int vp8dx_start_decode(BOOL_DECODER *br, + const unsigned char *source, + unsigned int source_sz, + vpx_decrypt_cb decrypt_cb, + void *decrypt_state) +{ + br->user_buffer_end = source+source_sz; + br->user_buffer = source; + br->value = 0; + br->count = -8; + br->range = 255; + br->decrypt_cb = decrypt_cb; + br->decrypt_state = decrypt_state; + + if (source_sz && !source) + return 1; + + /* Populate the buffer */ + vp8dx_bool_decoder_fill(br); + + return 0; +} + +void vp8dx_bool_decoder_fill(BOOL_DECODER *br) +{ + const unsigned char *bufptr = br->user_buffer; + VP8_BD_VALUE value = br->value; + int count = br->count; + int shift = VP8_BD_VALUE_SIZE - CHAR_BIT - (count + CHAR_BIT); + size_t bytes_left = br->user_buffer_end - bufptr; + size_t bits_left = bytes_left * CHAR_BIT; + int x = (int)(shift + CHAR_BIT - bits_left); + int loop_end = 0; + unsigned char decrypted[sizeof(VP8_BD_VALUE) + 1]; + + if (br->decrypt_cb) { + size_t n = MIN(sizeof(decrypted), bytes_left); + br->decrypt_cb(br->decrypt_state, bufptr, decrypted, (int)n); + bufptr = decrypted; + } + + if(x >= 0) + { + count += VP8_LOTS_OF_BITS; + loop_end = x; + } + + if (x < 0 || bits_left) + { + while(shift >= loop_end) + { + count += CHAR_BIT; + value |= (VP8_BD_VALUE)*bufptr << shift; + ++bufptr; + ++br->user_buffer; + shift -= CHAR_BIT; + } + } + + br->value = value; + br->count = count; +} diff --git a/media/libvpx/vp8/decoder/dboolhuff.h b/media/libvpx/vp8/decoder/dboolhuff.h new file mode 100644 index 000000000..51c5adc28 --- /dev/null +++ b/media/libvpx/vp8/decoder/dboolhuff.h @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +#ifndef VP8_DECODER_DBOOLHUFF_H_ +#define VP8_DECODER_DBOOLHUFF_H_ + +#include <stddef.h> +#include <limits.h> + +#include "vpx_config.h" +#include "vpx_ports/mem.h" +#include "vpx/vp8dx.h" +#include "vpx/vpx_integer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef size_t VP8_BD_VALUE; + +#define VP8_BD_VALUE_SIZE ((int)sizeof(VP8_BD_VALUE)*CHAR_BIT) + +/*This is meant to be a large, positive constant that can still be efficiently + loaded as an immediate (on platforms like ARM, for example). 
+ Even relatively modest values like 100 would work fine.*/ +#define VP8_LOTS_OF_BITS (0x40000000) + +typedef struct +{ + const unsigned char *user_buffer_end; + const unsigned char *user_buffer; + VP8_BD_VALUE value; + int count; + unsigned int range; + vpx_decrypt_cb decrypt_cb; + void *decrypt_state; +} BOOL_DECODER; + +DECLARE_ALIGNED(16, extern const unsigned char, vp8_norm[256]); + +int vp8dx_start_decode(BOOL_DECODER *br, + const unsigned char *source, + unsigned int source_sz, + vpx_decrypt_cb decrypt_cb, + void *decrypt_state); + +void vp8dx_bool_decoder_fill(BOOL_DECODER *br); + + +static int vp8dx_decode_bool(BOOL_DECODER *br, int probability) { + unsigned int bit = 0; + VP8_BD_VALUE value; + unsigned int split; + VP8_BD_VALUE bigsplit; + int count; + unsigned int range; + + split = 1 + (((br->range - 1) * probability) >> 8); + + if(br->count < 0) + vp8dx_bool_decoder_fill(br); + + value = br->value; + count = br->count; + + bigsplit = (VP8_BD_VALUE)split << (VP8_BD_VALUE_SIZE - 8); + + range = split; + + if (value >= bigsplit) + { + range = br->range - split; + value = value - bigsplit; + bit = 1; + } + + { + register unsigned int shift = vp8_norm[range]; + range <<= shift; + value <<= shift; + count -= shift; + } + br->value = value; + br->count = count; + br->range = range; + + return bit; +} + +static int vp8_decode_value(BOOL_DECODER *br, int bits) +{ + int z = 0; + int bit; + + for (bit = bits - 1; bit >= 0; bit--) + { + z |= (vp8dx_decode_bool(br, 0x80) << bit); + } + + return z; +} + +static int vp8dx_bool_error(BOOL_DECODER *br) +{ + /* Check if we have reached the end of the buffer. + * + * Variable 'count' stores the number of bits in the 'value' buffer, minus + * 8. The top byte is part of the algorithm, and the remainder is buffered + * to be shifted into it. So if count == 8, the top 16 bits of 'value' are + * occupied, 8 for the algorithm and 8 in the buffer. + * + * When reading a byte from the user's buffer, count is filled with 8 and + * one byte is filled into the value buffer. When we reach the end of the + * data, count is additionally filled with VP8_LOTS_OF_BITS. So when + * count == VP8_LOTS_OF_BITS - 1, the user's data has been exhausted. + */ + if ((br->count > VP8_BD_VALUE_SIZE) && (br->count < VP8_LOTS_OF_BITS)) + { + /* We have tried to decode bits after the end of + * stream was encountered. + */ + return 1; + } + + /* No error. */ + return 0; +} + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VP8_DECODER_DBOOLHUFF_H_ diff --git a/media/libvpx/vp8/decoder/decodeframe.c b/media/libvpx/vp8/decoder/decodeframe.c new file mode 100644 index 000000000..4d247cbf8 --- /dev/null +++ b/media/libvpx/vp8/decoder/decodeframe.c @@ -0,0 +1,1397 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + + +#include "vpx_config.h" +#include "vp8_rtcd.h" +#include "./vpx_scale_rtcd.h" +#include "onyxd_int.h" +#include "vp8/common/header.h" +#include "vp8/common/reconintra4x4.h" +#include "vp8/common/reconinter.h" +#include "detokenize.h" +#include "vp8/common/common.h" +#include "vp8/common/invtrans.h" +#include "vp8/common/alloccommon.h" +#include "vp8/common/entropymode.h" +#include "vp8/common/quant_common.h" +#include "vpx_scale/vpx_scale.h" +#include "vp8/common/setupintrarecon.h" + +#include "decodemv.h" +#include "vp8/common/extend.h" +#if CONFIG_ERROR_CONCEALMENT +#include "error_concealment.h" +#endif +#include "vpx_mem/vpx_mem.h" +#include "vp8/common/threading.h" +#include "decoderthreading.h" +#include "dboolhuff.h" + +#include <assert.h> +#include <stdio.h> + +void vp8cx_init_de_quantizer(VP8D_COMP *pbi) +{ + int Q; + VP8_COMMON *const pc = & pbi->common; + + for (Q = 0; Q < QINDEX_RANGE; Q++) + { + pc->Y1dequant[Q][0] = (short)vp8_dc_quant(Q, pc->y1dc_delta_q); + pc->Y2dequant[Q][0] = (short)vp8_dc2quant(Q, pc->y2dc_delta_q); + pc->UVdequant[Q][0] = (short)vp8_dc_uv_quant(Q, pc->uvdc_delta_q); + + pc->Y1dequant[Q][1] = (short)vp8_ac_yquant(Q); + pc->Y2dequant[Q][1] = (short)vp8_ac2quant(Q, pc->y2ac_delta_q); + pc->UVdequant[Q][1] = (short)vp8_ac_uv_quant(Q, pc->uvac_delta_q); + } +} + +void vp8_mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd) +{ + int i; + int QIndex; + MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi; + VP8_COMMON *const pc = & pbi->common; + + /* Decide whether to use the default or alternate baseline Q value. */ + if (xd->segmentation_enabled) + { + /* Abs Value */ + if (xd->mb_segement_abs_delta == SEGMENT_ABSDATA) + QIndex = xd->segment_feature_data[MB_LVL_ALT_Q][mbmi->segment_id]; + + /* Delta Value */ + else + QIndex = pc->base_qindex + xd->segment_feature_data[MB_LVL_ALT_Q][mbmi->segment_id]; + + QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0; /* Clamp to valid range */ + } + else + QIndex = pc->base_qindex; + + /* Set up the macroblock dequant constants */ + xd->dequant_y1_dc[0] = 1; + xd->dequant_y1[0] = pc->Y1dequant[QIndex][0]; + xd->dequant_y2[0] = pc->Y2dequant[QIndex][0]; + xd->dequant_uv[0] = pc->UVdequant[QIndex][0]; + + for (i = 1; i < 16; i++) + { + xd->dequant_y1_dc[i] = + xd->dequant_y1[i] = pc->Y1dequant[QIndex][1]; + xd->dequant_y2[i] = pc->Y2dequant[QIndex][1]; + xd->dequant_uv[i] = pc->UVdequant[QIndex][1]; + } +} + +static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, + unsigned int mb_idx) +{ + MB_PREDICTION_MODE mode; + int i; +#if CONFIG_ERROR_CONCEALMENT + int corruption_detected = 0; +#else + (void)mb_idx; +#endif + + if (xd->mode_info_context->mbmi.mb_skip_coeff) + { + vp8_reset_mb_tokens_context(xd); + } + else if (!vp8dx_bool_error(xd->current_bc)) + { + int eobtotal; + eobtotal = vp8_decode_mb_tokens(pbi, xd); + + /* Special case: Force the loopfilter to skip when eobtotal is zero */ + xd->mode_info_context->mbmi.mb_skip_coeff = (eobtotal==0); + } + + mode = xd->mode_info_context->mbmi.mode; + + if (xd->segmentation_enabled) + vp8_mb_init_dequantizer(pbi, xd); + + +#if CONFIG_ERROR_CONCEALMENT + + if(pbi->ec_active) + { + int throw_residual; + /* When we have independent partitions we can apply residual even + * though other partitions within the frame are corrupt. 
+ */ + throw_residual = (!pbi->independent_partitions && + pbi->frame_corrupt_residual); + throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc)); + + if ((mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual)) + { + /* MB with corrupt residuals or corrupt mode/motion vectors. + * Better to use the predictor as reconstruction. + */ + pbi->frame_corrupt_residual = 1; + memset(xd->qcoeff, 0, sizeof(xd->qcoeff)); + vp8_conceal_corrupt_mb(xd); + + + corruption_detected = 1; + + /* force idct to be skipped for B_PRED and use the + * prediction only for reconstruction + * */ + memset(xd->eobs, 0, 25); + } + } +#endif + + /* do prediction */ + if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) + { + vp8_build_intra_predictors_mbuv_s(xd, + xd->recon_above[1], + xd->recon_above[2], + xd->recon_left[1], + xd->recon_left[2], + xd->recon_left_stride[1], + xd->dst.u_buffer, xd->dst.v_buffer, + xd->dst.uv_stride); + + if (mode != B_PRED) + { + vp8_build_intra_predictors_mby_s(xd, + xd->recon_above[0], + xd->recon_left[0], + xd->recon_left_stride[0], + xd->dst.y_buffer, + xd->dst.y_stride); + } + else + { + short *DQC = xd->dequant_y1; + int dst_stride = xd->dst.y_stride; + + /* clear out residual eob info */ + if(xd->mode_info_context->mbmi.mb_skip_coeff) + memset(xd->eobs, 0, 25); + + intra_prediction_down_copy(xd, xd->recon_above[0] + 16); + + for (i = 0; i < 16; i++) + { + BLOCKD *b = &xd->block[i]; + unsigned char *dst = xd->dst.y_buffer + b->offset; + B_PREDICTION_MODE b_mode = + xd->mode_info_context->bmi[i].as_mode; + unsigned char *Above = dst - dst_stride; + unsigned char *yleft = dst - 1; + int left_stride = dst_stride; + unsigned char top_left = Above[-1]; + + vp8_intra4x4_predict(Above, yleft, left_stride, b_mode, + dst, dst_stride, top_left); + + if (xd->eobs[i]) + { + if (xd->eobs[i] > 1) + { + vp8_dequant_idct_add(b->qcoeff, DQC, dst, dst_stride); + } + else + { + vp8_dc_only_idct_add + (b->qcoeff[0] * DQC[0], + dst, dst_stride, + dst, dst_stride); + memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0])); + } + } + } + } + } + else + { + vp8_build_inter_predictors_mb(xd); + } + + +#if CONFIG_ERROR_CONCEALMENT + if (corruption_detected) + { + return; + } +#endif + + if(!xd->mode_info_context->mbmi.mb_skip_coeff) + { + /* dequantization and idct */ + if (mode != B_PRED) + { + short *DQC = xd->dequant_y1; + + if (mode != SPLITMV) + { + BLOCKD *b = &xd->block[24]; + + /* do 2nd order transform on the dc block */ + if (xd->eobs[24] > 1) + { + vp8_dequantize_b(b, xd->dequant_y2); + + vp8_short_inv_walsh4x4(&b->dqcoeff[0], + xd->qcoeff); + memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0])); + } + else + { + b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0]; + vp8_short_inv_walsh4x4_1(&b->dqcoeff[0], + xd->qcoeff); + memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0])); + } + + /* override the dc dequant constant in order to preserve the + * dc components + */ + DQC = xd->dequant_y1_dc; + } + + vp8_dequant_idct_add_y_block + (xd->qcoeff, DQC, + xd->dst.y_buffer, + xd->dst.y_stride, xd->eobs); + } + + vp8_dequant_idct_add_uv_block + (xd->qcoeff+16*16, xd->dequant_uv, + xd->dst.u_buffer, xd->dst.v_buffer, + xd->dst.uv_stride, xd->eobs+16); + } +} + +static int get_delta_q(vp8_reader *bc, int prev, int *q_update) +{ + int ret_val = 0; + + if (vp8_read_bit(bc)) + { + ret_val = vp8_read_literal(bc, 4); + + if (vp8_read_bit(bc)) + ret_val = -ret_val; + } + + /* Trigger a quantizer update if the delta-q value has changed */ + if (ret_val != prev) + *q_update = 1; + + return ret_val; +} + +#ifdef 
PACKET_TESTING +#include <stdio.h> +FILE *vpxlog = 0; +#endif + +static void yv12_extend_frame_top_c(YV12_BUFFER_CONFIG *ybf) +{ + int i; + unsigned char *src_ptr1; + unsigned char *dest_ptr1; + + unsigned int Border; + int plane_stride; + + /***********/ + /* Y Plane */ + /***********/ + Border = ybf->border; + plane_stride = ybf->y_stride; + src_ptr1 = ybf->y_buffer - Border; + dest_ptr1 = src_ptr1 - (Border * plane_stride); + + for (i = 0; i < (int)Border; i++) + { + memcpy(dest_ptr1, src_ptr1, plane_stride); + dest_ptr1 += plane_stride; + } + + + /***********/ + /* U Plane */ + /***********/ + plane_stride = ybf->uv_stride; + Border /= 2; + src_ptr1 = ybf->u_buffer - Border; + dest_ptr1 = src_ptr1 - (Border * plane_stride); + + for (i = 0; i < (int)(Border); i++) + { + memcpy(dest_ptr1, src_ptr1, plane_stride); + dest_ptr1 += plane_stride; + } + + /***********/ + /* V Plane */ + /***********/ + + src_ptr1 = ybf->v_buffer - Border; + dest_ptr1 = src_ptr1 - (Border * plane_stride); + + for (i = 0; i < (int)(Border); i++) + { + memcpy(dest_ptr1, src_ptr1, plane_stride); + dest_ptr1 += plane_stride; + } +} + +static void yv12_extend_frame_bottom_c(YV12_BUFFER_CONFIG *ybf) +{ + int i; + unsigned char *src_ptr1, *src_ptr2; + unsigned char *dest_ptr2; + + unsigned int Border; + int plane_stride; + int plane_height; + + /***********/ + /* Y Plane */ + /***********/ + Border = ybf->border; + plane_stride = ybf->y_stride; + plane_height = ybf->y_height; + + src_ptr1 = ybf->y_buffer - Border; + src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride; + dest_ptr2 = src_ptr2 + plane_stride; + + for (i = 0; i < (int)Border; i++) + { + memcpy(dest_ptr2, src_ptr2, plane_stride); + dest_ptr2 += plane_stride; + } + + + /***********/ + /* U Plane */ + /***********/ + plane_stride = ybf->uv_stride; + plane_height = ybf->uv_height; + Border /= 2; + + src_ptr1 = ybf->u_buffer - Border; + src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride; + dest_ptr2 = src_ptr2 + plane_stride; + + for (i = 0; i < (int)(Border); i++) + { + memcpy(dest_ptr2, src_ptr2, plane_stride); + dest_ptr2 += plane_stride; + } + + /***********/ + /* V Plane */ + /***********/ + + src_ptr1 = ybf->v_buffer - Border; + src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride; + dest_ptr2 = src_ptr2 + plane_stride; + + for (i = 0; i < (int)(Border); i++) + { + memcpy(dest_ptr2, src_ptr2, plane_stride); + dest_ptr2 += plane_stride; + } +} + +static void yv12_extend_frame_left_right_c(YV12_BUFFER_CONFIG *ybf, + unsigned char *y_src, + unsigned char *u_src, + unsigned char *v_src) +{ + int i; + unsigned char *src_ptr1, *src_ptr2; + unsigned char *dest_ptr1, *dest_ptr2; + + unsigned int Border; + int plane_stride; + int plane_height; + int plane_width; + + /***********/ + /* Y Plane */ + /***********/ + Border = ybf->border; + plane_stride = ybf->y_stride; + plane_height = 16; + plane_width = ybf->y_width; + + /* copy the left and right most columns out */ + src_ptr1 = y_src; + src_ptr2 = src_ptr1 + plane_width - 1; + dest_ptr1 = src_ptr1 - Border; + dest_ptr2 = src_ptr2 + 1; + + for (i = 0; i < plane_height; i++) + { + memset(dest_ptr1, src_ptr1[0], Border); + memset(dest_ptr2, src_ptr2[0], Border); + src_ptr1 += plane_stride; + src_ptr2 += plane_stride; + dest_ptr1 += plane_stride; + dest_ptr2 += plane_stride; + } + + /***********/ + /* U Plane */ + /***********/ + plane_stride = ybf->uv_stride; + plane_height = 8; + plane_width = ybf->uv_width; + Border /= 2; + + /* copy the left and right most columns out 
*/ + src_ptr1 = u_src; + src_ptr2 = src_ptr1 + plane_width - 1; + dest_ptr1 = src_ptr1 - Border; + dest_ptr2 = src_ptr2 + 1; + + for (i = 0; i < plane_height; i++) + { + memset(dest_ptr1, src_ptr1[0], Border); + memset(dest_ptr2, src_ptr2[0], Border); + src_ptr1 += plane_stride; + src_ptr2 += plane_stride; + dest_ptr1 += plane_stride; + dest_ptr2 += plane_stride; + } + + /***********/ + /* V Plane */ + /***********/ + + /* copy the left and right most columns out */ + src_ptr1 = v_src; + src_ptr2 = src_ptr1 + plane_width - 1; + dest_ptr1 = src_ptr1 - Border; + dest_ptr2 = src_ptr2 + 1; + + for (i = 0; i < plane_height; i++) + { + memset(dest_ptr1, src_ptr1[0], Border); + memset(dest_ptr2, src_ptr2[0], Border); + src_ptr1 += plane_stride; + src_ptr2 += plane_stride; + dest_ptr1 += plane_stride; + dest_ptr2 += plane_stride; + } +} + +static void decode_mb_rows(VP8D_COMP *pbi) +{ + VP8_COMMON *const pc = & pbi->common; + MACROBLOCKD *const xd = & pbi->mb; + + MODE_INFO *lf_mic = xd->mode_info_context; + + int ibc = 0; + int num_part = 1 << pc->multi_token_partition; + + int recon_yoffset, recon_uvoffset; + int mb_row, mb_col; + int mb_idx = 0; + + YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME]; + + int recon_y_stride = yv12_fb_new->y_stride; + int recon_uv_stride = yv12_fb_new->uv_stride; + + unsigned char *ref_buffer[MAX_REF_FRAMES][3]; + unsigned char *dst_buffer[3]; + unsigned char *lf_dst[3]; + unsigned char *eb_dst[3]; + int i; + int ref_fb_corrupted[MAX_REF_FRAMES]; + + ref_fb_corrupted[INTRA_FRAME] = 0; + + for(i = 1; i < MAX_REF_FRAMES; i++) + { + YV12_BUFFER_CONFIG *this_fb = pbi->dec_fb_ref[i]; + + ref_buffer[i][0] = this_fb->y_buffer; + ref_buffer[i][1] = this_fb->u_buffer; + ref_buffer[i][2] = this_fb->v_buffer; + + ref_fb_corrupted[i] = this_fb->corrupted; + } + + /* Set up the buffer pointers */ + eb_dst[0] = lf_dst[0] = dst_buffer[0] = yv12_fb_new->y_buffer; + eb_dst[1] = lf_dst[1] = dst_buffer[1] = yv12_fb_new->u_buffer; + eb_dst[2] = lf_dst[2] = dst_buffer[2] = yv12_fb_new->v_buffer; + + xd->up_available = 0; + + /* Initialize the loop filter for this frame. 
*/ + if(pc->filter_level) + vp8_loop_filter_frame_init(pc, xd, pc->filter_level); + + vp8_setup_intra_recon_top_line(yv12_fb_new); + + /* Decode the individual macro block */ + for (mb_row = 0; mb_row < pc->mb_rows; mb_row++) + { + if (num_part > 1) + { + xd->current_bc = & pbi->mbc[ibc]; + ibc++; + + if (ibc == num_part) + ibc = 0; + } + + recon_yoffset = mb_row * recon_y_stride * 16; + recon_uvoffset = mb_row * recon_uv_stride * 8; + + /* reset contexts */ + xd->above_context = pc->above_context; + memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)); + + xd->left_available = 0; + + xd->mb_to_top_edge = -((mb_row * 16) << 3); + xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3; + + xd->recon_above[0] = dst_buffer[0] + recon_yoffset; + xd->recon_above[1] = dst_buffer[1] + recon_uvoffset; + xd->recon_above[2] = dst_buffer[2] + recon_uvoffset; + + xd->recon_left[0] = xd->recon_above[0] - 1; + xd->recon_left[1] = xd->recon_above[1] - 1; + xd->recon_left[2] = xd->recon_above[2] - 1; + + xd->recon_above[0] -= xd->dst.y_stride; + xd->recon_above[1] -= xd->dst.uv_stride; + xd->recon_above[2] -= xd->dst.uv_stride; + + /* TODO: move to outside row loop */ + xd->recon_left_stride[0] = xd->dst.y_stride; + xd->recon_left_stride[1] = xd->dst.uv_stride; + + setup_intra_recon_left(xd->recon_left[0], xd->recon_left[1], + xd->recon_left[2], xd->dst.y_stride, + xd->dst.uv_stride); + + for (mb_col = 0; mb_col < pc->mb_cols; mb_col++) + { + /* Distance of Mb to the various image edges. + * These are specified to 8th pel as they are always compared to values + * that are in 1/8th pel units + */ + xd->mb_to_left_edge = -((mb_col * 16) << 3); + xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3; + +#if CONFIG_ERROR_CONCEALMENT + { + int corrupt_residual = (!pbi->independent_partitions && + pbi->frame_corrupt_residual) || + vp8dx_bool_error(xd->current_bc); + if (pbi->ec_active && + xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME && + corrupt_residual) + { + /* We have an intra block with corrupt coefficients, better to + * conceal with an inter block. Interpolate MVs from neighboring + * MBs. + * + * Note that for the first mb with corrupt residual in a frame, + * we might not discover that before decoding the residual. That + * happens after this check, and therefore no inter concealment + * will be done. + */ + vp8_interpolate_motion(xd, + mb_row, mb_col, + pc->mb_rows, pc->mb_cols, + pc->mode_info_stride); + } + } +#endif + + xd->dst.y_buffer = dst_buffer[0] + recon_yoffset; + xd->dst.u_buffer = dst_buffer[1] + recon_uvoffset; + xd->dst.v_buffer = dst_buffer[2] + recon_uvoffset; + + if (xd->mode_info_context->mbmi.ref_frame >= LAST_FRAME) { + MV_REFERENCE_FRAME ref = xd->mode_info_context->mbmi.ref_frame; + xd->pre.y_buffer = ref_buffer[ref][0] + recon_yoffset; + xd->pre.u_buffer = ref_buffer[ref][1] + recon_uvoffset; + xd->pre.v_buffer = ref_buffer[ref][2] + recon_uvoffset; + } else { + // ref_frame is INTRA_FRAME, pre buffer should not be used. 
+ xd->pre.y_buffer = 0; + xd->pre.u_buffer = 0; + xd->pre.v_buffer = 0; + } + + /* propagate errors from reference frames */ + xd->corrupted |= ref_fb_corrupted[xd->mode_info_context->mbmi.ref_frame]; + + decode_macroblock(pbi, xd, mb_idx); + + mb_idx++; + xd->left_available = 1; + + /* check if the boolean decoder has suffered an error */ + xd->corrupted |= vp8dx_bool_error(xd->current_bc); + + xd->recon_above[0] += 16; + xd->recon_above[1] += 8; + xd->recon_above[2] += 8; + xd->recon_left[0] += 16; + xd->recon_left[1] += 8; + xd->recon_left[2] += 8; + + recon_yoffset += 16; + recon_uvoffset += 8; + + ++xd->mode_info_context; /* next mb */ + + xd->above_context++; + } + + /* adjust to the next row of mbs */ + vp8_extend_mb_row(yv12_fb_new, xd->dst.y_buffer + 16, + xd->dst.u_buffer + 8, xd->dst.v_buffer + 8); + + ++xd->mode_info_context; /* skip prediction column */ + xd->up_available = 1; + + if(pc->filter_level) + { + if(mb_row > 0) + { + if (pc->filter_type == NORMAL_LOOPFILTER) + vp8_loop_filter_row_normal(pc, lf_mic, mb_row-1, + recon_y_stride, recon_uv_stride, + lf_dst[0], lf_dst[1], lf_dst[2]); + else + vp8_loop_filter_row_simple(pc, lf_mic, mb_row-1, + recon_y_stride, recon_uv_stride, + lf_dst[0], lf_dst[1], lf_dst[2]); + if(mb_row > 1) + { + yv12_extend_frame_left_right_c(yv12_fb_new, + eb_dst[0], + eb_dst[1], + eb_dst[2]); + + eb_dst[0] += recon_y_stride * 16; + eb_dst[1] += recon_uv_stride * 8; + eb_dst[2] += recon_uv_stride * 8; + } + + lf_dst[0] += recon_y_stride * 16; + lf_dst[1] += recon_uv_stride * 8; + lf_dst[2] += recon_uv_stride * 8; + lf_mic += pc->mb_cols; + lf_mic++; /* Skip border mb */ + } + } + else + { + if(mb_row > 0) + { + /**/ + yv12_extend_frame_left_right_c(yv12_fb_new, + eb_dst[0], + eb_dst[1], + eb_dst[2]); + eb_dst[0] += recon_y_stride * 16; + eb_dst[1] += recon_uv_stride * 8; + eb_dst[2] += recon_uv_stride * 8; + } + } + } + + if(pc->filter_level) + { + if (pc->filter_type == NORMAL_LOOPFILTER) + vp8_loop_filter_row_normal(pc, lf_mic, mb_row-1, recon_y_stride, + recon_uv_stride, lf_dst[0], lf_dst[1], + lf_dst[2]); + else + vp8_loop_filter_row_simple(pc, lf_mic, mb_row-1, recon_y_stride, + recon_uv_stride, lf_dst[0], lf_dst[1], + lf_dst[2]); + + yv12_extend_frame_left_right_c(yv12_fb_new, + eb_dst[0], + eb_dst[1], + eb_dst[2]); + eb_dst[0] += recon_y_stride * 16; + eb_dst[1] += recon_uv_stride * 8; + eb_dst[2] += recon_uv_stride * 8; + } + yv12_extend_frame_left_right_c(yv12_fb_new, + eb_dst[0], + eb_dst[1], + eb_dst[2]); + yv12_extend_frame_top_c(yv12_fb_new); + yv12_extend_frame_bottom_c(yv12_fb_new); + +} + +static unsigned int read_partition_size(VP8D_COMP *pbi, + const unsigned char *cx_size) +{ + unsigned char temp[3]; + if (pbi->decrypt_cb) + { + pbi->decrypt_cb(pbi->decrypt_state, cx_size, temp, 3); + cx_size = temp; + } + return cx_size[0] + (cx_size[1] << 8) + (cx_size[2] << 16); +} + +static int read_is_valid(const unsigned char *start, + size_t len, + const unsigned char *end) +{ + return (start + len > start && start + len <= end); +} + +static unsigned int read_available_partition_size( + VP8D_COMP *pbi, + const unsigned char *token_part_sizes, + const unsigned char *fragment_start, + const unsigned char *first_fragment_end, + const unsigned char *fragment_end, + int i, + int num_part) +{ + VP8_COMMON* pc = &pbi->common; + const unsigned char *partition_size_ptr = token_part_sizes + i * 3; + unsigned int partition_size = 0; + ptrdiff_t bytes_left = fragment_end - fragment_start; + /* Calculate the length of this partition. 
The last partition + * size is implicit. If the partition size can't be read, then + * either use the remaining data in the buffer (for EC mode) + * or throw an error. + */ + if (i < num_part - 1) + { + if (read_is_valid(partition_size_ptr, 3, first_fragment_end)) + partition_size = read_partition_size(pbi, partition_size_ptr); + else if (pbi->ec_active) + partition_size = (unsigned int)bytes_left; + else + vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME, + "Truncated partition size data"); + } + else + partition_size = (unsigned int)bytes_left; + + /* Validate the calculated partition length. If the buffer + * described by the partition can't be fully read, then restrict + * it to the portion that can be (for EC mode) or throw an error. + */ + if (!read_is_valid(fragment_start, partition_size, fragment_end)) + { + if (pbi->ec_active) + partition_size = (unsigned int)bytes_left; + else + vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME, + "Truncated packet or corrupt partition " + "%d length", i + 1); + } + return partition_size; +} + + +static void setup_token_decoder(VP8D_COMP *pbi, + const unsigned char* token_part_sizes) +{ + vp8_reader *bool_decoder = &pbi->mbc[0]; + unsigned int partition_idx; + unsigned int fragment_idx; + unsigned int num_token_partitions; + const unsigned char *first_fragment_end = pbi->fragments.ptrs[0] + + pbi->fragments.sizes[0]; + + TOKEN_PARTITION multi_token_partition = + (TOKEN_PARTITION)vp8_read_literal(&pbi->mbc[8], 2); + if (!vp8dx_bool_error(&pbi->mbc[8])) + pbi->common.multi_token_partition = multi_token_partition; + num_token_partitions = 1 << pbi->common.multi_token_partition; + + /* Check for partitions within the fragments and unpack the fragments + * so that each fragment pointer points to its corresponding partition. */ + for (fragment_idx = 0; fragment_idx < pbi->fragments.count; ++fragment_idx) + { + unsigned int fragment_size = pbi->fragments.sizes[fragment_idx]; + const unsigned char *fragment_end = pbi->fragments.ptrs[fragment_idx] + + fragment_size; + /* Special case for handling the first partition since we have already + * read its size. */ + if (fragment_idx == 0) + { + /* Size of first partition + token partition sizes element */ + ptrdiff_t ext_first_part_size = token_part_sizes - + pbi->fragments.ptrs[0] + 3 * (num_token_partitions - 1); + fragment_size -= (unsigned int)ext_first_part_size; + if (fragment_size > 0) + { + pbi->fragments.sizes[0] = (unsigned int)ext_first_part_size; + /* The fragment contains an additional partition. Move to + * next. */ + fragment_idx++; + pbi->fragments.ptrs[fragment_idx] = pbi->fragments.ptrs[0] + + pbi->fragments.sizes[0]; + } + } + /* Split the chunk into partitions read from the bitstream */ + while (fragment_size > 0) + { + ptrdiff_t partition_size = read_available_partition_size( + pbi, + token_part_sizes, + pbi->fragments.ptrs[fragment_idx], + first_fragment_end, + fragment_end, + fragment_idx - 1, + num_token_partitions); + pbi->fragments.sizes[fragment_idx] = (unsigned int)partition_size; + fragment_size -= (unsigned int)partition_size; + assert(fragment_idx <= num_token_partitions); + if (fragment_size > 0) + { + /* The fragment contains an additional partition. + * Move to next. 
*/ + fragment_idx++; + pbi->fragments.ptrs[fragment_idx] = + pbi->fragments.ptrs[fragment_idx - 1] + partition_size; + } + } + } + + pbi->fragments.count = num_token_partitions + 1; + + for (partition_idx = 1; partition_idx < pbi->fragments.count; ++partition_idx) + { + if (vp8dx_start_decode(bool_decoder, + pbi->fragments.ptrs[partition_idx], + pbi->fragments.sizes[partition_idx], + pbi->decrypt_cb, pbi->decrypt_state)) + vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR, + "Failed to allocate bool decoder %d", + partition_idx); + + bool_decoder++; + } + +#if CONFIG_MULTITHREAD + /* Clamp number of decoder threads */ + if (pbi->decoding_thread_count > num_token_partitions - 1) + pbi->decoding_thread_count = num_token_partitions - 1; +#endif +} + + +static void init_frame(VP8D_COMP *pbi) +{ + VP8_COMMON *const pc = & pbi->common; + MACROBLOCKD *const xd = & pbi->mb; + + if (pc->frame_type == KEY_FRAME) + { + /* Various keyframe initializations */ + memcpy(pc->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context)); + + vp8_init_mbmode_probs(pc); + + vp8_default_coef_probs(pc); + + /* reset the segment feature data to 0 with delta coding (Default state). */ + memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data)); + xd->mb_segement_abs_delta = SEGMENT_DELTADATA; + + /* reset the mode ref deltasa for loop filter */ + memset(xd->ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas)); + memset(xd->mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas)); + + /* All buffers are implicitly updated on key frames. */ + pc->refresh_golden_frame = 1; + pc->refresh_alt_ref_frame = 1; + pc->copy_buffer_to_gf = 0; + pc->copy_buffer_to_arf = 0; + + /* Note that Golden and Altref modes cannot be used on a key frame so + * ref_frame_sign_bias[] is undefined and meaningless + */ + pc->ref_frame_sign_bias[GOLDEN_FRAME] = 0; + pc->ref_frame_sign_bias[ALTREF_FRAME] = 0; + } + else + { + /* To enable choice of different interploation filters */ + if (!pc->use_bilinear_mc_filter) + { + xd->subpixel_predict = vp8_sixtap_predict4x4; + xd->subpixel_predict8x4 = vp8_sixtap_predict8x4; + xd->subpixel_predict8x8 = vp8_sixtap_predict8x8; + xd->subpixel_predict16x16 = vp8_sixtap_predict16x16; + } + else + { + xd->subpixel_predict = vp8_bilinear_predict4x4; + xd->subpixel_predict8x4 = vp8_bilinear_predict8x4; + xd->subpixel_predict8x8 = vp8_bilinear_predict8x8; + xd->subpixel_predict16x16 = vp8_bilinear_predict16x16; + } + + if (pbi->decoded_key_frame && pbi->ec_enabled && !pbi->ec_active) + pbi->ec_active = 1; + } + + xd->left_context = &pc->left_context; + xd->mode_info_context = pc->mi; + xd->frame_type = pc->frame_type; + xd->mode_info_context->mbmi.mode = DC_PRED; + xd->mode_info_stride = pc->mode_info_stride; + xd->corrupted = 0; /* init without corruption */ + + xd->fullpixel_mask = 0xffffffff; + if(pc->full_pixel) + xd->fullpixel_mask = 0xfffffff8; + +} + +int vp8_decode_frame(VP8D_COMP *pbi) +{ + vp8_reader *const bc = &pbi->mbc[8]; + VP8_COMMON *const pc = &pbi->common; + MACROBLOCKD *const xd = &pbi->mb; + const unsigned char *data = pbi->fragments.ptrs[0]; + const unsigned char *data_end = data + pbi->fragments.sizes[0]; + ptrdiff_t first_partition_length_in_bytes; + + int i, j, k, l; + const int *const mb_feature_data_bits = vp8_mb_feature_data_bits; + int corrupt_tokens = 0; + int prev_independent_partitions = pbi->independent_partitions; + + YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME]; + + /* start with no corruption of current frame */ + xd->corrupted = 0; + 
yv12_fb_new->corrupted = 0; + + if (data_end - data < 3) + { + if (!pbi->ec_active) + { + vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME, + "Truncated packet"); + } + + /* Declare the missing frame as an inter frame since it will + be handled as an inter frame when we have estimated its + motion vectors. */ + pc->frame_type = INTER_FRAME; + pc->version = 0; + pc->show_frame = 1; + first_partition_length_in_bytes = 0; + } + else + { + unsigned char clear_buffer[10]; + const unsigned char *clear = data; + if (pbi->decrypt_cb) + { + int n = (int)MIN(sizeof(clear_buffer), data_end - data); + pbi->decrypt_cb(pbi->decrypt_state, data, clear_buffer, n); + clear = clear_buffer; + } + + pc->frame_type = (FRAME_TYPE)(clear[0] & 1); + pc->version = (clear[0] >> 1) & 7; + pc->show_frame = (clear[0] >> 4) & 1; + first_partition_length_in_bytes = + (clear[0] | (clear[1] << 8) | (clear[2] << 16)) >> 5; + + if (!pbi->ec_active && + (data + first_partition_length_in_bytes > data_end + || data + first_partition_length_in_bytes < data)) + vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME, + "Truncated packet or corrupt partition 0 length"); + + data += 3; + clear += 3; + + vp8_setup_version(pc); + + + if (pc->frame_type == KEY_FRAME) + { + /* vet via sync code */ + /* When error concealment is enabled we should only check the sync + * code if we have enough bits available + */ + if (!pbi->ec_active || data + 3 < data_end) + { + if (clear[0] != 0x9d || clear[1] != 0x01 || clear[2] != 0x2a) + vpx_internal_error(&pc->error, VPX_CODEC_UNSUP_BITSTREAM, + "Invalid frame sync code"); + } + + /* If error concealment is enabled we should only parse the new size + * if we have enough data. Otherwise we will end up with the wrong + * size. + */ + if (!pbi->ec_active || data + 6 < data_end) + { + pc->Width = (clear[3] | (clear[4] << 8)) & 0x3fff; + pc->horiz_scale = clear[4] >> 6; + pc->Height = (clear[5] | (clear[6] << 8)) & 0x3fff; + pc->vert_scale = clear[6] >> 6; + } + data += 7; + } + else + { + memcpy(&xd->pre, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG)); + memcpy(&xd->dst, yv12_fb_new, sizeof(YV12_BUFFER_CONFIG)); + } + } + if ((!pbi->decoded_key_frame && pc->frame_type != KEY_FRAME)) + { + return -1; + } + + init_frame(pbi); + + if (vp8dx_start_decode(bc, data, (unsigned int)(data_end - data), + pbi->decrypt_cb, pbi->decrypt_state)) + vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR, + "Failed to allocate bool decoder 0"); + if (pc->frame_type == KEY_FRAME) { + (void)vp8_read_bit(bc); // colorspace + pc->clamp_type = (CLAMP_TYPE)vp8_read_bit(bc); + } + + /* Is segmentation enabled */ + xd->segmentation_enabled = (unsigned char)vp8_read_bit(bc); + + if (xd->segmentation_enabled) + { + /* Signal whether or not the segmentation map is being explicitly updated this frame. 
*/ + xd->update_mb_segmentation_map = (unsigned char)vp8_read_bit(bc); + xd->update_mb_segmentation_data = (unsigned char)vp8_read_bit(bc); + + if (xd->update_mb_segmentation_data) + { + xd->mb_segement_abs_delta = (unsigned char)vp8_read_bit(bc); + + memset(xd->segment_feature_data, 0, sizeof(xd->segment_feature_data)); + + /* For each segmentation feature (Quant and loop filter level) */ + for (i = 0; i < MB_LVL_MAX; i++) + { + for (j = 0; j < MAX_MB_SEGMENTS; j++) + { + /* Frame level data */ + if (vp8_read_bit(bc)) + { + xd->segment_feature_data[i][j] = (signed char)vp8_read_literal(bc, mb_feature_data_bits[i]); + + if (vp8_read_bit(bc)) + xd->segment_feature_data[i][j] = -xd->segment_feature_data[i][j]; + } + else + xd->segment_feature_data[i][j] = 0; + } + } + } + + if (xd->update_mb_segmentation_map) + { + /* Which macro block level features are enabled */ + memset(xd->mb_segment_tree_probs, 255, sizeof(xd->mb_segment_tree_probs)); + + /* Read the probs used to decode the segment id for each macro block. */ + for (i = 0; i < MB_FEATURE_TREE_PROBS; i++) + { + /* If not explicitly set value is defaulted to 255 by memset above */ + if (vp8_read_bit(bc)) + xd->mb_segment_tree_probs[i] = (vp8_prob)vp8_read_literal(bc, 8); + } + } + } + else + { + /* No segmentation updates on this frame */ + xd->update_mb_segmentation_map = 0; + xd->update_mb_segmentation_data = 0; + } + + /* Read the loop filter level and type */ + pc->filter_type = (LOOPFILTERTYPE) vp8_read_bit(bc); + pc->filter_level = vp8_read_literal(bc, 6); + pc->sharpness_level = vp8_read_literal(bc, 3); + + /* Read in loop filter deltas applied at the MB level based on mode or ref frame. */ + xd->mode_ref_lf_delta_update = 0; + xd->mode_ref_lf_delta_enabled = (unsigned char)vp8_read_bit(bc); + + if (xd->mode_ref_lf_delta_enabled) + { + /* Do the deltas need to be updated */ + xd->mode_ref_lf_delta_update = (unsigned char)vp8_read_bit(bc); + + if (xd->mode_ref_lf_delta_update) + { + /* Send update */ + for (i = 0; i < MAX_REF_LF_DELTAS; i++) + { + if (vp8_read_bit(bc)) + { + /*sign = vp8_read_bit( bc );*/ + xd->ref_lf_deltas[i] = (signed char)vp8_read_literal(bc, 6); + + if (vp8_read_bit(bc)) /* Apply sign */ + xd->ref_lf_deltas[i] = xd->ref_lf_deltas[i] * -1; + } + } + + /* Send update */ + for (i = 0; i < MAX_MODE_LF_DELTAS; i++) + { + if (vp8_read_bit(bc)) + { + /*sign = vp8_read_bit( bc );*/ + xd->mode_lf_deltas[i] = (signed char)vp8_read_literal(bc, 6); + + if (vp8_read_bit(bc)) /* Apply sign */ + xd->mode_lf_deltas[i] = xd->mode_lf_deltas[i] * -1; + } + } + } + } + + setup_token_decoder(pbi, data + first_partition_length_in_bytes); + + xd->current_bc = &pbi->mbc[0]; + + /* Read the default quantizers. */ + { + int Q, q_update; + + Q = vp8_read_literal(bc, 7); /* AC 1st order Q = default */ + pc->base_qindex = Q; + q_update = 0; + pc->y1dc_delta_q = get_delta_q(bc, pc->y1dc_delta_q, &q_update); + pc->y2dc_delta_q = get_delta_q(bc, pc->y2dc_delta_q, &q_update); + pc->y2ac_delta_q = get_delta_q(bc, pc->y2ac_delta_q, &q_update); + pc->uvdc_delta_q = get_delta_q(bc, pc->uvdc_delta_q, &q_update); + pc->uvac_delta_q = get_delta_q(bc, pc->uvac_delta_q, &q_update); + + if (q_update) + vp8cx_init_de_quantizer(pbi); + + /* MB level dequantizer setup */ + vp8_mb_init_dequantizer(pbi, &pbi->mb); + } + + /* Determine if the golden frame or ARF buffer should be updated and how. + * For all non key frames the GF and ARF refresh flags and sign bias + * flags must be set explicitly. 
+ */ + if (pc->frame_type != KEY_FRAME) + { + /* Should the GF or ARF be updated from the current frame */ + pc->refresh_golden_frame = vp8_read_bit(bc); +#if CONFIG_ERROR_CONCEALMENT + /* Assume we shouldn't refresh golden if the bit is missing */ + xd->corrupted |= vp8dx_bool_error(bc); + if (pbi->ec_active && xd->corrupted) + pc->refresh_golden_frame = 0; +#endif + + pc->refresh_alt_ref_frame = vp8_read_bit(bc); +#if CONFIG_ERROR_CONCEALMENT + /* Assume we shouldn't refresh altref if the bit is missing */ + xd->corrupted |= vp8dx_bool_error(bc); + if (pbi->ec_active && xd->corrupted) + pc->refresh_alt_ref_frame = 0; +#endif + + /* Buffer to buffer copy flags. */ + pc->copy_buffer_to_gf = 0; + + if (!pc->refresh_golden_frame) + pc->copy_buffer_to_gf = vp8_read_literal(bc, 2); + +#if CONFIG_ERROR_CONCEALMENT + /* Assume we shouldn't copy to the golden if the bit is missing */ + xd->corrupted |= vp8dx_bool_error(bc); + if (pbi->ec_active && xd->corrupted) + pc->copy_buffer_to_gf = 0; +#endif + + pc->copy_buffer_to_arf = 0; + + if (!pc->refresh_alt_ref_frame) + pc->copy_buffer_to_arf = vp8_read_literal(bc, 2); + +#if CONFIG_ERROR_CONCEALMENT + /* Assume we shouldn't copy to the alt-ref if the bit is missing */ + xd->corrupted |= vp8dx_bool_error(bc); + if (pbi->ec_active && xd->corrupted) + pc->copy_buffer_to_arf = 0; +#endif + + + pc->ref_frame_sign_bias[GOLDEN_FRAME] = vp8_read_bit(bc); + pc->ref_frame_sign_bias[ALTREF_FRAME] = vp8_read_bit(bc); + } + + pc->refresh_entropy_probs = vp8_read_bit(bc); +#if CONFIG_ERROR_CONCEALMENT + /* Assume we shouldn't refresh the probabilities if the bit is + * missing */ + xd->corrupted |= vp8dx_bool_error(bc); + if (pbi->ec_active && xd->corrupted) + pc->refresh_entropy_probs = 0; +#endif + if (pc->refresh_entropy_probs == 0) + { + memcpy(&pc->lfc, &pc->fc, sizeof(pc->fc)); + } + + pc->refresh_last_frame = pc->frame_type == KEY_FRAME || vp8_read_bit(bc); + +#if CONFIG_ERROR_CONCEALMENT + /* Assume we should refresh the last frame if the bit is missing */ + xd->corrupted |= vp8dx_bool_error(bc); + if (pbi->ec_active && xd->corrupted) + pc->refresh_last_frame = 1; +#endif + + if (0) + { + FILE *z = fopen("decodestats.stt", "a"); + fprintf(z, "%6d F:%d,G:%d,A:%d,L:%d,Q:%d\n", + pc->current_video_frame, + pc->frame_type, + pc->refresh_golden_frame, + pc->refresh_alt_ref_frame, + pc->refresh_last_frame, + pc->base_qindex); + fclose(z); + } + + { + pbi->independent_partitions = 1; + + /* read coef probability tree */ + for (i = 0; i < BLOCK_TYPES; i++) + for (j = 0; j < COEF_BANDS; j++) + for (k = 0; k < PREV_COEF_CONTEXTS; k++) + for (l = 0; l < ENTROPY_NODES; l++) + { + + vp8_prob *const p = pc->fc.coef_probs [i][j][k] + l; + + if (vp8_read(bc, vp8_coef_update_probs [i][j][k][l])) + { + *p = (vp8_prob)vp8_read_literal(bc, 8); + + } + if (k > 0 && *p != pc->fc.coef_probs[i][j][k-1][l]) + pbi->independent_partitions = 0; + + } + } + + /* clear out the coeff buffer */ + memset(xd->qcoeff, 0, sizeof(xd->qcoeff)); + + vp8_decode_mode_mvs(pbi); + +#if CONFIG_ERROR_CONCEALMENT + if (pbi->ec_active && + pbi->mvs_corrupt_from_mb < (unsigned int)pc->mb_cols * pc->mb_rows) + { + /* Motion vectors are missing in this frame. 
We will try to estimate + * them and then continue decoding the frame as usual */ + vp8_estimate_missing_mvs(pbi); + } +#endif + + memset(pc->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * pc->mb_cols); + pbi->frame_corrupt_residual = 0; + +#if CONFIG_MULTITHREAD + if (pbi->b_multithreaded_rd && pc->multi_token_partition != ONE_PARTITION) + { + unsigned int thread; + vp8mt_decode_mb_rows(pbi, xd); + vp8_yv12_extend_frame_borders(yv12_fb_new); + for (thread = 0; thread < pbi->decoding_thread_count; ++thread) + corrupt_tokens |= pbi->mb_row_di[thread].mbd.corrupted; + } + else +#endif + { + decode_mb_rows(pbi); + corrupt_tokens |= xd->corrupted; + } + + /* Collect information about decoder corruption. */ + /* 1. Check first boolean decoder for errors. */ + yv12_fb_new->corrupted = vp8dx_bool_error(bc); + /* 2. Check the macroblock information */ + yv12_fb_new->corrupted |= corrupt_tokens; + + if (!pbi->decoded_key_frame) + { + if (pc->frame_type == KEY_FRAME && + !yv12_fb_new->corrupted) + pbi->decoded_key_frame = 1; + else + vpx_internal_error(&pbi->common.error, VPX_CODEC_CORRUPT_FRAME, + "A stream must start with a complete key frame"); + } + + /* vpx_log("Decoder: Frame Decoded, Size Roughly:%d bytes \n",bc->pos+pbi->bc2.pos); */ + + if (pc->refresh_entropy_probs == 0) + { + memcpy(&pc->fc, &pc->lfc, sizeof(pc->fc)); + pbi->independent_partitions = prev_independent_partitions; + } + +#ifdef PACKET_TESTING + { + FILE *f = fopen("decompressor.VP8", "ab"); + unsigned int size = pbi->bc2.pos + pbi->bc.pos + 8; + fwrite((void *) &size, 4, 1, f); + fwrite((void *) pbi->Source, size, 1, f); + fclose(f); + } +#endif + + return 0; +} diff --git a/media/libvpx/vp8/decoder/decodemv.c b/media/libvpx/vp8/decoder/decodemv.c new file mode 100644 index 000000000..1d155e7e1 --- /dev/null +++ b/media/libvpx/vp8/decoder/decodemv.c @@ -0,0 +1,670 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + + +#include "treereader.h" +#include "vp8/common/entropymv.h" +#include "vp8/common/entropymode.h" +#include "onyxd_int.h" +#include "vp8/common/findnearmv.h" + +#if CONFIG_DEBUG +#include <assert.h> +#endif +static B_PREDICTION_MODE read_bmode(vp8_reader *bc, const vp8_prob *p) +{ + const int i = vp8_treed_read(bc, vp8_bmode_tree, p); + + return (B_PREDICTION_MODE)i; +} + +static MB_PREDICTION_MODE read_ymode(vp8_reader *bc, const vp8_prob *p) +{ + const int i = vp8_treed_read(bc, vp8_ymode_tree, p); + + return (MB_PREDICTION_MODE)i; +} + +static MB_PREDICTION_MODE read_kf_ymode(vp8_reader *bc, const vp8_prob *p) +{ + const int i = vp8_treed_read(bc, vp8_kf_ymode_tree, p); + + return (MB_PREDICTION_MODE)i; +} + +static MB_PREDICTION_MODE read_uv_mode(vp8_reader *bc, const vp8_prob *p) +{ + const int i = vp8_treed_read(bc, vp8_uv_mode_tree, p); + + return (MB_PREDICTION_MODE)i; +} + +static void read_kf_modes(VP8D_COMP *pbi, MODE_INFO *mi) +{ + vp8_reader *const bc = & pbi->mbc[8]; + const int mis = pbi->common.mode_info_stride; + + mi->mbmi.ref_frame = INTRA_FRAME; + mi->mbmi.mode = read_kf_ymode(bc, vp8_kf_ymode_prob); + + if (mi->mbmi.mode == B_PRED) + { + int i = 0; + mi->mbmi.is_4x4 = 1; + + do + { + const B_PREDICTION_MODE A = above_block_mode(mi, i, mis); + const B_PREDICTION_MODE L = left_block_mode(mi, i); + + mi->bmi[i].as_mode = + read_bmode(bc, vp8_kf_bmode_prob [A] [L]); + } + while (++i < 16); + } + + mi->mbmi.uv_mode = read_uv_mode(bc, vp8_kf_uv_mode_prob); +} + +static int read_mvcomponent(vp8_reader *r, const MV_CONTEXT *mvc) +{ + const vp8_prob *const p = (const vp8_prob *) mvc; + int x = 0; + + if (vp8_read(r, p [mvpis_short])) /* Large */ + { + int i = 0; + + do + { + x += vp8_read(r, p [MVPbits + i]) << i; + } + while (++i < 3); + + i = mvlong_width - 1; /* Skip bit 3, which is sometimes implicit */ + + do + { + x += vp8_read(r, p [MVPbits + i]) << i; + } + while (--i > 3); + + if (!(x & 0xFFF0) || vp8_read(r, p [MVPbits + 3])) + x += 8; + } + else /* small */ + x = vp8_treed_read(r, vp8_small_mvtree, p + MVPshort); + + if (x && vp8_read(r, p [MVPsign])) + x = -x; + + return x; +} + +static void read_mv(vp8_reader *r, MV *mv, const MV_CONTEXT *mvc) +{ + mv->row = (short)(read_mvcomponent(r, mvc) * 2); + mv->col = (short)(read_mvcomponent(r, ++mvc) * 2); +} + + +static void read_mvcontexts(vp8_reader *bc, MV_CONTEXT *mvc) +{ + int i = 0; + + do + { + const vp8_prob *up = vp8_mv_update_probs[i].prob; + vp8_prob *p = (vp8_prob *)(mvc + i); + vp8_prob *const pstop = p + MVPcount; + + do + { + if (vp8_read(bc, *up++)) + { + const vp8_prob x = (vp8_prob)vp8_read_literal(bc, 7); + + *p = x ? x << 1 : 1; + } + } + while (++p < pstop); + } + while (++i < 2); +} + +static const unsigned char mbsplit_fill_count[4] = {8, 8, 4, 1}; +static const unsigned char mbsplit_fill_offset[4][16] = { + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, + { 0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15}, + { 0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15}, + { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} +}; + + +static void mb_mode_mv_init(VP8D_COMP *pbi) +{ + vp8_reader *const bc = & pbi->mbc[8]; + MV_CONTEXT *const mvc = pbi->common.fc.mvc; + +#if CONFIG_ERROR_CONCEALMENT + /* Default is that no macroblock is corrupt, therefore we initialize + * mvs_corrupt_from_mb to something very big, which we can be sure is + * outside the frame. 
*/ + pbi->mvs_corrupt_from_mb = UINT_MAX; +#endif + /* Read the mb_no_coeff_skip flag */ + pbi->common.mb_no_coeff_skip = (int)vp8_read_bit(bc); + + pbi->prob_skip_false = 0; + if (pbi->common.mb_no_coeff_skip) + pbi->prob_skip_false = (vp8_prob)vp8_read_literal(bc, 8); + + if(pbi->common.frame_type != KEY_FRAME) + { + pbi->prob_intra = (vp8_prob)vp8_read_literal(bc, 8); + pbi->prob_last = (vp8_prob)vp8_read_literal(bc, 8); + pbi->prob_gf = (vp8_prob)vp8_read_literal(bc, 8); + + if (vp8_read_bit(bc)) + { + int i = 0; + + do + { + pbi->common.fc.ymode_prob[i] = + (vp8_prob) vp8_read_literal(bc, 8); + } + while (++i < 4); + } + + if (vp8_read_bit(bc)) + { + int i = 0; + + do + { + pbi->common.fc.uv_mode_prob[i] = + (vp8_prob) vp8_read_literal(bc, 8); + } + while (++i < 3); + } + + read_mvcontexts(bc, mvc); + } +} + +const vp8_prob vp8_sub_mv_ref_prob3 [8][VP8_SUBMVREFS-1] = +{ + { 147, 136, 18 }, /* SUBMVREF_NORMAL */ + { 223, 1 , 34 }, /* SUBMVREF_LEFT_ABOVE_SAME */ + { 106, 145, 1 }, /* SUBMVREF_LEFT_ZED */ + { 208, 1 , 1 }, /* SUBMVREF_LEFT_ABOVE_ZED */ + { 179, 121, 1 }, /* SUBMVREF_ABOVE_ZED */ + { 223, 1 , 34 }, /* SUBMVREF_LEFT_ABOVE_SAME */ + { 179, 121, 1 }, /* SUBMVREF_ABOVE_ZED */ + { 208, 1 , 1 } /* SUBMVREF_LEFT_ABOVE_ZED */ +}; + +static +const vp8_prob * get_sub_mv_ref_prob(const int left, const int above) +{ + int lez = (left == 0); + int aez = (above == 0); + int lea = (left == above); + const vp8_prob * prob; + + prob = vp8_sub_mv_ref_prob3[(aez << 2) | + (lez << 1) | + (lea)]; + + return prob; +} + +static void decode_split_mv(vp8_reader *const bc, MODE_INFO *mi, + const MODE_INFO *left_mb, const MODE_INFO *above_mb, + MB_MODE_INFO *mbmi, int_mv best_mv, + MV_CONTEXT *const mvc, int mb_to_left_edge, + int mb_to_right_edge, int mb_to_top_edge, + int mb_to_bottom_edge) +{ + int s; /* split configuration (16x8, 8x16, 8x8, 4x4) */ + int num_p; /* number of partitions in the split configuration + (see vp8_mbsplit_count) */ + int j = 0; + + s = 3; + num_p = 16; + if( vp8_read(bc, 110) ) + { + s = 2; + num_p = 4; + if( vp8_read(bc, 111) ) + { + s = vp8_read(bc, 150); + num_p = 2; + } + } + + do /* for each subset j */ + { + int_mv leftmv, abovemv; + int_mv blockmv; + int k; /* first block in subset j */ + + const vp8_prob *prob; + k = vp8_mbsplit_offset[s][j]; + + if (!(k & 3)) + { + /* On L edge, get from MB to left of us */ + if(left_mb->mbmi.mode != SPLITMV) + leftmv.as_int = left_mb->mbmi.mv.as_int; + else + leftmv.as_int = (left_mb->bmi + k + 4 - 1)->mv.as_int; + } + else + leftmv.as_int = (mi->bmi + k - 1)->mv.as_int; + + if (!(k >> 2)) + { + /* On top edge, get from MB above us */ + if(above_mb->mbmi.mode != SPLITMV) + abovemv.as_int = above_mb->mbmi.mv.as_int; + else + abovemv.as_int = (above_mb->bmi + k + 16 - 4)->mv.as_int; + } + else + abovemv.as_int = (mi->bmi + k - 4)->mv.as_int; + + prob = get_sub_mv_ref_prob(leftmv.as_int, abovemv.as_int); + + if( vp8_read(bc, prob[0]) ) + { + if( vp8_read(bc, prob[1]) ) + { + blockmv.as_int = 0; + if( vp8_read(bc, prob[2]) ) + { + blockmv.as_mv.row = read_mvcomponent(bc, &mvc[0]) * 2; + blockmv.as_mv.row += best_mv.as_mv.row; + blockmv.as_mv.col = read_mvcomponent(bc, &mvc[1]) * 2; + blockmv.as_mv.col += best_mv.as_mv.col; + } + } + else + { + blockmv.as_int = abovemv.as_int; + } + } + else + { + blockmv.as_int = leftmv.as_int; + } + + mbmi->need_to_clamp_mvs |= vp8_check_mv_bounds(&blockmv, + mb_to_left_edge, + mb_to_right_edge, + mb_to_top_edge, + mb_to_bottom_edge); + + { + /* Fill (uniform) modes, mvs of jth subset. 
+ Must do it here because ensuing subsets can + refer back to us via "left" or "above". */ + const unsigned char *fill_offset; + unsigned int fill_count = mbsplit_fill_count[s]; + + fill_offset = &mbsplit_fill_offset[s] + [(unsigned char)j * mbsplit_fill_count[s]]; + + do { + mi->bmi[ *fill_offset].mv.as_int = blockmv.as_int; + fill_offset++; + }while (--fill_count); + } + + } + while (++j < num_p); + + mbmi->partitioning = s; +} + +static void read_mb_modes_mv(VP8D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi) +{ + vp8_reader *const bc = & pbi->mbc[8]; + mbmi->ref_frame = (MV_REFERENCE_FRAME) vp8_read(bc, pbi->prob_intra); + if (mbmi->ref_frame) /* inter MB */ + { + enum {CNT_INTRA, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV}; + int cnt[4]; + int *cntx = cnt; + int_mv near_mvs[4]; + int_mv *nmv = near_mvs; + const int mis = pbi->mb.mode_info_stride; + const MODE_INFO *above = mi - mis; + const MODE_INFO *left = mi - 1; + const MODE_INFO *aboveleft = above - 1; + int *ref_frame_sign_bias = pbi->common.ref_frame_sign_bias; + + mbmi->need_to_clamp_mvs = 0; + + if (vp8_read(bc, pbi->prob_last)) + { + mbmi->ref_frame = + (MV_REFERENCE_FRAME)((int)(2 + vp8_read(bc, pbi->prob_gf))); + } + + /* Zero accumulators */ + nmv[0].as_int = nmv[1].as_int = nmv[2].as_int = 0; + cnt[0] = cnt[1] = cnt[2] = cnt[3] = 0; + + /* Process above */ + if (above->mbmi.ref_frame != INTRA_FRAME) + { + if (above->mbmi.mv.as_int) + { + (++nmv)->as_int = above->mbmi.mv.as_int; + mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], + mbmi->ref_frame, nmv, ref_frame_sign_bias); + ++cntx; + } + + *cntx += 2; + } + + /* Process left */ + if (left->mbmi.ref_frame != INTRA_FRAME) + { + if (left->mbmi.mv.as_int) + { + int_mv this_mv; + + this_mv.as_int = left->mbmi.mv.as_int; + mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame], + mbmi->ref_frame, &this_mv, ref_frame_sign_bias); + + if (this_mv.as_int != nmv->as_int) + { + (++nmv)->as_int = this_mv.as_int; + ++cntx; + } + + *cntx += 2; + } + else + cnt[CNT_INTRA] += 2; + } + + /* Process above left */ + if (aboveleft->mbmi.ref_frame != INTRA_FRAME) + { + if (aboveleft->mbmi.mv.as_int) + { + int_mv this_mv; + + this_mv.as_int = aboveleft->mbmi.mv.as_int; + mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame], + mbmi->ref_frame, &this_mv, ref_frame_sign_bias); + + if (this_mv.as_int != nmv->as_int) + { + (++nmv)->as_int = this_mv.as_int; + ++cntx; + } + + *cntx += 1; + } + else + cnt[CNT_INTRA] += 1; + } + + if( vp8_read(bc, vp8_mode_contexts [cnt[CNT_INTRA]] [0]) ) + { + + /* If we have three distinct MV's ... 
*/ + /* See if above-left MV can be merged with NEAREST */ + cnt[CNT_NEAREST] += ( (cnt[CNT_SPLITMV] > 0) & + (nmv->as_int == near_mvs[CNT_NEAREST].as_int)); + + /* Swap near and nearest if necessary */ + if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) + { + int tmp; + tmp = cnt[CNT_NEAREST]; + cnt[CNT_NEAREST] = cnt[CNT_NEAR]; + cnt[CNT_NEAR] = tmp; + tmp = near_mvs[CNT_NEAREST].as_int; + near_mvs[CNT_NEAREST].as_int = near_mvs[CNT_NEAR].as_int; + near_mvs[CNT_NEAR].as_int = tmp; + } + + if( vp8_read(bc, vp8_mode_contexts [cnt[CNT_NEAREST]] [1]) ) + { + + if( vp8_read(bc, vp8_mode_contexts [cnt[CNT_NEAR]] [2]) ) + { + int mb_to_top_edge; + int mb_to_bottom_edge; + int mb_to_left_edge; + int mb_to_right_edge; + MV_CONTEXT *const mvc = pbi->common.fc.mvc; + int near_index; + + mb_to_top_edge = pbi->mb.mb_to_top_edge; + mb_to_bottom_edge = pbi->mb.mb_to_bottom_edge; + mb_to_top_edge -= LEFT_TOP_MARGIN; + mb_to_bottom_edge += RIGHT_BOTTOM_MARGIN; + mb_to_right_edge = pbi->mb.mb_to_right_edge; + mb_to_right_edge += RIGHT_BOTTOM_MARGIN; + mb_to_left_edge = pbi->mb.mb_to_left_edge; + mb_to_left_edge -= LEFT_TOP_MARGIN; + + /* Use near_mvs[0] to store the "best" MV */ + near_index = CNT_INTRA + + (cnt[CNT_NEAREST] >= cnt[CNT_INTRA]); + + vp8_clamp_mv2(&near_mvs[near_index], &pbi->mb); + + cnt[CNT_SPLITMV] = ((above->mbmi.mode == SPLITMV) + + (left->mbmi.mode == SPLITMV)) * 2 + + (aboveleft->mbmi.mode == SPLITMV); + + if( vp8_read(bc, vp8_mode_contexts [cnt[CNT_SPLITMV]] [3]) ) + { + decode_split_mv(bc, mi, left, above, + mbmi, + near_mvs[near_index], + mvc, mb_to_left_edge, + mb_to_right_edge, + mb_to_top_edge, + mb_to_bottom_edge); + mbmi->mv.as_int = mi->bmi[15].mv.as_int; + mbmi->mode = SPLITMV; + mbmi->is_4x4 = 1; + } + else + { + int_mv *const mbmi_mv = & mbmi->mv; + read_mv(bc, &mbmi_mv->as_mv, (const MV_CONTEXT *) mvc); + mbmi_mv->as_mv.row += near_mvs[near_index].as_mv.row; + mbmi_mv->as_mv.col += near_mvs[near_index].as_mv.col; + + /* Don't need to check this on NEARMV and NEARESTMV + * modes since those modes clamp the MV. The NEWMV mode + * does not, so signal to the prediction stage whether + * special handling may be required. 
+ */ + mbmi->need_to_clamp_mvs = + vp8_check_mv_bounds(mbmi_mv, mb_to_left_edge, + mb_to_right_edge, + mb_to_top_edge, + mb_to_bottom_edge); + mbmi->mode = NEWMV; + } + } + else + { + mbmi->mode = NEARMV; + mbmi->mv.as_int = near_mvs[CNT_NEAR].as_int; + vp8_clamp_mv2(&mbmi->mv, &pbi->mb); + } + } + else + { + mbmi->mode = NEARESTMV; + mbmi->mv.as_int = near_mvs[CNT_NEAREST].as_int; + vp8_clamp_mv2(&mbmi->mv, &pbi->mb); + } + } + else + { + mbmi->mode = ZEROMV; + mbmi->mv.as_int = 0; + } + +#if CONFIG_ERROR_CONCEALMENT + if(pbi->ec_enabled && (mbmi->mode != SPLITMV)) + { + mi->bmi[ 0].mv.as_int = + mi->bmi[ 1].mv.as_int = + mi->bmi[ 2].mv.as_int = + mi->bmi[ 3].mv.as_int = + mi->bmi[ 4].mv.as_int = + mi->bmi[ 5].mv.as_int = + mi->bmi[ 6].mv.as_int = + mi->bmi[ 7].mv.as_int = + mi->bmi[ 8].mv.as_int = + mi->bmi[ 9].mv.as_int = + mi->bmi[10].mv.as_int = + mi->bmi[11].mv.as_int = + mi->bmi[12].mv.as_int = + mi->bmi[13].mv.as_int = + mi->bmi[14].mv.as_int = + mi->bmi[15].mv.as_int = mbmi->mv.as_int; + } +#endif + } + else + { + /* required for left and above block mv */ + mbmi->mv.as_int = 0; + + /* MB is intra coded */ + if ((mbmi->mode = read_ymode(bc, pbi->common.fc.ymode_prob)) == B_PRED) + { + int j = 0; + mbmi->is_4x4 = 1; + do + { + mi->bmi[j].as_mode = read_bmode(bc, pbi->common.fc.bmode_prob); + } + while (++j < 16); + } + + mbmi->uv_mode = read_uv_mode(bc, pbi->common.fc.uv_mode_prob); + } + +} + +static void read_mb_features(vp8_reader *r, MB_MODE_INFO *mi, MACROBLOCKD *x) +{ + /* Is segmentation enabled */ + if (x->segmentation_enabled && x->update_mb_segmentation_map) + { + /* If so then read the segment id. */ + if (vp8_read(r, x->mb_segment_tree_probs[0])) + mi->segment_id = + (unsigned char)(2 + vp8_read(r, x->mb_segment_tree_probs[2])); + else + mi->segment_id = + (unsigned char)(vp8_read(r, x->mb_segment_tree_probs[1])); + } +} + +static void decode_mb_mode_mvs(VP8D_COMP *pbi, MODE_INFO *mi, + MB_MODE_INFO *mbmi) +{ + (void)mbmi; + + /* Read the Macroblock segmentation map if it is being updated explicitly + * this frame (reset to 0 above by default) + * By default on a key frame reset all MBs to segment 0 + */ + if (pbi->mb.update_mb_segmentation_map) + read_mb_features(&pbi->mbc[8], &mi->mbmi, &pbi->mb); + else if(pbi->common.frame_type == KEY_FRAME) + mi->mbmi.segment_id = 0; + + /* Read the macroblock coeff skip flag if this feature is in use, + * else default to 0 */ + if (pbi->common.mb_no_coeff_skip) + mi->mbmi.mb_skip_coeff = vp8_read(&pbi->mbc[8], pbi->prob_skip_false); + else + mi->mbmi.mb_skip_coeff = 0; + + mi->mbmi.is_4x4 = 0; + if(pbi->common.frame_type == KEY_FRAME) + read_kf_modes(pbi, mi); + else + read_mb_modes_mv(pbi, mi, &mi->mbmi); + +} + +void vp8_decode_mode_mvs(VP8D_COMP *pbi) +{ + MODE_INFO *mi = pbi->common.mi; + int mb_row = -1; + int mb_to_right_edge_start; + + mb_mode_mv_init(pbi); + + pbi->mb.mb_to_top_edge = 0; + pbi->mb.mb_to_bottom_edge = ((pbi->common.mb_rows - 1) * 16) << 3; + mb_to_right_edge_start = ((pbi->common.mb_cols - 1) * 16) << 3; + + while (++mb_row < pbi->common.mb_rows) + { + int mb_col = -1; + + pbi->mb.mb_to_left_edge = 0; + pbi->mb.mb_to_right_edge = mb_to_right_edge_start; + + while (++mb_col < pbi->common.mb_cols) + { +#if CONFIG_ERROR_CONCEALMENT + int mb_num = mb_row * pbi->common.mb_cols + mb_col; +#endif + + decode_mb_mode_mvs(pbi, mi, &mi->mbmi); + +#if CONFIG_ERROR_CONCEALMENT + /* look for corruption. set mvs_corrupt_from_mb to the current + * mb_num if the frame is corrupt from this macroblock. 
*/ + if (vp8dx_bool_error(&pbi->mbc[8]) && mb_num < + (int)pbi->mvs_corrupt_from_mb) + { + pbi->mvs_corrupt_from_mb = mb_num; + /* no need to continue since the partition is corrupt from + * here on. + */ + return; + } +#endif + + pbi->mb.mb_to_left_edge -= (16 << 3); + pbi->mb.mb_to_right_edge -= (16 << 3); + mi++; /* next macroblock */ + } + pbi->mb.mb_to_top_edge -= (16 << 3); + pbi->mb.mb_to_bottom_edge -= (16 << 3); + + mi++; /* skip left predictor each row */ + } +} diff --git a/media/libvpx/vp8/decoder/decodemv.h b/media/libvpx/vp8/decoder/decodemv.h new file mode 100644 index 000000000..f33b07351 --- /dev/null +++ b/media/libvpx/vp8/decoder/decodemv.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VP8_DECODER_DECODEMV_H_ +#define VP8_DECODER_DECODEMV_H_ + +#include "onyxd_int.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void vp8_decode_mode_mvs(VP8D_COMP *); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VP8_DECODER_DECODEMV_H_ diff --git a/media/libvpx/vp8/decoder/decoderthreading.h b/media/libvpx/vp8/decoder/decoderthreading.h new file mode 100644 index 000000000..c563cf6e9 --- /dev/null +++ b/media/libvpx/vp8/decoder/decoderthreading.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VP8_DECODER_DECODERTHREADING_H_ +#define VP8_DECODER_DECODERTHREADING_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#if CONFIG_MULTITHREAD +void vp8mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd); +void vp8_decoder_remove_threads(VP8D_COMP *pbi); +void vp8_decoder_create_threads(VP8D_COMP *pbi); +void vp8mt_alloc_temp_buffers(VP8D_COMP *pbi, int width, int prev_mb_rows); +void vp8mt_de_alloc_temp_buffers(VP8D_COMP *pbi, int mb_rows); +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VP8_DECODER_DECODERTHREADING_H_ diff --git a/media/libvpx/vp8/decoder/detokenize.c b/media/libvpx/vp8/decoder/detokenize.c new file mode 100644 index 000000000..fcc7533c5 --- /dev/null +++ b/media/libvpx/vp8/decoder/detokenize.c @@ -0,0 +1,245 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */
+
+
+#include "vp8/common/blockd.h"
+#include "onyxd_int.h"
+#include "vpx_mem/vpx_mem.h"
+#include "vpx_ports/mem.h"
+#include "detokenize.h"
+
+void vp8_reset_mb_tokens_context(MACROBLOCKD *x)
+{
+    ENTROPY_CONTEXT *a_ctx = ((ENTROPY_CONTEXT *)x->above_context);
+    ENTROPY_CONTEXT *l_ctx = ((ENTROPY_CONTEXT *)x->left_context);
+
+    memset(a_ctx, 0, sizeof(ENTROPY_CONTEXT_PLANES)-1);
+    memset(l_ctx, 0, sizeof(ENTROPY_CONTEXT_PLANES)-1);
+
+    /* Clear entropy contexts for Y2 blocks */
+    if (!x->mode_info_context->mbmi.is_4x4)
+    {
+        a_ctx[8] = l_ctx[8] = 0;
+    }
+}
+
+/*
+    ------------------------------------------------------------------------------
+    Residual decoding (Paragraph 13.2 / 13.3)
+*/
+static const uint8_t kBands[16 + 1] = {
+    0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7,
+    0  /* extra entry as sentinel */
+};
+
+static const uint8_t kCat3[] = { 173, 148, 140, 0 };
+static const uint8_t kCat4[] = { 176, 155, 140, 135, 0 };
+static const uint8_t kCat5[] = { 180, 157, 141, 134, 130, 0 };
+static const uint8_t kCat6[] =
+    { 254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129, 0 };
+static const uint8_t* const kCat3456[] = { kCat3, kCat4, kCat5, kCat6 };
+static const uint8_t kZigzag[16] = {
+    0, 1, 4, 8,  5, 2, 3, 6,  9, 12, 13, 10,  7, 11, 14, 15
+};
+
+#define VP8GetBit vp8dx_decode_bool
+#define NUM_PROBAS  11
+#define NUM_CTX  3
+
+/* for const-casting */
+typedef const uint8_t (*ProbaArray)[NUM_CTX][NUM_PROBAS];
+
+static int GetSigned(BOOL_DECODER *br, int value_to_sign)
+{
+    int split = (br->range + 1) >> 1;
+    VP8_BD_VALUE bigsplit = (VP8_BD_VALUE)split << (VP8_BD_VALUE_SIZE - 8);
+    int v;
+
+    if (br->count < 0)
+        vp8dx_bool_decoder_fill(br);
+
+    if (br->value < bigsplit)
+    {
+        br->range = split;
+        v = value_to_sign;
+    }
+    else
+    {
+        br->range = br->range - split;
+        br->value = br->value - bigsplit;
+        v = -value_to_sign;
+    }
+    br->range += br->range;
+    br->value += br->value;
+    br->count--;
+
+    return v;
+}
+
+/*
+   Returns the position of the last non-zero coeff plus one
+   (and 0 if there's no coeff at all)
+*/
+static int GetCoeffs(BOOL_DECODER *br, ProbaArray prob,
+                     int ctx, int n, int16_t* out)
+{
+    const uint8_t* p = prob[n][ctx];
+    if (!VP8GetBit(br, p[0]))
+    {   /* first EOB is more a 'CBP' bit.
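+         * A zero here means the block codes no coefficients at all
+         * (the analogue of a coded-block-pattern flag), so an all-zero
+         * 4x4 block costs exactly one bool read.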
*/ + return 0; + } + while (1) + { + ++n; + if (!VP8GetBit(br, p[1])) + { + p = prob[kBands[n]][0]; + } + else + { /* non zero coeff */ + int v, j; + if (!VP8GetBit(br, p[2])) + { + p = prob[kBands[n]][1]; + v = 1; + } + else + { + if (!VP8GetBit(br, p[3])) + { + if (!VP8GetBit(br, p[4])) + { + v = 2; + } + else + { + v = 3 + VP8GetBit(br, p[5]); + } + } + else + { + if (!VP8GetBit(br, p[6])) + { + if (!VP8GetBit(br, p[7])) + { + v = 5 + VP8GetBit(br, 159); + } else + { + v = 7 + 2 * VP8GetBit(br, 165); + v += VP8GetBit(br, 145); + } + } + else + { + const uint8_t* tab; + const int bit1 = VP8GetBit(br, p[8]); + const int bit0 = VP8GetBit(br, p[9 + bit1]); + const int cat = 2 * bit1 + bit0; + v = 0; + for (tab = kCat3456[cat]; *tab; ++tab) + { + v += v + VP8GetBit(br, *tab); + } + v += 3 + (8 << cat); + } + } + p = prob[kBands[n]][2]; + } + j = kZigzag[n - 1]; + + out[j] = GetSigned(br, v); + + if (n == 16 || !VP8GetBit(br, p[0])) + { /* EOB */ + return n; + } + } + if (n == 16) + { + return 16; + } + } +} + +int vp8_decode_mb_tokens(VP8D_COMP *dx, MACROBLOCKD *x) +{ + BOOL_DECODER *bc = x->current_bc; + const FRAME_CONTEXT * const fc = &dx->common.fc; + char *eobs = x->eobs; + + int i; + int nonzeros; + int eobtotal = 0; + + short *qcoeff_ptr; + ProbaArray coef_probs; + ENTROPY_CONTEXT *a_ctx = ((ENTROPY_CONTEXT *)x->above_context); + ENTROPY_CONTEXT *l_ctx = ((ENTROPY_CONTEXT *)x->left_context); + ENTROPY_CONTEXT *a; + ENTROPY_CONTEXT *l; + int skip_dc = 0; + + qcoeff_ptr = &x->qcoeff[0]; + + if (!x->mode_info_context->mbmi.is_4x4) + { + a = a_ctx + 8; + l = l_ctx + 8; + + coef_probs = fc->coef_probs [1]; + + nonzeros = GetCoeffs(bc, coef_probs, (*a + *l), 0, qcoeff_ptr + 24 * 16); + *a = *l = (nonzeros > 0); + + eobs[24] = nonzeros; + eobtotal += nonzeros - 16; + + coef_probs = fc->coef_probs [0]; + skip_dc = 1; + } + else + { + coef_probs = fc->coef_probs [3]; + skip_dc = 0; + } + + for (i = 0; i < 16; ++i) + { + a = a_ctx + (i&3); + l = l_ctx + ((i&0xc)>>2); + + nonzeros = GetCoeffs(bc, coef_probs, (*a + *l), skip_dc, qcoeff_ptr); + *a = *l = (nonzeros > 0); + + nonzeros += skip_dc; + eobs[i] = nonzeros; + eobtotal += nonzeros; + qcoeff_ptr += 16; + } + + coef_probs = fc->coef_probs [2]; + + a_ctx += 4; + l_ctx += 4; + for (i = 16; i < 24; ++i) + { + a = a_ctx + ((i > 19)<<1) + (i&1); + l = l_ctx + ((i > 19)<<1) + ((i&3)>1); + + nonzeros = GetCoeffs(bc, coef_probs, (*a + *l), 0, qcoeff_ptr); + *a = *l = (nonzeros > 0); + + eobs[i] = nonzeros; + eobtotal += nonzeros; + qcoeff_ptr += 16; + } + + return eobtotal; +} + diff --git a/media/libvpx/vp8/decoder/detokenize.h b/media/libvpx/vp8/decoder/detokenize.h new file mode 100644 index 000000000..f0b125444 --- /dev/null +++ b/media/libvpx/vp8/decoder/detokenize.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#ifndef VP8_DECODER_DETOKENIZE_H_ +#define VP8_DECODER_DETOKENIZE_H_ + +#include "onyxd_int.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void vp8_reset_mb_tokens_context(MACROBLOCKD *x); +int vp8_decode_mb_tokens(VP8D_COMP *, MACROBLOCKD *); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VP8_DECODER_DETOKENIZE_H_ diff --git a/media/libvpx/vp8/decoder/ec_types.h b/media/libvpx/vp8/decoder/ec_types.h new file mode 100644 index 000000000..3af5ca86b --- /dev/null +++ b/media/libvpx/vp8/decoder/ec_types.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2011 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#ifndef VP8_DECODER_EC_TYPES_H_ +#define VP8_DECODER_EC_TYPES_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define MAX_OVERLAPS 16 + + +/* The area (pixel area in Q6) the block pointed to by bmi overlaps + * another block with. + */ +typedef struct +{ + int overlap; + union b_mode_info *bmi; +} OVERLAP_NODE; + +/* Structure to keep track of overlapping blocks on a block level. */ +typedef struct +{ + /* TODO(holmer): This array should be exchanged for a linked list */ + OVERLAP_NODE overlaps[MAX_OVERLAPS]; +} B_OVERLAP; + +/* Structure used to hold all the overlaps of a macroblock. The overlaps of a + * macroblock is further divided into block overlaps. + */ +typedef struct +{ + B_OVERLAP overlaps[16]; +} MB_OVERLAP; + +/* Structure for keeping track of motion vectors and which reference frame they + * refer to. Used for motion vector interpolation. + */ +typedef struct +{ + MV mv; + MV_REFERENCE_FRAME ref_frame; +} EC_BLOCK; + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VP8_DECODER_EC_TYPES_H_ diff --git a/media/libvpx/vp8/decoder/error_concealment.c b/media/libvpx/vp8/decoder/error_concealment.c new file mode 100644 index 000000000..bb6d443c4 --- /dev/null +++ b/media/libvpx/vp8/decoder/error_concealment.c @@ -0,0 +1,596 @@ +/* + * Copyright (c) 2011 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + +#include <assert.h> + +#include "error_concealment.h" +#include "onyxd_int.h" +#include "decodemv.h" +#include "vpx_mem/vpx_mem.h" +#include "vp8/common/findnearmv.h" +#include "vp8/common/common.h" + +#define FLOOR(x,q) ((x) & -(1 << (q))) + +#define NUM_NEIGHBORS 20 + +typedef struct ec_position +{ + int row; + int col; +} EC_POS; + +/* + * Regenerate the table in Matlab with: + * x = meshgrid((1:4), (1:4)); + * y = meshgrid((1:4), (1:4))'; + * W = round((1./(sqrt(x.^2 + y.^2))*2^7)); + * W(1,1) = 0; + */ +static const int weights_q7[5][5] = { + { 0, 128, 64, 43, 32 }, + {128, 91, 57, 40, 31 }, + { 64, 57, 45, 36, 29 }, + { 43, 40, 36, 30, 26 }, + { 32, 31, 29, 26, 23 } +}; + +int vp8_alloc_overlap_lists(VP8D_COMP *pbi) +{ + if (pbi->overlaps != NULL) + { + vpx_free(pbi->overlaps); + pbi->overlaps = NULL; + } + + pbi->overlaps = vpx_calloc(pbi->common.mb_rows * pbi->common.mb_cols, + sizeof(MB_OVERLAP)); + + if (pbi->overlaps == NULL) + return -1; + + return 0; +} + +void vp8_de_alloc_overlap_lists(VP8D_COMP *pbi) +{ + vpx_free(pbi->overlaps); + pbi->overlaps = NULL; +} + +/* Inserts a new overlap area value to the list of overlaps of a block */ +static void assign_overlap(OVERLAP_NODE* overlaps, + union b_mode_info *bmi, + int overlap) +{ + int i; + if (overlap <= 0) + return; + /* Find and assign to the next empty overlap node in the list of overlaps. + * Empty is defined as bmi == NULL */ + for (i = 0; i < MAX_OVERLAPS; i++) + { + if (overlaps[i].bmi == NULL) + { + overlaps[i].bmi = bmi; + overlaps[i].overlap = overlap; + break; + } + } +} + +/* Calculates the overlap area between two 4x4 squares, where the first + * square has its upper-left corner at (b1_row, b1_col) and the second + * square has its upper-left corner at (b2_row, b2_col). Doesn't + * properly handle squares which do not overlap. + */ +static int block_overlap(int b1_row, int b1_col, int b2_row, int b2_col) +{ + const int int_top = MAX(b1_row, b2_row); // top + const int int_left = MAX(b1_col, b2_col); // left + /* Since each block is 4x4 pixels, adding 4 (Q3) to the left/top edge + * gives us the right/bottom edge. + */ + const int int_right = MIN(b1_col + (4<<3), b2_col + (4<<3)); // right + const int int_bottom = MIN(b1_row + (4<<3), b2_row + (4<<3)); // bottom + return (int_bottom - int_top) * (int_right - int_left); +} + +/* Calculates the overlap area for all blocks in a macroblock at position + * (mb_row, mb_col) in macroblocks, which are being overlapped by a given + * overlapping block at position (new_row, new_col) (in pixels, Q3). The + * first block being overlapped in the macroblock has position (first_blk_row, + * first_blk_col) in blocks relative the upper-left corner of the image. + */ +static void calculate_overlaps_mb(B_OVERLAP *b_overlaps, union b_mode_info *bmi, + int new_row, int new_col, + int mb_row, int mb_col, + int first_blk_row, int first_blk_col) +{ + /* Find the blocks within this MB (defined by mb_row, mb_col) which are + * overlapped by bmi and calculate and assign overlap for each of those + * blocks. */ + + /* Block coordinates relative the upper-left block */ + const int rel_ol_blk_row = first_blk_row - mb_row * 4; + const int rel_ol_blk_col = first_blk_col - mb_col * 4; + /* If the block partly overlaps any previous MB, these coordinates + * can be < 0. We don't want to access blocks in previous MBs. 
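+     *
+     * Hypothetical example: first_blk_row == 6 with mb_row == 2 gives
+     * rel_ol_blk_row == 6 - 8 == -2; the MAX(..., 0) below clamps it to
+     * 0 so blk_idx stays inside this MB's own 4x4 block grid.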
+ */ + const int blk_idx = MAX(rel_ol_blk_row,0) * 4 + MAX(rel_ol_blk_col,0); + /* Upper left overlapping block */ + B_OVERLAP *b_ol_ul = &(b_overlaps[blk_idx]); + + /* Calculate and assign overlaps for all blocks in this MB + * which the motion compensated block overlaps + */ + /* Avoid calculating overlaps for blocks in later MBs */ + int end_row = MIN(4 + mb_row * 4 - first_blk_row, 2); + int end_col = MIN(4 + mb_col * 4 - first_blk_col, 2); + int row, col; + + /* Check if new_row and new_col are evenly divisible by 4 (Q3), + * and if so we shouldn't check neighboring blocks + */ + if (new_row >= 0 && (new_row & 0x1F) == 0) + end_row = 1; + if (new_col >= 0 && (new_col & 0x1F) == 0) + end_col = 1; + + /* Check if the overlapping block partly overlaps a previous MB + * and if so, we're overlapping fewer blocks in this MB. + */ + if (new_row < (mb_row*16)<<3) + end_row = 1; + if (new_col < (mb_col*16)<<3) + end_col = 1; + + for (row = 0; row < end_row; ++row) + { + for (col = 0; col < end_col; ++col) + { + /* input in Q3, result in Q6 */ + const int overlap = block_overlap(new_row, new_col, + (((first_blk_row + row) * + 4) << 3), + (((first_blk_col + col) * + 4) << 3)); + assign_overlap(b_ol_ul[row * 4 + col].overlaps, bmi, overlap); + } + } +} + +void vp8_calculate_overlaps(MB_OVERLAP *overlap_ul, + int mb_rows, int mb_cols, + union b_mode_info *bmi, + int b_row, int b_col) +{ + MB_OVERLAP *mb_overlap; + int row, col, rel_row, rel_col; + int new_row, new_col; + int end_row, end_col; + int overlap_b_row, overlap_b_col; + int overlap_mb_row, overlap_mb_col; + + /* mb subpixel position */ + row = (4 * b_row) << 3; /* Q3 */ + col = (4 * b_col) << 3; /* Q3 */ + + /* reverse compensate for motion */ + new_row = row - bmi->mv.as_mv.row; + new_col = col - bmi->mv.as_mv.col; + + if (new_row >= ((16*mb_rows) << 3) || new_col >= ((16*mb_cols) << 3)) + { + /* the new block ended up outside the frame */ + return; + } + + if (new_row <= (-4 << 3) || new_col <= (-4 << 3)) + { + /* outside the frame */ + return; + } + /* overlapping block's position in blocks */ + overlap_b_row = FLOOR(new_row / 4, 3) >> 3; + overlap_b_col = FLOOR(new_col / 4, 3) >> 3; + + /* overlapping block's MB position in MBs + * operations are done in Q3 + */ + overlap_mb_row = FLOOR((overlap_b_row << 3) / 4, 3) >> 3; + overlap_mb_col = FLOOR((overlap_b_col << 3) / 4, 3) >> 3; + + end_row = MIN(mb_rows - overlap_mb_row, 2); + end_col = MIN(mb_cols - overlap_mb_col, 2); + + /* Don't calculate overlap for MBs we don't overlap */ + /* Check if the new block row starts at the last block row of the MB */ + if (abs(new_row - ((16*overlap_mb_row) << 3)) < ((3*4) << 3)) + end_row = 1; + /* Check if the new block col starts at the last block col of the MB */ + if (abs(new_col - ((16*overlap_mb_col) << 3)) < ((3*4) << 3)) + end_col = 1; + + /* find the MB(s) this block is overlapping */ + for (rel_row = 0; rel_row < end_row; ++rel_row) + { + for (rel_col = 0; rel_col < end_col; ++rel_col) + { + if (overlap_mb_row + rel_row < 0 || + overlap_mb_col + rel_col < 0) + continue; + mb_overlap = overlap_ul + (overlap_mb_row + rel_row) * mb_cols + + overlap_mb_col + rel_col; + + calculate_overlaps_mb(mb_overlap->overlaps, bmi, + new_row, new_col, + overlap_mb_row + rel_row, + overlap_mb_col + rel_col, + overlap_b_row + rel_row, + overlap_b_col + rel_col); + } + } +} + +/* Estimates a motion vector given the overlapping blocks' motion vectors. + * Filters out all overlapping blocks which do not refer to the correct + * reference frame type. 
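+ *
+ * The accumulation is done in mixed fixed point: overlap is Q6 and the
+ * MVs are Q3, so each product is Q9, and dividing by the Q6 overlap sum
+ * returns the estimate to Q3. Hypothetical example: two overlaps of 32
+ * with MV columns 8 and 16 give (32*8 + 32*16) / 64 == 12, i.e. 1.5 pel.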
+ */ +static void estimate_mv(const OVERLAP_NODE *overlaps, union b_mode_info *bmi) +{ + int i; + int overlap_sum = 0; + int row_acc = 0; + int col_acc = 0; + + bmi->mv.as_int = 0; + for (i=0; i < MAX_OVERLAPS; ++i) + { + if (overlaps[i].bmi == NULL) + break; + col_acc += overlaps[i].overlap * overlaps[i].bmi->mv.as_mv.col; + row_acc += overlaps[i].overlap * overlaps[i].bmi->mv.as_mv.row; + overlap_sum += overlaps[i].overlap; + } + if (overlap_sum > 0) + { + /* Q9 / Q6 = Q3 */ + bmi->mv.as_mv.col = col_acc / overlap_sum; + bmi->mv.as_mv.row = row_acc / overlap_sum; + } + else + { + bmi->mv.as_mv.col = 0; + bmi->mv.as_mv.row = 0; + } +} + +/* Estimates all motion vectors for a macroblock given the lists of + * overlaps for each block. Decides whether or not the MVs must be clamped. + */ +static void estimate_mb_mvs(const B_OVERLAP *block_overlaps, + MODE_INFO *mi, + int mb_to_left_edge, + int mb_to_right_edge, + int mb_to_top_edge, + int mb_to_bottom_edge) +{ + int row, col; + int non_zero_count = 0; + MV * const filtered_mv = &(mi->mbmi.mv.as_mv); + union b_mode_info * const bmi = mi->bmi; + filtered_mv->col = 0; + filtered_mv->row = 0; + mi->mbmi.need_to_clamp_mvs = 0; + for (row = 0; row < 4; ++row) + { + int this_b_to_top_edge = mb_to_top_edge + ((row*4)<<3); + int this_b_to_bottom_edge = mb_to_bottom_edge - ((row*4)<<3); + for (col = 0; col < 4; ++col) + { + int i = row * 4 + col; + int this_b_to_left_edge = mb_to_left_edge + ((col*4)<<3); + int this_b_to_right_edge = mb_to_right_edge - ((col*4)<<3); + /* Estimate vectors for all blocks which are overlapped by this */ + /* type. Interpolate/extrapolate the rest of the block's MVs */ + estimate_mv(block_overlaps[i].overlaps, &(bmi[i])); + mi->mbmi.need_to_clamp_mvs |= vp8_check_mv_bounds( + &bmi[i].mv, + this_b_to_left_edge, + this_b_to_right_edge, + this_b_to_top_edge, + this_b_to_bottom_edge); + if (bmi[i].mv.as_int != 0) + { + ++non_zero_count; + filtered_mv->col += bmi[i].mv.as_mv.col; + filtered_mv->row += bmi[i].mv.as_mv.row; + } + } + } + if (non_zero_count > 0) + { + filtered_mv->col /= non_zero_count; + filtered_mv->row /= non_zero_count; + } +} + +static void calc_prev_mb_overlaps(MB_OVERLAP *overlaps, MODE_INFO *prev_mi, + int mb_row, int mb_col, + int mb_rows, int mb_cols) +{ + int sub_row; + int sub_col; + for (sub_row = 0; sub_row < 4; ++sub_row) + { + for (sub_col = 0; sub_col < 4; ++sub_col) + { + vp8_calculate_overlaps( + overlaps, mb_rows, mb_cols, + &(prev_mi->bmi[sub_row * 4 + sub_col]), + 4 * mb_row + sub_row, + 4 * mb_col + sub_col); + } + } +} + +/* Estimate all missing motion vectors. This function does the same as the one + * above, but has different input arguments. */ +static void estimate_missing_mvs(MB_OVERLAP *overlaps, + MODE_INFO *mi, MODE_INFO *prev_mi, + int mb_rows, int mb_cols, + unsigned int first_corrupt) +{ + int mb_row, mb_col; + memset(overlaps, 0, sizeof(MB_OVERLAP) * mb_rows * mb_cols); + /* First calculate the overlaps for all blocks */ + for (mb_row = 0; mb_row < mb_rows; ++mb_row) + { + for (mb_col = 0; mb_col < mb_cols; ++mb_col) + { + /* We're only able to use blocks referring to the last frame + * when extrapolating new vectors. 
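+             * Blocks coded against the golden or altref frame are
+             * skipped because their vectors span a different temporal
+             * distance and cannot be reused directly as last-frame
+             * motion.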
+             */
+            if (prev_mi->mbmi.ref_frame == LAST_FRAME)
+            {
+                calc_prev_mb_overlaps(overlaps, prev_mi,
+                                      mb_row, mb_col,
+                                      mb_rows, mb_cols);
+            }
+            ++prev_mi;
+        }
+        ++prev_mi;
+    }
+
+    mb_row = first_corrupt / mb_cols;
+    mb_col = first_corrupt - mb_row * mb_cols;
+    mi += mb_row*(mb_cols + 1) + mb_col;
+    /* Go through all macroblocks in the current image with missing MVs
+     * and calculate new MVs using the overlaps.
+     */
+    for (; mb_row < mb_rows; ++mb_row)
+    {
+        int mb_to_top_edge = -((mb_row * 16)) << 3;
+        int mb_to_bottom_edge = ((mb_rows - 1 - mb_row) * 16) << 3;
+        for (; mb_col < mb_cols; ++mb_col)
+        {
+            int mb_to_left_edge = -((mb_col * 16) << 3);
+            int mb_to_right_edge = ((mb_cols - 1 - mb_col) * 16) << 3;
+            const B_OVERLAP *block_overlaps =
+                    overlaps[mb_row*mb_cols + mb_col].overlaps;
+            mi->mbmi.ref_frame = LAST_FRAME;
+            mi->mbmi.mode = SPLITMV;
+            mi->mbmi.uv_mode = DC_PRED;
+            mi->mbmi.partitioning = 3;
+            mi->mbmi.segment_id = 0;
+            estimate_mb_mvs(block_overlaps,
+                            mi,
+                            mb_to_left_edge,
+                            mb_to_right_edge,
+                            mb_to_top_edge,
+                            mb_to_bottom_edge);
+            ++mi;
+        }
+        mb_col = 0;
+        ++mi;
+    }
+}
+
+void vp8_estimate_missing_mvs(VP8D_COMP *pbi)
+{
+    VP8_COMMON * const pc = &pbi->common;
+    estimate_missing_mvs(pbi->overlaps,
+                         pc->mi, pc->prev_mi,
+                         pc->mb_rows, pc->mb_cols,
+                         pbi->mvs_corrupt_from_mb);
+}
+
+static void assign_neighbor(EC_BLOCK *neighbor, MODE_INFO *mi, int block_idx)
+{
+    assert(mi->mbmi.ref_frame < MAX_REF_FRAMES);
+    neighbor->ref_frame = mi->mbmi.ref_frame;
+    neighbor->mv = mi->bmi[block_idx].mv.as_mv;
+}
+
+/* Finds the neighboring blocks of a macroblock. In the general case
+ * 20 blocks are found. If fewer blocks are found due to image
+ * boundaries, those positions in the EC_BLOCK array are left "empty".
+ * The neighbors are enumerated with the upper-left neighbor as the first
+ * element, the second element refers to the neighbor to the right of the
+ * previous neighbor, and so on. The last element refers to the neighbor
+ * below the first neighbor.
+ */
+static void find_neighboring_blocks(MODE_INFO *mi,
+                                    EC_BLOCK *neighbors,
+                                    int mb_row, int mb_col,
+                                    int mb_rows, int mb_cols,
+                                    int mi_stride)
+{
+    int i = 0;
+    int j;
+    if (mb_row > 0)
+    {
+        /* upper left */
+        if (mb_col > 0)
+            assign_neighbor(&neighbors[i], mi - mi_stride - 1, 15);
+        ++i;
+        /* above */
+        for (j = 12; j < 16; ++j, ++i)
+            assign_neighbor(&neighbors[i], mi - mi_stride, j);
+    }
+    else
+        i += 5;
+    if (mb_col < mb_cols - 1)
+    {
+        /* upper right */
+        if (mb_row > 0)
+            assign_neighbor(&neighbors[i], mi - mi_stride + 1, 12);
+        ++i;
+        /* right */
+        for (j = 0; j <= 12; j += 4, ++i)
+            assign_neighbor(&neighbors[i], mi + 1, j);
+    }
+    else
+        i += 5;
+    if (mb_row < mb_rows - 1)
+    {
+        /* lower right */
+        if (mb_col < mb_cols - 1)
+            assign_neighbor(&neighbors[i], mi + mi_stride + 1, 0);
+        ++i;
+        /* below */
+        for (j = 0; j < 4; ++j, ++i)
+            assign_neighbor(&neighbors[i], mi + mi_stride, j);
+    }
+    else
+        i += 5;
+    if (mb_col > 0)
+    {
+        /* lower left */
+        if (mb_row < mb_rows - 1)
+            assign_neighbor(&neighbors[i], mi + mi_stride - 1, 4);
+        ++i;
+        /* left */
+        for (j = 3; j < 16; j += 4, ++i)
+        {
+            assign_neighbor(&neighbors[i], mi - 1, j);
+        }
+    }
+    else
+        i += 5;
+    assert(i == 20);
+}
+
+/* Interpolates all motion vectors for a macroblock from the neighboring
+ * blocks' motion vectors.
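+ * Each of the 16 block MVs becomes a weighted average of the (up to 20)
+ * neighboring block MVs, using weights_q7 indexed by the row and column
+ * distance to the neighbor: e.g. block (0,0) weights its upper-left
+ * diagonal neighbor (distance 1,1) by 91/128 and the most distant
+ * neighbors (distance 4,4) by 23/128.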
+ */ +static void interpolate_mvs(MACROBLOCKD *mb, + EC_BLOCK *neighbors, + MV_REFERENCE_FRAME dom_ref_frame) +{ + int row, col, i; + MODE_INFO * const mi = mb->mode_info_context; + /* Table with the position of the neighboring blocks relative the position + * of the upper left block of the current MB. Starting with the upper left + * neighbor and going to the right. + */ + const EC_POS neigh_pos[NUM_NEIGHBORS] = { + {-1,-1}, {-1,0}, {-1,1}, {-1,2}, {-1,3}, + {-1,4}, {0,4}, {1,4}, {2,4}, {3,4}, + {4,4}, {4,3}, {4,2}, {4,1}, {4,0}, + {4,-1}, {3,-1}, {2,-1}, {1,-1}, {0,-1} + }; + mi->mbmi.need_to_clamp_mvs = 0; + for (row = 0; row < 4; ++row) + { + int mb_to_top_edge = mb->mb_to_top_edge + ((row*4)<<3); + int mb_to_bottom_edge = mb->mb_to_bottom_edge - ((row*4)<<3); + for (col = 0; col < 4; ++col) + { + int mb_to_left_edge = mb->mb_to_left_edge + ((col*4)<<3); + int mb_to_right_edge = mb->mb_to_right_edge - ((col*4)<<3); + int w_sum = 0; + int mv_row_sum = 0; + int mv_col_sum = 0; + int_mv * const mv = &(mi->bmi[row*4 + col].mv); + mv->as_int = 0; + for (i = 0; i < NUM_NEIGHBORS; ++i) + { + /* Calculate the weighted sum of neighboring MVs referring + * to the dominant frame type. + */ + const int w = weights_q7[abs(row - neigh_pos[i].row)] + [abs(col - neigh_pos[i].col)]; + if (neighbors[i].ref_frame != dom_ref_frame) + continue; + w_sum += w; + /* Q7 * Q3 = Q10 */ + mv_row_sum += w*neighbors[i].mv.row; + mv_col_sum += w*neighbors[i].mv.col; + } + if (w_sum > 0) + { + /* Avoid division by zero. + * Normalize with the sum of the coefficients + * Q3 = Q10 / Q7 + */ + mv->as_mv.row = mv_row_sum / w_sum; + mv->as_mv.col = mv_col_sum / w_sum; + mi->mbmi.need_to_clamp_mvs |= vp8_check_mv_bounds( + mv, + mb_to_left_edge, + mb_to_right_edge, + mb_to_top_edge, + mb_to_bottom_edge); + } + } + } +} + +void vp8_interpolate_motion(MACROBLOCKD *mb, + int mb_row, int mb_col, + int mb_rows, int mb_cols, + int mi_stride) +{ + /* Find relevant neighboring blocks */ + EC_BLOCK neighbors[NUM_NEIGHBORS]; + int i; + /* Initialize the array. MAX_REF_FRAMES is interpreted as "doesn't exist" */ + for (i = 0; i < NUM_NEIGHBORS; ++i) + { + neighbors[i].ref_frame = MAX_REF_FRAMES; + neighbors[i].mv.row = neighbors[i].mv.col = 0; + } + find_neighboring_blocks(mb->mode_info_context, + neighbors, + mb_row, mb_col, + mb_rows, mb_cols, + mb->mode_info_stride); + /* Interpolate MVs for the missing blocks from the surrounding + * blocks which refer to the last frame. */ + interpolate_mvs(mb, neighbors, LAST_FRAME); + + mb->mode_info_context->mbmi.ref_frame = LAST_FRAME; + mb->mode_info_context->mbmi.mode = SPLITMV; + mb->mode_info_context->mbmi.uv_mode = DC_PRED; + mb->mode_info_context->mbmi.partitioning = 3; + mb->mode_info_context->mbmi.segment_id = 0; +} + +void vp8_conceal_corrupt_mb(MACROBLOCKD *xd) +{ + /* This macroblock has corrupt residual, use the motion compensated + image (predictor) for concealment */ + + /* The build predictor functions now output directly into the dst buffer, + * so the copies are no longer necessary */ + +} diff --git a/media/libvpx/vp8/decoder/error_concealment.h b/media/libvpx/vp8/decoder/error_concealment.h new file mode 100644 index 000000000..9a1e02486 --- /dev/null +++ b/media/libvpx/vp8/decoder/error_concealment.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2011 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. 
An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +#ifndef VP8_DECODER_ERROR_CONCEALMENT_H_ +#define VP8_DECODER_ERROR_CONCEALMENT_H_ + +#include "onyxd_int.h" +#include "ec_types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Allocate memory for the overlap lists */ +int vp8_alloc_overlap_lists(VP8D_COMP *pbi); + +/* Deallocate the overlap lists */ +void vp8_de_alloc_overlap_lists(VP8D_COMP *pbi); + +/* Estimate all missing motion vectors. */ +void vp8_estimate_missing_mvs(VP8D_COMP *pbi); + +/* Functions for spatial MV interpolation */ + +/* Interpolates all motion vectors for a macroblock mb at position + * (mb_row, mb_col). */ +void vp8_interpolate_motion(MACROBLOCKD *mb, + int mb_row, int mb_col, + int mb_rows, int mb_cols, + int mi_stride); + +/* Conceal a macroblock with corrupt residual. + * Copies the prediction signal to the reconstructed image. + */ +void vp8_conceal_corrupt_mb(MACROBLOCKD *xd); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VP8_DECODER_ERROR_CONCEALMENT_H_ diff --git a/media/libvpx/vp8/decoder/onyxd_if.c b/media/libvpx/vp8/decoder/onyxd_if.c new file mode 100644 index 000000000..9015fcbb4 --- /dev/null +++ b/media/libvpx/vp8/decoder/onyxd_if.c @@ -0,0 +1,505 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +#include "vp8/common/onyxc_int.h" +#if CONFIG_POSTPROC +#include "vp8/common/postproc.h" +#endif +#include "vp8/common/onyxd.h" +#include "onyxd_int.h" +#include "vpx_mem/vpx_mem.h" +#include "vp8/common/alloccommon.h" +#include "vp8/common/loopfilter.h" +#include "vp8/common/swapyv12buffer.h" +#include "vp8/common/threading.h" +#include "decoderthreading.h" +#include <stdio.h> +#include <assert.h> + +#include "vp8/common/quant_common.h" +#include "./vpx_scale_rtcd.h" +#include "vpx_scale/vpx_scale.h" +#include "vp8/common/systemdependent.h" +#include "vpx_ports/vpx_timer.h" +#include "detokenize.h" +#if CONFIG_ERROR_CONCEALMENT +#include "error_concealment.h" +#endif +#if ARCH_ARM +#include "vpx_ports/arm.h" +#endif + +extern void vp8_init_loop_filter(VP8_COMMON *cm); +extern void vp8cx_init_de_quantizer(VP8D_COMP *pbi); +static int get_free_fb (VP8_COMMON *cm); +static void ref_cnt_fb (int *buf, int *idx, int new_idx); + +static void remove_decompressor(VP8D_COMP *pbi) +{ +#if CONFIG_ERROR_CONCEALMENT + vp8_de_alloc_overlap_lists(pbi); +#endif + vp8_remove_common(&pbi->common); + vpx_free(pbi); +} + +static struct VP8D_COMP * create_decompressor(VP8D_CONFIG *oxcf) +{ + VP8D_COMP *pbi = vpx_memalign(32, sizeof(VP8D_COMP)); + + if (!pbi) + return NULL; + + memset(pbi, 0, sizeof(VP8D_COMP)); + + if (setjmp(pbi->common.error.jmp)) + { + pbi->common.error.setjmp = 0; + remove_decompressor(pbi); + return 0; + } + + pbi->common.error.setjmp = 1; + + vp8_create_common(&pbi->common); + + pbi->common.current_video_frame = 0; + pbi->ready_for_new_data = 1; + + /* vp8cx_init_de_quantizer() is first called here. Add check in frame_init_dequantizer() to avoid + * unnecessary calling of vp8cx_init_de_quantizer() for every frame. 
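+     * The call precomputes the dequantization tables over the full range
+     * of quantizer indices, so repeating it every frame is redundant
+     * work rather than an error.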
+ */ + vp8cx_init_de_quantizer(pbi); + + vp8_loop_filter_init(&pbi->common); + + pbi->common.error.setjmp = 0; + +#if CONFIG_ERROR_CONCEALMENT + pbi->ec_enabled = oxcf->error_concealment; + pbi->overlaps = NULL; +#else + (void)oxcf; + pbi->ec_enabled = 0; +#endif + /* Error concealment is activated after a key frame has been + * decoded without errors when error concealment is enabled. + */ + pbi->ec_active = 0; + + pbi->decoded_key_frame = 0; + + /* Independent partitions is activated when a frame updates the + * token probability table to have equal probabilities over the + * PREV_COEF context. + */ + pbi->independent_partitions = 0; + + vp8_setup_block_dptrs(&pbi->mb); + + return pbi; +} + +vpx_codec_err_t vp8dx_get_reference(VP8D_COMP *pbi, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd) +{ + VP8_COMMON *cm = &pbi->common; + int ref_fb_idx; + + if (ref_frame_flag == VP8_LAST_FRAME) + ref_fb_idx = cm->lst_fb_idx; + else if (ref_frame_flag == VP8_GOLD_FRAME) + ref_fb_idx = cm->gld_fb_idx; + else if (ref_frame_flag == VP8_ALTR_FRAME) + ref_fb_idx = cm->alt_fb_idx; + else{ + vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR, + "Invalid reference frame"); + return pbi->common.error.error_code; + } + + if(cm->yv12_fb[ref_fb_idx].y_height != sd->y_height || + cm->yv12_fb[ref_fb_idx].y_width != sd->y_width || + cm->yv12_fb[ref_fb_idx].uv_height != sd->uv_height || + cm->yv12_fb[ref_fb_idx].uv_width != sd->uv_width){ + vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR, + "Incorrect buffer dimensions"); + } + else + vp8_yv12_copy_frame(&cm->yv12_fb[ref_fb_idx], sd); + + return pbi->common.error.error_code; +} + + +vpx_codec_err_t vp8dx_set_reference(VP8D_COMP *pbi, enum vpx_ref_frame_type ref_frame_flag, YV12_BUFFER_CONFIG *sd) +{ + VP8_COMMON *cm = &pbi->common; + int *ref_fb_ptr = NULL; + int free_fb; + + if (ref_frame_flag == VP8_LAST_FRAME) + ref_fb_ptr = &cm->lst_fb_idx; + else if (ref_frame_flag == VP8_GOLD_FRAME) + ref_fb_ptr = &cm->gld_fb_idx; + else if (ref_frame_flag == VP8_ALTR_FRAME) + ref_fb_ptr = &cm->alt_fb_idx; + else{ + vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR, + "Invalid reference frame"); + return pbi->common.error.error_code; + } + + if(cm->yv12_fb[*ref_fb_ptr].y_height != sd->y_height || + cm->yv12_fb[*ref_fb_ptr].y_width != sd->y_width || + cm->yv12_fb[*ref_fb_ptr].uv_height != sd->uv_height || + cm->yv12_fb[*ref_fb_ptr].uv_width != sd->uv_width){ + vpx_internal_error(&pbi->common.error, VPX_CODEC_ERROR, + "Incorrect buffer dimensions"); + } + else{ + /* Find an empty frame buffer. */ + free_fb = get_free_fb(cm); + /* Decrease fb_idx_ref_cnt since it will be increased again in + * ref_cnt_fb() below. */ + cm->fb_idx_ref_cnt[free_fb]--; + + /* Manage the reference counters and copy image. */ + ref_cnt_fb (cm->fb_idx_ref_cnt, ref_fb_ptr, free_fb); + vp8_yv12_copy_frame(sd, &cm->yv12_fb[*ref_fb_ptr]); + } + + return pbi->common.error.error_code; +} + +static int get_free_fb (VP8_COMMON *cm) +{ + int i; + for (i = 0; i < NUM_YV12_BUFFERS; i++) + if (cm->fb_idx_ref_cnt[i] == 0) + break; + + assert(i < NUM_YV12_BUFFERS); + cm->fb_idx_ref_cnt[i] = 1; + return i; +} + +static void ref_cnt_fb (int *buf, int *idx, int new_idx) +{ + if (buf[*idx] > 0) + buf[*idx]--; + + *idx = new_idx; + + buf[new_idx]++; +} + +/* If any buffer copy / swapping is signalled it should be done here. 
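+ * Convention used below: copy_buffer_to_arf/copy_buffer_to_gf == 1
+ * copies from the last frame and == 2 from the other of golden/altref,
+ * while the refresh_* flags point a reference at the newly decoded
+ * buffer instead of copying it.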
+ */
+static int swap_frame_buffers (VP8_COMMON *cm)
+{
+    int err = 0;
+
+    /* The alternate reference frame or golden frame can be updated
+     * using the new, last, or golden/alt ref frame. If it
+     * is updated using the newly decoded frame it is a refresh.
+     * An update using the last or golden/alt ref frame is a copy.
+     */
+    if (cm->copy_buffer_to_arf)
+    {
+        int new_fb = 0;
+
+        if (cm->copy_buffer_to_arf == 1)
+            new_fb = cm->lst_fb_idx;
+        else if (cm->copy_buffer_to_arf == 2)
+            new_fb = cm->gld_fb_idx;
+        else
+            err = -1;
+
+        ref_cnt_fb (cm->fb_idx_ref_cnt, &cm->alt_fb_idx, new_fb);
+    }
+
+    if (cm->copy_buffer_to_gf)
+    {
+        int new_fb = 0;
+
+        if (cm->copy_buffer_to_gf == 1)
+            new_fb = cm->lst_fb_idx;
+        else if (cm->copy_buffer_to_gf == 2)
+            new_fb = cm->alt_fb_idx;
+        else
+            err = -1;
+
+        ref_cnt_fb (cm->fb_idx_ref_cnt, &cm->gld_fb_idx, new_fb);
+    }
+
+    if (cm->refresh_golden_frame)
+        ref_cnt_fb (cm->fb_idx_ref_cnt, &cm->gld_fb_idx, cm->new_fb_idx);
+
+    if (cm->refresh_alt_ref_frame)
+        ref_cnt_fb (cm->fb_idx_ref_cnt, &cm->alt_fb_idx, cm->new_fb_idx);
+
+    if (cm->refresh_last_frame)
+    {
+        ref_cnt_fb (cm->fb_idx_ref_cnt, &cm->lst_fb_idx, cm->new_fb_idx);
+
+        cm->frame_to_show = &cm->yv12_fb[cm->lst_fb_idx];
+    }
+    else
+        cm->frame_to_show = &cm->yv12_fb[cm->new_fb_idx];
+
+    cm->fb_idx_ref_cnt[cm->new_fb_idx]--;
+
+    return err;
+}
+
+static int check_fragments_for_errors(VP8D_COMP *pbi)
+{
+    if (!pbi->ec_active &&
+        pbi->fragments.count <= 1 && pbi->fragments.sizes[0] == 0)
+    {
+        VP8_COMMON *cm = &pbi->common;
+
+        /* If error concealment is disabled we won't signal missing frames
+         * to the decoder.
+         */
+        if (cm->fb_idx_ref_cnt[cm->lst_fb_idx] > 1)
+        {
+            /* The last reference shares its buffer with another reference
+             * buffer. Move it to its own buffer before setting it as
+             * corrupt, otherwise we will make multiple buffers corrupt.
+             */
+            const int prev_idx = cm->lst_fb_idx;
+            cm->fb_idx_ref_cnt[prev_idx]--;
+            cm->lst_fb_idx = get_free_fb(cm);
+            vp8_yv12_copy_frame(&cm->yv12_fb[prev_idx],
+                                &cm->yv12_fb[cm->lst_fb_idx]);
+        }
+        /* This is used to signal that we are missing frames.
+         * We do not know whether the missing frame(s) were supposed to
+         * update any of the reference buffers, but we act conservatively
+         * and mark only the last buffer as corrupted.
+         */
+        cm->yv12_fb[cm->lst_fb_idx].corrupted = 1;
+
+        /* Signal that we have no frame to show. */
+        cm->show_frame = 0;
+
+        /* Nothing more to do. */
+        return 0;
+    }
+
+    return 1;
+}
+
+int vp8dx_receive_compressed_data(VP8D_COMP *pbi, size_t size,
+                                  const uint8_t *source,
+                                  int64_t time_stamp)
+{
+    VP8_COMMON *cm = &pbi->common;
+    int retcode = -1;
+    (void)size;
+    (void)source;
+
+    pbi->common.error.error_code = VPX_CODEC_OK;
+
+    retcode = check_fragments_for_errors(pbi);
+    if (retcode <= 0)
+        return retcode;
+
+    cm->new_fb_idx = get_free_fb (cm);
+
+    /* setup reference frames for vp8_decode_frame */
+    pbi->dec_fb_ref[INTRA_FRAME]  = &cm->yv12_fb[cm->new_fb_idx];
+    pbi->dec_fb_ref[LAST_FRAME]   = &cm->yv12_fb[cm->lst_fb_idx];
+    pbi->dec_fb_ref[GOLDEN_FRAME] = &cm->yv12_fb[cm->gld_fb_idx];
+    pbi->dec_fb_ref[ALTREF_FRAME] = &cm->yv12_fb[cm->alt_fb_idx];
+
+    if (setjmp(pbi->common.error.jmp))
+    {
+       /* We do not know whether the missing frame(s) were supposed to
+        * update any of the reference buffers, but we act conservatively
+        * and mark only the last buffer as corrupted.
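+        * Any later frame predicting from this buffer inherits the flag,
+        * since the decoder ORs each reference buffer's corrupted state
+        * into xd->corrupted before reconstructing from it.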
+ */ + cm->yv12_fb[cm->lst_fb_idx].corrupted = 1; + + if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0) + cm->fb_idx_ref_cnt[cm->new_fb_idx]--; + + goto decode_exit; + } + + pbi->common.error.setjmp = 1; + + retcode = vp8_decode_frame(pbi); + + if (retcode < 0) + { + if (cm->fb_idx_ref_cnt[cm->new_fb_idx] > 0) + cm->fb_idx_ref_cnt[cm->new_fb_idx]--; + + pbi->common.error.error_code = VPX_CODEC_ERROR; + goto decode_exit; + } + + if (swap_frame_buffers (cm)) + { + pbi->common.error.error_code = VPX_CODEC_ERROR; + goto decode_exit; + } + + vp8_clear_system_state(); + + if (cm->show_frame) + { + cm->current_video_frame++; + cm->show_frame_mi = cm->mi; + } + + #if CONFIG_ERROR_CONCEALMENT + /* swap the mode infos to storage for future error concealment */ + if (pbi->ec_enabled && pbi->common.prev_mi) + { + MODE_INFO* tmp = pbi->common.prev_mi; + int row, col; + pbi->common.prev_mi = pbi->common.mi; + pbi->common.mi = tmp; + + /* Propagate the segment_ids to the next frame */ + for (row = 0; row < pbi->common.mb_rows; ++row) + { + for (col = 0; col < pbi->common.mb_cols; ++col) + { + const int i = row*pbi->common.mode_info_stride + col; + pbi->common.mi[i].mbmi.segment_id = + pbi->common.prev_mi[i].mbmi.segment_id; + } + } + } +#endif + + pbi->ready_for_new_data = 0; + pbi->last_time_stamp = time_stamp; + +decode_exit: + pbi->common.error.setjmp = 0; + vp8_clear_system_state(); + return retcode; +} +int vp8dx_get_raw_frame(VP8D_COMP *pbi, YV12_BUFFER_CONFIG *sd, int64_t *time_stamp, int64_t *time_end_stamp, vp8_ppflags_t *flags) +{ + int ret = -1; + + if (pbi->ready_for_new_data == 1) + return ret; + + /* ie no raw frame to show!!! */ + if (pbi->common.show_frame == 0) + return ret; + + pbi->ready_for_new_data = 1; + *time_stamp = pbi->last_time_stamp; + *time_end_stamp = 0; + +#if CONFIG_POSTPROC + ret = vp8_post_proc_frame(&pbi->common, sd, flags); +#else + (void)flags; + + if (pbi->common.frame_to_show) + { + *sd = *pbi->common.frame_to_show; + sd->y_width = pbi->common.Width; + sd->y_height = pbi->common.Height; + sd->uv_height = pbi->common.Height / 2; + ret = 0; + } + else + { + ret = -1; + } + +#endif /*!CONFIG_POSTPROC*/ + vp8_clear_system_state(); + return ret; +} + + +/* This function as written isn't decoder specific, but the encoder has + * much faster ways of computing this, so it's ok for it to live in a + * decode specific file. 
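+ * Here it is a straightforward scan of the whole mode-info array
+ * (skipping the extra border entry at the end of each row) that returns
+ * 1 on the first macroblock found referencing ref_frame.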
+ */ +int vp8dx_references_buffer( VP8_COMMON *oci, int ref_frame ) +{ + const MODE_INFO *mi = oci->mi; + int mb_row, mb_col; + + for (mb_row = 0; mb_row < oci->mb_rows; mb_row++) + { + for (mb_col = 0; mb_col < oci->mb_cols; mb_col++,mi++) + { + if( mi->mbmi.ref_frame == ref_frame) + return 1; + } + mi++; + } + return 0; + +} + +int vp8_create_decoder_instances(struct frame_buffers *fb, VP8D_CONFIG *oxcf) +{ + if(!fb->use_frame_threads) + { + /* decoder instance for single thread mode */ + fb->pbi[0] = create_decompressor(oxcf); + if(!fb->pbi[0]) + return VPX_CODEC_ERROR; + +#if CONFIG_MULTITHREAD + /* enable row-based threading only when use_frame_threads + * is disabled */ + fb->pbi[0]->max_threads = oxcf->max_threads; + vp8_decoder_create_threads(fb->pbi[0]); +#endif + } + else + { + /* TODO : create frame threads and decoder instances for each + * thread here */ + } + + return VPX_CODEC_OK; +} + +int vp8_remove_decoder_instances(struct frame_buffers *fb) +{ + if(!fb->use_frame_threads) + { + VP8D_COMP *pbi = fb->pbi[0]; + + if (!pbi) + return VPX_CODEC_ERROR; +#if CONFIG_MULTITHREAD + if (pbi->b_multithreaded_rd) + vp8mt_de_alloc_temp_buffers(pbi, pbi->common.mb_rows); + vp8_decoder_remove_threads(pbi); +#endif + + /* decoder instance for single thread mode */ + remove_decompressor(pbi); + } + else + { + /* TODO : remove frame threads and decoder instances for each + * thread here */ + } + + return VPX_CODEC_OK; +} diff --git a/media/libvpx/vp8/decoder/onyxd_int.h b/media/libvpx/vp8/decoder/onyxd_int.h new file mode 100644 index 000000000..aa2cc57f7 --- /dev/null +++ b/media/libvpx/vp8/decoder/onyxd_int.h @@ -0,0 +1,159 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + + +#ifndef VP8_DECODER_ONYXD_INT_H_ +#define VP8_DECODER_ONYXD_INT_H_ + +#include "vpx_config.h" +#include "vp8/common/onyxd.h" +#include "treereader.h" +#include "vp8/common/onyxc_int.h" +#include "vp8/common/threading.h" + +#if CONFIG_ERROR_CONCEALMENT +#include "ec_types.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct +{ + int ithread; + void *ptr1; + void *ptr2; +} DECODETHREAD_DATA; + +typedef struct +{ + MACROBLOCKD mbd; +} MB_ROW_DEC; + + +typedef struct +{ + int enabled; + unsigned int count; + const unsigned char *ptrs[MAX_PARTITIONS]; + unsigned int sizes[MAX_PARTITIONS]; +} FRAGMENT_DATA; + +#define MAX_FB_MT_DEC 32 + +struct frame_buffers +{ + /* + * this struct will be populated with frame buffer management + * info in future commits. 
*/ + + /* enable/disable frame-based threading */ + int use_frame_threads; + + /* decoder instances */ + struct VP8D_COMP *pbi[MAX_FB_MT_DEC]; + +}; + +typedef struct VP8D_COMP +{ + DECLARE_ALIGNED(16, MACROBLOCKD, mb); + + YV12_BUFFER_CONFIG *dec_fb_ref[NUM_YV12_BUFFERS]; + + DECLARE_ALIGNED(16, VP8_COMMON, common); + + /* the last partition will be used for the modes/mvs */ + vp8_reader mbc[MAX_PARTITIONS]; + + VP8D_CONFIG oxcf; + + FRAGMENT_DATA fragments; + +#if CONFIG_MULTITHREAD + /* variable for threading */ + + volatile int b_multithreaded_rd; + int max_threads; + int current_mb_col_main; + unsigned int decoding_thread_count; + int allocated_decoding_thread_count; + + int mt_baseline_filter_level[MAX_MB_SEGMENTS]; + int sync_range; + int *mt_current_mb_col; /* Each row remembers its already decoded column. */ + + unsigned char **mt_yabove_row; /* mb_rows x width */ + unsigned char **mt_uabove_row; + unsigned char **mt_vabove_row; + unsigned char **mt_yleft_col; /* mb_rows x 16 */ + unsigned char **mt_uleft_col; /* mb_rows x 8 */ + unsigned char **mt_vleft_col; /* mb_rows x 8 */ + + MB_ROW_DEC *mb_row_di; + DECODETHREAD_DATA *de_thread_data; + + pthread_t *h_decoding_thread; + sem_t *h_event_start_decoding; + sem_t h_event_end_decoding; + /* end of threading data */ +#endif + + int64_t last_time_stamp; + int ready_for_new_data; + + vp8_prob prob_intra; + vp8_prob prob_last; + vp8_prob prob_gf; + vp8_prob prob_skip_false; + +#if CONFIG_ERROR_CONCEALMENT + MB_OVERLAP *overlaps; + /* the mb num from which modes and mvs (first partition) are corrupt */ + unsigned int mvs_corrupt_from_mb; +#endif + int ec_enabled; + int ec_active; + int decoded_key_frame; + int independent_partitions; + int frame_corrupt_residual; + + vpx_decrypt_cb decrypt_cb; + void *decrypt_state; +} VP8D_COMP; + +int vp8_decode_frame(VP8D_COMP *cpi); + +int vp8_create_decoder_instances(struct frame_buffers *fb, VP8D_CONFIG *oxcf); +int vp8_remove_decoder_instances(struct frame_buffers *fb); + +#if CONFIG_DEBUG +#define CHECK_MEM_ERROR(lval,expr) do {\ + lval = (expr); \ + if(!lval) \ + vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR,\ + "Failed to allocate "#lval" at %s:%d", \ + __FILE__,__LINE__);\ + } while(0) +#else +#define CHECK_MEM_ERROR(lval,expr) do {\ + lval = (expr); \ + if(!lval) \ + vpx_internal_error(&pbi->common.error, VPX_CODEC_MEM_ERROR,\ + "Failed to allocate "#lval);\ + } while(0) +#endif + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // VP8_DECODER_ONYXD_INT_H_ diff --git a/media/libvpx/vp8/decoder/threading.c b/media/libvpx/vp8/decoder/threading.c new file mode 100644 index 000000000..a76672fa8 --- /dev/null +++ b/media/libvpx/vp8/decoder/threading.c @@ -0,0 +1,915 @@ +/* + * Copyright (c) 2010 The WebM project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. 
+ */ + + +#include "vpx_config.h" +#include "vp8_rtcd.h" +#if !defined(WIN32) && CONFIG_OS_SUPPORT == 1 +# include <unistd.h> +#endif +#include "onyxd_int.h" +#include "vpx_mem/vpx_mem.h" +#include "vp8/common/threading.h" + +#include "vp8/common/loopfilter.h" +#include "vp8/common/extend.h" +#include "vpx_ports/vpx_timer.h" +#include "detokenize.h" +#include "vp8/common/reconintra4x4.h" +#include "vp8/common/reconinter.h" +#include "vp8/common/setupintrarecon.h" +#if CONFIG_ERROR_CONCEALMENT +#include "error_concealment.h" +#endif +#ifdef _MSC_VER +#include <intrin.h> +#endif + +#define CALLOC_ARRAY(p, n) CHECK_MEM_ERROR((p), vpx_calloc(sizeof(*(p)), (n))) +#define CALLOC_ARRAY_ALIGNED(p, n, algn) do { \ + CHECK_MEM_ERROR((p), vpx_memalign((algn), sizeof(*(p)) * (n))); \ + memset((p), 0, (n) * sizeof(*(p))); \ +} while (0) + + +void vp8_mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd); + +static void setup_decoding_thread_data(VP8D_COMP *pbi, MACROBLOCKD *xd, MB_ROW_DEC *mbrd, int count) +{ + VP8_COMMON *const pc = & pbi->common; + int i; + + for (i = 0; i < count; i++) + { + MACROBLOCKD *mbd = &mbrd[i].mbd; + mbd->subpixel_predict = xd->subpixel_predict; + mbd->subpixel_predict8x4 = xd->subpixel_predict8x4; + mbd->subpixel_predict8x8 = xd->subpixel_predict8x8; + mbd->subpixel_predict16x16 = xd->subpixel_predict16x16; + + mbd->mode_info_context = pc->mi + pc->mode_info_stride * (i + 1); + mbd->mode_info_stride = pc->mode_info_stride; + + mbd->frame_type = pc->frame_type; + mbd->pre = xd->pre; + mbd->dst = xd->dst; + + mbd->segmentation_enabled = xd->segmentation_enabled; + mbd->mb_segement_abs_delta = xd->mb_segement_abs_delta; + memcpy(mbd->segment_feature_data, xd->segment_feature_data, sizeof(xd->segment_feature_data)); + + /*signed char ref_lf_deltas[MAX_REF_LF_DELTAS];*/ + memcpy(mbd->ref_lf_deltas, xd->ref_lf_deltas, sizeof(xd->ref_lf_deltas)); + /*signed char mode_lf_deltas[MAX_MODE_LF_DELTAS];*/ + memcpy(mbd->mode_lf_deltas, xd->mode_lf_deltas, sizeof(xd->mode_lf_deltas)); + /*unsigned char mode_ref_lf_delta_enabled; + unsigned char mode_ref_lf_delta_update;*/ + mbd->mode_ref_lf_delta_enabled = xd->mode_ref_lf_delta_enabled; + mbd->mode_ref_lf_delta_update = xd->mode_ref_lf_delta_update; + + mbd->current_bc = &pbi->mbc[0]; + + memcpy(mbd->dequant_y1_dc, xd->dequant_y1_dc, sizeof(xd->dequant_y1_dc)); + memcpy(mbd->dequant_y1, xd->dequant_y1, sizeof(xd->dequant_y1)); + memcpy(mbd->dequant_y2, xd->dequant_y2, sizeof(xd->dequant_y2)); + memcpy(mbd->dequant_uv, xd->dequant_uv, sizeof(xd->dequant_uv)); + + mbd->fullpixel_mask = 0xffffffff; + + if (pc->full_pixel) + mbd->fullpixel_mask = 0xfffffff8; + + } + + for (i = 0; i < pc->mb_rows; i++) + pbi->mt_current_mb_col[i] = -1; +} + +static void mt_decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, + unsigned int mb_idx) +{ + MB_PREDICTION_MODE mode; + int i; +#if CONFIG_ERROR_CONCEALMENT + int corruption_detected = 0; +#else + (void)mb_idx; +#endif + + if (xd->mode_info_context->mbmi.mb_skip_coeff) + { + vp8_reset_mb_tokens_context(xd); + } + else if (!vp8dx_bool_error(xd->current_bc)) + { + int eobtotal; + eobtotal = vp8_decode_mb_tokens(pbi, xd); + + /* Special case: Force the loopfilter to skip when eobtotal is zero */ + xd->mode_info_context->mbmi.mb_skip_coeff = (eobtotal==0); + } + + mode = xd->mode_info_context->mbmi.mode; + + if (xd->segmentation_enabled) + vp8_mb_init_dequantizer(pbi, xd); + + +#if CONFIG_ERROR_CONCEALMENT + + if(pbi->ec_active) + { + int throw_residual; + /* When we have independent partitions we can apply 
residual even + * though other partitions within the frame are corrupt. + */ + throw_residual = (!pbi->independent_partitions && + pbi->frame_corrupt_residual); + throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc)); + + if ((mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual)) + { + /* MB with corrupt residuals or corrupt mode/motion vectors. + * Better to use the predictor as reconstruction. + */ + pbi->frame_corrupt_residual = 1; + memset(xd->qcoeff, 0, sizeof(xd->qcoeff)); + vp8_conceal_corrupt_mb(xd); + + + corruption_detected = 1; + + /* force idct to be skipped for B_PRED and use the + * prediction only for reconstruction + * */ + memset(xd->eobs, 0, 25); + } + } +#endif + + /* do prediction */ + if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) + { + vp8_build_intra_predictors_mbuv_s(xd, + xd->recon_above[1], + xd->recon_above[2], + xd->recon_left[1], + xd->recon_left[2], + xd->recon_left_stride[1], + xd->dst.u_buffer, xd->dst.v_buffer, + xd->dst.uv_stride); + + if (mode != B_PRED) + { + vp8_build_intra_predictors_mby_s(xd, + xd->recon_above[0], + xd->recon_left[0], + xd->recon_left_stride[0], + xd->dst.y_buffer, + xd->dst.y_stride); + } + else + { + short *DQC = xd->dequant_y1; + int dst_stride = xd->dst.y_stride; + + /* clear out residual eob info */ + if(xd->mode_info_context->mbmi.mb_skip_coeff) + memset(xd->eobs, 0, 25); + + intra_prediction_down_copy(xd, xd->recon_above[0] + 16); + + for (i = 0; i < 16; i++) + { + BLOCKD *b = &xd->block[i]; + unsigned char *dst = xd->dst.y_buffer + b->offset; + B_PREDICTION_MODE b_mode = + xd->mode_info_context->bmi[i].as_mode; + unsigned char *Above; + unsigned char *yleft; + int left_stride; + unsigned char top_left; + + /*Caution: For some b_mode, it needs 8 pixels (4 above + 4 above-right).*/ + if (i < 4 && pbi->common.filter_level) + Above = xd->recon_above[0] + b->offset; + else + Above = dst - dst_stride; + + if (i%4==0 && pbi->common.filter_level) + { + yleft = xd->recon_left[0] + i; + left_stride = 1; + } + else + { + yleft = dst - 1; + left_stride = dst_stride; + } + + if ((i==4 || i==8 || i==12) && pbi->common.filter_level) + top_left = *(xd->recon_left[0] + i - 1); + else + top_left = Above[-1]; + + vp8_intra4x4_predict(Above, yleft, left_stride, + b_mode, dst, dst_stride, top_left); + + if (xd->eobs[i] ) + { + if (xd->eobs[i] > 1) + { + vp8_dequant_idct_add(b->qcoeff, DQC, dst, dst_stride); + } + else + { + vp8_dc_only_idct_add(b->qcoeff[0] * DQC[0], + dst, dst_stride, dst, dst_stride); + memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0])); + } + } + } + } + } + else + { + vp8_build_inter_predictors_mb(xd); + } + + +#if CONFIG_ERROR_CONCEALMENT + if (corruption_detected) + { + return; + } +#endif + + if(!xd->mode_info_context->mbmi.mb_skip_coeff) + { + /* dequantization and idct */ + if (mode != B_PRED) + { + short *DQC = xd->dequant_y1; + + if (mode != SPLITMV) + { + BLOCKD *b = &xd->block[24]; + + /* do 2nd order transform on the dc block */ + if (xd->eobs[24] > 1) + { + vp8_dequantize_b(b, xd->dequant_y2); + + vp8_short_inv_walsh4x4(&b->dqcoeff[0], + xd->qcoeff); + memset(b->qcoeff, 0, 16 * sizeof(b->qcoeff[0])); + } + else + { + b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0]; + vp8_short_inv_walsh4x4_1(&b->dqcoeff[0], + xd->qcoeff); + memset(b->qcoeff, 0, 2 * sizeof(b->qcoeff[0])); + } + + /* override the dc dequant constant in order to preserve the + * dc components + */ + DQC = xd->dequant_y1_dc; + } + + vp8_dequant_idct_add_y_block + (xd->qcoeff, DQC, + xd->dst.y_buffer, + xd->dst.y_stride, 
xd->eobs); + } + + vp8_dequant_idct_add_uv_block + (xd->qcoeff+16*16, xd->dequant_uv, + xd->dst.u_buffer, xd->dst.v_buffer, + xd->dst.uv_stride, xd->eobs+16); + } +} + +static void mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd, int start_mb_row) +{ + volatile const int *last_row_current_mb_col; + volatile int *current_mb_col; + int mb_row; + VP8_COMMON *pc = &pbi->common; + const int nsync = pbi->sync_range; + const int first_row_no_sync_above = pc->mb_cols + nsync; + int num_part = 1 << pbi->common.multi_token_partition; + int last_mb_row = start_mb_row; + + YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME]; + YV12_BUFFER_CONFIG *yv12_fb_lst = pbi->dec_fb_ref[LAST_FRAME]; + + int recon_y_stride = yv12_fb_new->y_stride; + int recon_uv_stride = yv12_fb_new->uv_stride; + + unsigned char *ref_buffer[MAX_REF_FRAMES][3]; + unsigned char *dst_buffer[3]; + int i; + int ref_fb_corrupted[MAX_REF_FRAMES]; + + ref_fb_corrupted[INTRA_FRAME] = 0; + + for(i = 1; i < MAX_REF_FRAMES; i++) + { + YV12_BUFFER_CONFIG *this_fb = pbi->dec_fb_ref[i]; + + ref_buffer[i][0] = this_fb->y_buffer; + ref_buffer[i][1] = this_fb->u_buffer; + ref_buffer[i][2] = this_fb->v_buffer; + + ref_fb_corrupted[i] = this_fb->corrupted; + } + + dst_buffer[0] = yv12_fb_new->y_buffer; + dst_buffer[1] = yv12_fb_new->u_buffer; + dst_buffer[2] = yv12_fb_new->v_buffer; + + xd->up_available = (start_mb_row != 0); + + for (mb_row = start_mb_row; mb_row < pc->mb_rows; mb_row += (pbi->decoding_thread_count + 1)) + { + int recon_yoffset, recon_uvoffset; + int mb_col; + int filter_level; + loop_filter_info_n *lfi_n = &pc->lf_info; + + /* save last row processed by this thread */ + last_mb_row = mb_row; + /* select bool coder for current partition */ + xd->current_bc = &pbi->mbc[mb_row%num_part]; + + if (mb_row > 0) + last_row_current_mb_col = &pbi->mt_current_mb_col[mb_row -1]; + else + last_row_current_mb_col = &first_row_no_sync_above; + + current_mb_col = &pbi->mt_current_mb_col[mb_row]; + + recon_yoffset = mb_row * recon_y_stride * 16; + recon_uvoffset = mb_row * recon_uv_stride * 8; + + /* reset contexts */ + xd->above_context = pc->above_context; + memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES)); + + xd->left_available = 0; + + xd->mb_to_top_edge = -((mb_row * 16)) << 3; + xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3; + + if (pbi->common.filter_level) + { + xd->recon_above[0] = pbi->mt_yabove_row[mb_row] + 0*16 +32; + xd->recon_above[1] = pbi->mt_uabove_row[mb_row] + 0*8 +16; + xd->recon_above[2] = pbi->mt_vabove_row[mb_row] + 0*8 +16; + + xd->recon_left[0] = pbi->mt_yleft_col[mb_row]; + xd->recon_left[1] = pbi->mt_uleft_col[mb_row]; + xd->recon_left[2] = pbi->mt_vleft_col[mb_row]; + + /* TODO: move to outside row loop */ + xd->recon_left_stride[0] = 1; + xd->recon_left_stride[1] = 1; + } + else + { + xd->recon_above[0] = dst_buffer[0] + recon_yoffset; + xd->recon_above[1] = dst_buffer[1] + recon_uvoffset; + xd->recon_above[2] = dst_buffer[2] + recon_uvoffset; + + xd->recon_left[0] = xd->recon_above[0] - 1; + xd->recon_left[1] = xd->recon_above[1] - 1; + xd->recon_left[2] = xd->recon_above[2] - 1; + + xd->recon_above[0] -= xd->dst.y_stride; + xd->recon_above[1] -= xd->dst.uv_stride; + xd->recon_above[2] -= xd->dst.uv_stride; + + /* TODO: move to outside row loop */ + xd->recon_left_stride[0] = xd->dst.y_stride; + xd->recon_left_stride[1] = xd->dst.uv_stride; + + setup_intra_recon_left(xd->recon_left[0], xd->recon_left[1], + xd->recon_left[2], xd->dst.y_stride, + xd->dst.uv_stride); + } + + for 
+        for (mb_col = 0; mb_col < pc->mb_cols; mb_col++)
+        {
+            *current_mb_col = mb_col - 1;
+
+            if ((mb_col & (nsync - 1)) == 0)
+            {
+                while (mb_col > (*last_row_current_mb_col - nsync))
+                {
+                    x86_pause_hint();
+                    thread_sleep(0);
+                }
+            }
+
+            /* Distance of MB to the various image edges.
+             * These are specified to 8th pel as they are always
+             * compared to values that are in 1/8th pel units.
+             */
+            xd->mb_to_left_edge = -((mb_col * 16) << 3);
+            xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
+
+#if CONFIG_ERROR_CONCEALMENT
+            {
+                int corrupt_residual =
+                            (!pbi->independent_partitions &&
+                             pbi->frame_corrupt_residual) ||
+                            vp8dx_bool_error(xd->current_bc);
+                if (pbi->ec_active &&
+                    (xd->mode_info_context->mbmi.ref_frame ==
+                                                 INTRA_FRAME) &&
+                    corrupt_residual)
+                {
+                    /* We have an intra block with corrupt
+                     * coefficients, better to conceal with an inter
+                     * block. Interpolate MVs from neighboring MBs.
+                     *
+                     * Note that for the first mb with corrupt
+                     * residual in a frame, we might not discover
+                     * that before decoding the residual. That
+                     * happens after this check, and therefore no
+                     * inter concealment will be done.
+                     */
+                    vp8_interpolate_motion(xd,
+                                           mb_row, mb_col,
+                                           pc->mb_rows, pc->mb_cols,
+                                           pc->mode_info_stride);
+                }
+            }
+#endif
+
+            xd->dst.y_buffer = dst_buffer[0] + recon_yoffset;
+            xd->dst.u_buffer = dst_buffer[1] + recon_uvoffset;
+            xd->dst.v_buffer = dst_buffer[2] + recon_uvoffset;
+
+            xd->pre.y_buffer = ref_buffer[xd->mode_info_context->mbmi.ref_frame][0] + recon_yoffset;
+            xd->pre.u_buffer = ref_buffer[xd->mode_info_context->mbmi.ref_frame][1] + recon_uvoffset;
+            xd->pre.v_buffer = ref_buffer[xd->mode_info_context->mbmi.ref_frame][2] + recon_uvoffset;
+
+            /* propagate errors from reference frames */
+            xd->corrupted |= ref_fb_corrupted[xd->mode_info_context->mbmi.ref_frame];
+
+            mt_decode_macroblock(pbi, xd, 0);
+
+            xd->left_available = 1;
+
+            /* check if the boolean decoder has suffered an error */
+            xd->corrupted |= vp8dx_bool_error(xd->current_bc);
+
+            xd->recon_above[0] += 16;
+            xd->recon_above[1] += 8;
+            xd->recon_above[2] += 8;
+
+            if (!pbi->common.filter_level)
+            {
+                xd->recon_left[0] += 16;
+                xd->recon_left[1] += 8;
+                xd->recon_left[2] += 8;
+            }
+
+            if (pbi->common.filter_level)
+            {
+                int skip_lf = (xd->mode_info_context->mbmi.mode != B_PRED &&
+                               xd->mode_info_context->mbmi.mode != SPLITMV &&
+                               xd->mode_info_context->mbmi.mb_skip_coeff);
+
+                const int mode_index = lfi_n->mode_lf_lut[xd->mode_info_context->mbmi.mode];
+                const int seg = xd->mode_info_context->mbmi.segment_id;
+                const int ref_frame = xd->mode_info_context->mbmi.ref_frame;
+
+                filter_level = lfi_n->lvl[seg][ref_frame][mode_index];
+
+                if (mb_row != pc->mb_rows-1)
+                {
+                    /* Save decoded MB last row data for next-row decoding */
+                    memcpy((pbi->mt_yabove_row[mb_row + 1] + 32 + mb_col*16), (xd->dst.y_buffer + 15 * recon_y_stride), 16);
+                    memcpy((pbi->mt_uabove_row[mb_row + 1] + 16 + mb_col*8), (xd->dst.u_buffer + 7 * recon_uv_stride), 8);
+                    memcpy((pbi->mt_vabove_row[mb_row + 1] + 16 + mb_col*8), (xd->dst.v_buffer + 7 * recon_uv_stride), 8);
+                }
+
+                /* save left_col for next MB decoding */
+                if (mb_col != pc->mb_cols-1)
+                {
+                    MODE_INFO *next = xd->mode_info_context + 1;
+
+                    if (next->mbmi.ref_frame == INTRA_FRAME)
+                    {
+                        for (i = 0; i < 16; i++)
+                            pbi->mt_yleft_col[mb_row][i] = xd->dst.y_buffer[i * recon_y_stride + 15];
+                        for (i = 0; i < 8; i++)
+                        {
+                            pbi->mt_uleft_col[mb_row][i] = xd->dst.u_buffer[i * recon_uv_stride + 7];
+                            pbi->mt_vleft_col[mb_row][i] = xd->dst.v_buffer[i * recon_uv_stride + 7];
+                        }
+                    }
+                }
+
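+                /* The row/column copies above are taken before this MB is
+                 * loop filtered: the single-threaded decoder reconstructs
+                 * the whole frame before filtering, so intra prediction must
+                 * see unfiltered neighbours. The mt_*above_row/mt_*left_col
+                 * caches preserve that behaviour when filtering is applied
+                 * per macroblock here.
+                 */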
+                /* loopfilter on this macroblock. */
+                if (filter_level)
+                {
+                    if (pc->filter_type == NORMAL_LOOPFILTER)
+                    {
+                        loop_filter_info lfi;
+                        FRAME_TYPE frame_type = pc->frame_type;
+                        const int hev_index = lfi_n->hev_thr_lut[frame_type][filter_level];
+                        lfi.mblim = lfi_n->mblim[filter_level];
+                        lfi.blim = lfi_n->blim[filter_level];
+                        lfi.lim = lfi_n->lim[filter_level];
+                        lfi.hev_thr = lfi_n->hev_thr[hev_index];
+
+                        if (mb_col > 0)
+                            vp8_loop_filter_mbv
+                                (xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi);
+
+                        if (!skip_lf)
+                            vp8_loop_filter_bv
+                                (xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi);
+
+                        /* don't apply across umv border */
+                        if (mb_row > 0)
+                            vp8_loop_filter_mbh
+                                (xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi);
+
+                        if (!skip_lf)
+                            vp8_loop_filter_bh
+                                (xd->dst.y_buffer, xd->dst.u_buffer, xd->dst.v_buffer, recon_y_stride, recon_uv_stride, &lfi);
+                    }
+                    else
+                    {
+                        if (mb_col > 0)
+                            vp8_loop_filter_simple_mbv
+                                (xd->dst.y_buffer, recon_y_stride, lfi_n->mblim[filter_level]);
+
+                        if (!skip_lf)
+                            vp8_loop_filter_simple_bv
+                                (xd->dst.y_buffer, recon_y_stride, lfi_n->blim[filter_level]);
+
+                        /* don't apply across umv border */
+                        if (mb_row > 0)
+                            vp8_loop_filter_simple_mbh
+                                (xd->dst.y_buffer, recon_y_stride, lfi_n->mblim[filter_level]);
+
+                        if (!skip_lf)
+                            vp8_loop_filter_simple_bh
+                                (xd->dst.y_buffer, recon_y_stride, lfi_n->blim[filter_level]);
+                    }
+                }
+            }
+
+            recon_yoffset += 16;
+            recon_uvoffset += 8;
+
+            ++xd->mode_info_context;  /* next mb */
+
+            xd->above_context++;
+        }
+
+        /* adjust to the next row of mbs */
+        if (pbi->common.filter_level)
+        {
+            if (mb_row != pc->mb_rows-1)
+            {
+                int lasty = yv12_fb_lst->y_width + VP8BORDERINPIXELS;
+                int lastuv = (yv12_fb_lst->y_width>>1) + (VP8BORDERINPIXELS>>1);
+
+                for (i = 0; i < 4; i++)
+                {
+                    pbi->mt_yabove_row[mb_row + 1][lasty + i] = pbi->mt_yabove_row[mb_row + 1][lasty - 1];
+                    pbi->mt_uabove_row[mb_row + 1][lastuv + i] = pbi->mt_uabove_row[mb_row + 1][lastuv - 1];
+                    pbi->mt_vabove_row[mb_row + 1][lastuv + i] = pbi->mt_vabove_row[mb_row + 1][lastuv - 1];
+                }
+            }
+        }
+        else
+            vp8_extend_mb_row(yv12_fb_new, xd->dst.y_buffer + 16,
+                              xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
+
+        /* last MB of row is ready just after extension is done */
+        *current_mb_col = mb_col + nsync;
+
+        ++xd->mode_info_context;  /* skip prediction column */
+        xd->up_available = 1;
+
+        /* since we have multithread */
+        xd->mode_info_context += xd->mode_info_stride * pbi->decoding_thread_count;
+    }
+
+    /* signal end of frame decoding if this thread processed the last mb_row */
+    if (last_mb_row == (pc->mb_rows - 1))
+        sem_post(&pbi->h_event_end_decoding);
+}
+
+
+static THREAD_FUNCTION thread_decoding_proc(void *p_data)
+{
+    int ithread = ((DECODETHREAD_DATA *)p_data)->ithread;
+    VP8D_COMP *pbi = (VP8D_COMP *)(((DECODETHREAD_DATA *)p_data)->ptr1);
+    MB_ROW_DEC *mbrd = (MB_ROW_DEC *)(((DECODETHREAD_DATA *)p_data)->ptr2);
+    ENTROPY_CONTEXT_PLANES mb_row_left_context;
+
+    while (1)
+    {
+        if (pbi->b_multithreaded_rd == 0)
+            break;
+
+        if (sem_wait(&pbi->h_event_start_decoding[ithread]) == 0)
+        {
+            if (pbi->b_multithreaded_rd == 0)
+                break;
+            else
+            {
+                MACROBLOCKD *xd = &mbrd->mbd;
+                xd->left_context = &mb_row_left_context;
+
+                mt_decode_mb_rows(pbi, xd, ithread+1);
+            }
+        }
+    }
+
+    return 0;
+}
+
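+/* Worker lifecycle: each thread created below blocks on its
+ * h_event_start_decoding semaphore. vp8mt_decode_mb_rows() posts that
+ * semaphore once per worker per frame; the worker then decodes every
+ * (decoding_thread_count + 1)th macroblock row starting at ithread + 1.
+ * Clearing b_multithreaded_rd and posting the semaphore one final time
+ * (see vp8_decoder_remove_threads) makes the loop above exit.
+ */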
+void vp8_decoder_create_threads(VP8D_COMP *pbi)
+{
+    int core_count = 0;
+    unsigned int ithread;
+
+    pbi->b_multithreaded_rd = 0;
+    pbi->allocated_decoding_thread_count = 0;
+
+    /* limit decoding threads to the max number of token partitions */
+    core_count = (pbi->max_threads > 8) ? 8 : pbi->max_threads;
+
+    /* limit decoding threads to the available cores */
+    if (core_count > pbi->common.processor_core_count)
+        core_count = pbi->common.processor_core_count;
+
+    if (core_count > 1)
+    {
+        pbi->b_multithreaded_rd = 1;
+        pbi->decoding_thread_count = core_count - 1;
+
+        CALLOC_ARRAY(pbi->h_decoding_thread, pbi->decoding_thread_count);
+        CALLOC_ARRAY(pbi->h_event_start_decoding, pbi->decoding_thread_count);
+        CALLOC_ARRAY_ALIGNED(pbi->mb_row_di, pbi->decoding_thread_count, 32);
+        CALLOC_ARRAY(pbi->de_thread_data, pbi->decoding_thread_count);
+
+        for (ithread = 0; ithread < pbi->decoding_thread_count; ithread++)
+        {
+            sem_init(&pbi->h_event_start_decoding[ithread], 0, 0);
+
+            vp8_setup_block_dptrs(&pbi->mb_row_di[ithread].mbd);
+
+            pbi->de_thread_data[ithread].ithread = ithread;
+            pbi->de_thread_data[ithread].ptr1 = (void *)pbi;
+            pbi->de_thread_data[ithread].ptr2 = (void *)&pbi->mb_row_di[ithread];
+
+            pthread_create(&pbi->h_decoding_thread[ithread], 0, thread_decoding_proc, (&pbi->de_thread_data[ithread]));
+        }
+
+        sem_init(&pbi->h_event_end_decoding, 0, 0);
+
+        pbi->allocated_decoding_thread_count = pbi->decoding_thread_count;
+    }
+}
+
+
+void vp8mt_de_alloc_temp_buffers(VP8D_COMP *pbi, int mb_rows)
+{
+    int i;
+
+    if (pbi->b_multithreaded_rd)
+    {
+        vpx_free(pbi->mt_current_mb_col);
+        pbi->mt_current_mb_col = NULL;
+
+        /* Free above_row buffers. */
+        if (pbi->mt_yabove_row)
+        {
+            for (i = 0; i < mb_rows; i++)
+            {
+                vpx_free(pbi->mt_yabove_row[i]);
+                pbi->mt_yabove_row[i] = NULL;
+            }
+            vpx_free(pbi->mt_yabove_row);
+            pbi->mt_yabove_row = NULL;
+        }
+
+        if (pbi->mt_uabove_row)
+        {
+            for (i = 0; i < mb_rows; i++)
+            {
+                vpx_free(pbi->mt_uabove_row[i]);
+                pbi->mt_uabove_row[i] = NULL;
+            }
+            vpx_free(pbi->mt_uabove_row);
+            pbi->mt_uabove_row = NULL;
+        }
+
+        if (pbi->mt_vabove_row)
+        {
+            for (i = 0; i < mb_rows; i++)
+            {
+                vpx_free(pbi->mt_vabove_row[i]);
+                pbi->mt_vabove_row[i] = NULL;
+            }
+            vpx_free(pbi->mt_vabove_row);
+            pbi->mt_vabove_row = NULL;
+        }
+
+        /* Free left_col buffers. */
+        if (pbi->mt_yleft_col)
+        {
+            for (i = 0; i < mb_rows; i++)
+            {
+                vpx_free(pbi->mt_yleft_col[i]);
+                pbi->mt_yleft_col[i] = NULL;
+            }
+            vpx_free(pbi->mt_yleft_col);
+            pbi->mt_yleft_col = NULL;
+        }
+
+        if (pbi->mt_uleft_col)
+        {
+            for (i = 0; i < mb_rows; i++)
+            {
+                vpx_free(pbi->mt_uleft_col[i]);
+                pbi->mt_uleft_col[i] = NULL;
+            }
+            vpx_free(pbi->mt_uleft_col);
+            pbi->mt_uleft_col = NULL;
+        }
+
+        if (pbi->mt_vleft_col)
+        {
+            for (i = 0; i < mb_rows; i++)
+            {
+                vpx_free(pbi->mt_vleft_col[i]);
+                pbi->mt_vleft_col[i] = NULL;
+            }
+            vpx_free(pbi->mt_vleft_col);
+            pbi->mt_vleft_col = NULL;
+        }
+    }
+}
+
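+/* Buffer geometry, for reference: the per-row caches allocated below hold
+ * one unfiltered pixel row per plane plus the frame border, i.e.
+ * width + 2*VP8BORDERINPIXELS bytes for Y and width/2 + VP8BORDERINPIXELS
+ * bytes for each chroma plane. For a 1920-wide stream, for example, width
+ * is already a multiple of 16 and sync_range becomes 16, so adjacent rows
+ * synchronise every 16 macroblocks.
+ */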
+void vp8mt_alloc_temp_buffers(VP8D_COMP *pbi, int width, int prev_mb_rows)
+{
+    VP8_COMMON *const pc = &pbi->common;
+    int i;
+    int uv_width;
+
+    if (pbi->b_multithreaded_rd)
+    {
+        vp8mt_de_alloc_temp_buffers(pbi, prev_mb_rows);
+
+        /* our internal buffers are always multiples of 16 */
+        if ((width & 0xf) != 0)
+            width += 16 - (width & 0xf);
+
+        if (width < 640)
+            pbi->sync_range = 1;
+        else if (width <= 1280)
+            pbi->sync_range = 8;
+        else if (width <= 2560)
+            pbi->sync_range = 16;
+        else
+            pbi->sync_range = 32;
+
+        uv_width = width >> 1;
+
+        /* Allocate an int for each mb row. */
+        CALLOC_ARRAY(pbi->mt_current_mb_col, pc->mb_rows);
+
+        /* Allocate memory for above_row buffers. */
+        CALLOC_ARRAY(pbi->mt_yabove_row, pc->mb_rows);
+        for (i = 0; i < pc->mb_rows; i++)
+            CHECK_MEM_ERROR(pbi->mt_yabove_row[i], vpx_memalign(16, sizeof(unsigned char) * (width + (VP8BORDERINPIXELS<<1))));
+
+        CALLOC_ARRAY(pbi->mt_uabove_row, pc->mb_rows);
+        for (i = 0; i < pc->mb_rows; i++)
+            CHECK_MEM_ERROR(pbi->mt_uabove_row[i], vpx_memalign(16, sizeof(unsigned char) * (uv_width + VP8BORDERINPIXELS)));
+
+        CALLOC_ARRAY(pbi->mt_vabove_row, pc->mb_rows);
+        for (i = 0; i < pc->mb_rows; i++)
+            CHECK_MEM_ERROR(pbi->mt_vabove_row[i], vpx_memalign(16, sizeof(unsigned char) * (uv_width + VP8BORDERINPIXELS)));
+
+        /* Allocate memory for left_col buffers. */
+        CALLOC_ARRAY(pbi->mt_yleft_col, pc->mb_rows);
+        for (i = 0; i < pc->mb_rows; i++)
+            CHECK_MEM_ERROR(pbi->mt_yleft_col[i], vpx_calloc(sizeof(unsigned char) * 16, 1));
+
+        CALLOC_ARRAY(pbi->mt_uleft_col, pc->mb_rows);
+        for (i = 0; i < pc->mb_rows; i++)
+            CHECK_MEM_ERROR(pbi->mt_uleft_col[i], vpx_calloc(sizeof(unsigned char) * 8, 1));
+
+        CALLOC_ARRAY(pbi->mt_vleft_col, pc->mb_rows);
+        for (i = 0; i < pc->mb_rows; i++)
+            CHECK_MEM_ERROR(pbi->mt_vleft_col[i], vpx_calloc(sizeof(unsigned char) * 8, 1));
+    }
+}
+
+
+void vp8_decoder_remove_threads(VP8D_COMP *pbi)
+{
+    /* Shut down the MB decoding threads. */
+    if (pbi->b_multithreaded_rd)
+    {
+        int i;
+
+        pbi->b_multithreaded_rd = 0;
+
+        /* allow all threads to exit */
+        for (i = 0; i < pbi->allocated_decoding_thread_count; i++)
+        {
+            sem_post(&pbi->h_event_start_decoding[i]);
+            pthread_join(pbi->h_decoding_thread[i], NULL);
+        }
+
+        for (i = 0; i < pbi->allocated_decoding_thread_count; i++)
+        {
+            sem_destroy(&pbi->h_event_start_decoding[i]);
+        }
+
+        sem_destroy(&pbi->h_event_end_decoding);
+
+        vpx_free(pbi->h_decoding_thread);
+        pbi->h_decoding_thread = NULL;
+
+        vpx_free(pbi->h_event_start_decoding);
+        pbi->h_event_start_decoding = NULL;
+
+        vpx_free(pbi->mb_row_di);
+        pbi->mb_row_di = NULL;
+
+        vpx_free(pbi->de_thread_data);
+        pbi->de_thread_data = NULL;
+    }
+}
+
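+/* The 127 and 129 fill values used below are the VP8 defaults for missing
+ * prediction context: pixels above the frame edge read as 127 and pixels
+ * left of it as 129, so the first row and column predict deterministically.
+ */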
+void vp8mt_decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd)
+{
+    VP8_COMMON *pc = &pbi->common;
+    unsigned int i;
+    int j;
+
+    int filter_level = pc->filter_level;
+    YV12_BUFFER_CONFIG *yv12_fb_new = pbi->dec_fb_ref[INTRA_FRAME];
+
+    if (filter_level)
+    {
+        /* Set above_row buffer to 127 for decoding first MB row */
+        memset(pbi->mt_yabove_row[0] + VP8BORDERINPIXELS-1, 127, yv12_fb_new->y_width + 5);
+        memset(pbi->mt_uabove_row[0] + (VP8BORDERINPIXELS>>1)-1, 127, (yv12_fb_new->y_width>>1) + 5);
+        memset(pbi->mt_vabove_row[0] + (VP8BORDERINPIXELS>>1)-1, 127, (yv12_fb_new->y_width>>1) + 5);
+
+        for (j = 1; j < pc->mb_rows; j++)
+        {
+            memset(pbi->mt_yabove_row[j] + VP8BORDERINPIXELS-1, (unsigned char)129, 1);
+            memset(pbi->mt_uabove_row[j] + (VP8BORDERINPIXELS>>1)-1, (unsigned char)129, 1);
+            memset(pbi->mt_vabove_row[j] + (VP8BORDERINPIXELS>>1)-1, (unsigned char)129, 1);
+        }
+
+        /* Set left_col to 129 initially */
+        for (j = 0; j < pc->mb_rows; j++)
+        {
+            memset(pbi->mt_yleft_col[j], (unsigned char)129, 16);
+            memset(pbi->mt_uleft_col[j], (unsigned char)129, 8);
+            memset(pbi->mt_vleft_col[j], (unsigned char)129, 8);
+        }
+
+        /* Initialize the loop filter for this frame. */
+        vp8_loop_filter_frame_init(pc, &pbi->mb, filter_level);
+    }
+    else
+        vp8_setup_intra_recon_top_line(yv12_fb_new);
+
+    setup_decoding_thread_data(pbi, xd, pbi->mb_row_di, pbi->decoding_thread_count);
+
+    for (i = 0; i < pbi->decoding_thread_count; i++)
+        sem_post(&pbi->h_event_start_decoding[i]);
+
+    mt_decode_mb_rows(pbi, xd, 0);
+
+    sem_wait(&pbi->h_event_end_decoding);  /* add back for each frame */
+}
diff --git a/media/libvpx/vp8/decoder/treereader.h b/media/libvpx/vp8/decoder/treereader.h
new file mode 100644
index 000000000..35ee69600
--- /dev/null
+++ b/media/libvpx/vp8/decoder/treereader.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#ifndef VP8_DECODER_TREEREADER_H_
+#define VP8_DECODER_TREEREADER_H_
+
+#include "vp8/common/treecoder.h"
+#include "dboolhuff.h"
+
+#ifdef __cplusplus
extern "C" {
+#endif
+
+typedef BOOL_DECODER vp8_reader;
+
+#define vp8_read vp8dx_decode_bool
+#define vp8_read_literal vp8_decode_value
+#define vp8_read_bit(R) vp8_read(R, vp8_prob_half)
+
+
+/* Intent of tree data structure is to make decoding trivial. */
+
+static int vp8_treed_read(
+    vp8_reader *const r,        /* !!! must return a 0 or 1 !!! */
+    vp8_tree t,
+    const vp8_prob *const p
+)
+{
+    register vp8_tree_index i = 0;
+
+    while ((i = t[i + vp8_read(r, p[i>>1])]) > 0)
+        ;
+
+    return -i;
+}
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // VP8_DECODER_TREEREADER_H_
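
The traversal in vp8_treed_read can be exercised in isolation. The standalone sketch below is illustrative only and not part of the patch: tree, read_bit and treed_read are hypothetical stand-ins, with a scripted bit array replacing the arithmetic decoding that vp8_read performs against the probability table, but the descent loop is the same shape as above.

#include <stdio.h>

/* Stand-in for vp8_tree_index: entries <= 0 are leaves encoding -symbol;
 * positive entries are the index of the next pair of branches. */
typedef signed char tree_index;

/* A three-symbol tree: bit 0 -> symbol 0; bits 1,0 -> symbol 1;
 * bits 1,1 -> symbol 2. */
static const tree_index tree[4] = { 0, 2, -1, -2 };

/* Scripted bit source standing in for vp8_read(r, p[i>>1]). */
static int read_bit(const int **bits)
{
    return *(*bits)++;
}

/* Same descent as vp8_treed_read: start at node 0, step by one decoded
 * bit at a time, and stop when the entry is no longer a positive index. */
static int treed_read(const int **bits, const tree_index *t)
{
    tree_index i = 0;

    while ((i = t[i + read_bit(bits)]) > 0)
        ;

    return -i;
}

int main(void)
{
    const int stream[] = { 1, 0, 0, 1, 1 };
    const int *p = stream;

    printf("%d\n", treed_read(&p, tree));  /* bits 1,0 -> symbol 1 */
    printf("%d\n", treed_read(&p, tree));  /* bit  0   -> symbol 0 */
    printf("%d\n", treed_read(&p, tree));  /* bits 1,1 -> symbol 2 */
    return 0;
}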