Diffstat (limited to 'third_party/aom/av1/decoder')
-rw-r--r--  third_party/aom/av1/decoder/decodeframe.c        | 610
-rw-r--r--  third_party/aom/av1/decoder/decodemv.c           | 650
-rw-r--r--  third_party/aom/av1/decoder/decoder.c            |   2
-rw-r--r--  third_party/aom/av1/decoder/decoder.h            |   2
-rw-r--r--  third_party/aom/av1/decoder/decodetxb.c          |   2
-rw-r--r--  third_party/aom/av1/decoder/detokenize.c         | 122
-rw-r--r--  third_party/aom/av1/decoder/inspection.c         |  13
-rw-r--r--  third_party/aom/av1/decoder/inspection.h         |   5
-rw-r--r--  third_party/aom/av1/decoder/laplace_decoder.c    |   4
9 files changed, 658 insertions, 752 deletions
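
The largest functional addition in this patch is the CONFIG_DPCM_INTRA reconstruction path in decodeframe.c (process_block_dpcm_vert, process_block_dpcm_horz, and their high-bit-depth variants). The sketch below is a simplified, self-contained illustration of the vertical case, not the library code: each row is predicted from the reconstructed row above it, then that row's inverse-transformed residual is added. The helper inverse_tx1d_identity is a hypothetical stand-in for the 1-D inverse transform that av1_get_dpcm_inv_txfm_add_func() selects in the real decoder.

#include <stdint.h>
#include <string.h>

typedef int32_t tran_low_t;

/* Hypothetical stand-in for the 1-D inverse transform + add step: treats the
 * coefficients as spatial residuals (identity transform) and clamps to 8 bit.
 * The real decoder dispatches on the transform width and tx_type_1d instead. */
static void inverse_tx1d_identity(const tran_low_t *dqcoeff, int width,
                                  uint8_t *dst) {
  for (int c = 0; c < width; ++c) {
    const int v = dst[c] + (int)dqcoeff[c];
    dst[c] = (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
  }
}

/* Vertical DPCM reconstruction: row r uses reconstructed row r-1 as its
 * predictor, so prediction and residual addition are interleaved row by row.
 * Row 0 keeps the predictor already written by the regular intra prediction
 * step that runs before this function in the decode flow. */
static void dpcm_vert_sketch(int width, int height, const tran_low_t *dqcoeff,
                             uint8_t *dst, int dst_stride) {
  for (int r = 0; r < height; ++r) {
    if (r > 0) memcpy(dst, dst - dst_stride, width * sizeof(dst[0]));
    inverse_tx1d_identity(dqcoeff, width, dst);
    dqcoeff += width;
    dst += dst_stride;
  }
}
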
diff --git a/third_party/aom/av1/decoder/decodeframe.c b/third_party/aom/av1/decoder/decodeframe.c index 289d38670..610519981 100644 --- a/third_party/aom/av1/decoder/decodeframe.c +++ b/third_party/aom/av1/decoder/decodeframe.c @@ -92,7 +92,7 @@ static size_t read_uncompressed_header(AV1Decoder *pbi, struct aom_read_bit_buffer *rb); static int is_compound_reference_allowed(const AV1_COMMON *cm) { -#if CONFIG_LOWDELAY_COMPOUND // Normative in decoder +#if CONFIG_ONE_SIDED_COMPOUND // Normative in decoder return !frame_is_intra_only(cm); #else int i; @@ -190,7 +190,6 @@ static void read_switchable_interp_probs(FRAME_CONTEXT *fc, aom_reader *r) { #endif static void read_inter_mode_probs(FRAME_CONTEXT *fc, aom_reader *r) { -#if CONFIG_REF_MV int i; for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i) av1_diff_update_prob(r, &fc->newmv_prob[i], ACCT_STR); @@ -200,18 +199,6 @@ static void read_inter_mode_probs(FRAME_CONTEXT *fc, aom_reader *r) { av1_diff_update_prob(r, &fc->refmv_prob[i], ACCT_STR); for (i = 0; i < DRL_MODE_CONTEXTS; ++i) av1_diff_update_prob(r, &fc->drl_prob[i], ACCT_STR); -#else -#if !CONFIG_EC_ADAPT - int i, j; - for (i = 0; i < INTER_MODE_CONTEXTS; ++i) { - for (j = 0; j < INTER_MODES - 1; ++j) - av1_diff_update_prob(r, &fc->inter_mode_probs[i][j], ACCT_STR); - } -#else - (void)fc; - (void)r; -#endif -#endif } #if CONFIG_EXT_INTER @@ -367,9 +354,7 @@ static int av1_pvq_decode_helper(MACROBLOCKD *xd, tran_low_t *ref_coeff, od_coeff ref_int32[OD_TXSIZE_MAX * OD_TXSIZE_MAX]; od_coeff out_int32[OD_TXSIZE_MAX * OD_TXSIZE_MAX]; -#if CONFIG_HIGHBITDEPTH hbd_downshift = xd->bd - 8; -#endif // CONFIG_HIGHBITDEPTH od_raster_to_coding_order(ref_coeff_pvq, blk_size, tx_type, ref_coeff, blk_size); @@ -533,6 +518,133 @@ static int get_block_idx(const MACROBLOCKD *xd, int plane, int row, int col) { return row * max_blocks_wide + col * txh_unit; } +#if CONFIG_DPCM_INTRA +static void process_block_dpcm_vert(TX_SIZE tx_size, TX_TYPE_1D tx_type_1d, + const tran_low_t *dqcoeff, uint8_t *dst, + int dst_stride) { + const int tx1d_width = tx_size_wide[tx_size]; + const int tx1d_height = tx_size_high[tx_size]; + dpcm_inv_txfm_add_func inverse_tx = + av1_get_dpcm_inv_txfm_add_func(tx1d_width); + for (int r = 0; r < tx1d_height; ++r) { + if (r > 0) memcpy(dst, dst - dst_stride, tx1d_width * sizeof(dst[0])); + inverse_tx(dqcoeff, 1, tx_type_1d, dst); + dqcoeff += tx1d_width; + dst += dst_stride; + } +} + +static void process_block_dpcm_horz(TX_SIZE tx_size, TX_TYPE_1D tx_type_1d, + const tran_low_t *dqcoeff, uint8_t *dst, + int dst_stride) { + const int tx1d_width = tx_size_wide[tx_size]; + const int tx1d_height = tx_size_high[tx_size]; + dpcm_inv_txfm_add_func inverse_tx = + av1_get_dpcm_inv_txfm_add_func(tx1d_height); + tran_low_t tx_buff[64]; + for (int c = 0; c < tx1d_width; ++c, ++dqcoeff, ++dst) { + for (int r = 0; r < tx1d_height; ++r) { + if (c > 0) dst[r * dst_stride] = dst[r * dst_stride - 1]; + tx_buff[r] = dqcoeff[r * tx1d_width]; + } + inverse_tx(tx_buff, dst_stride, tx_type_1d, dst); + } +} + +#if CONFIG_HIGHBITDEPTH +static void hbd_process_block_dpcm_vert(TX_SIZE tx_size, TX_TYPE_1D tx_type_1d, + int bd, const tran_low_t *dqcoeff, + uint8_t *dst8, int dst_stride) { + uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); + const int tx1d_width = tx_size_wide[tx_size]; + const int tx1d_height = tx_size_high[tx_size]; + hbd_dpcm_inv_txfm_add_func inverse_tx = + av1_get_hbd_dpcm_inv_txfm_add_func(tx1d_width); + for (int r = 0; r < tx1d_height; ++r) { + if (r > 0) memcpy(dst, dst - dst_stride, tx1d_width * 
sizeof(dst[0])); + inverse_tx(dqcoeff, 1, tx_type_1d, bd, dst); + dqcoeff += tx1d_width; + dst += dst_stride; + } +} + +static void hbd_process_block_dpcm_horz(TX_SIZE tx_size, TX_TYPE_1D tx_type_1d, + int bd, const tran_low_t *dqcoeff, + uint8_t *dst8, int dst_stride) { + uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); + const int tx1d_width = tx_size_wide[tx_size]; + const int tx1d_height = tx_size_high[tx_size]; + hbd_dpcm_inv_txfm_add_func inverse_tx = + av1_get_hbd_dpcm_inv_txfm_add_func(tx1d_height); + tran_low_t tx_buff[64]; + switch (tx1d_height) { + case 4: inverse_tx = av1_hbd_dpcm_inv_txfm_add_4_c; break; + case 8: inverse_tx = av1_hbd_dpcm_inv_txfm_add_8_c; break; + case 16: inverse_tx = av1_hbd_dpcm_inv_txfm_add_16_c; break; + case 32: inverse_tx = av1_hbd_dpcm_inv_txfm_add_32_c; break; + default: assert(0); + } + + for (int c = 0; c < tx1d_width; ++c, ++dqcoeff, ++dst) { + for (int r = 0; r < tx1d_height; ++r) { + if (c > 0) dst[r * dst_stride] = dst[r * dst_stride - 1]; + tx_buff[r] = dqcoeff[r * tx1d_width]; + } + inverse_tx(tx_buff, dst_stride, tx_type_1d, bd, dst); + } +} +#endif // CONFIG_HIGHBITDEPTH + +static void inverse_transform_block_dpcm(MACROBLOCKD *xd, int plane, + PREDICTION_MODE mode, TX_SIZE tx_size, + TX_TYPE tx_type, uint8_t *dst, + int dst_stride, int16_t scan_line) { + struct macroblockd_plane *const pd = &xd->plane[plane]; + tran_low_t *const dqcoeff = pd->dqcoeff; + TX_TYPE_1D tx_type_1d = DCT_1D; + switch (tx_type) { + case IDTX: tx_type_1d = IDTX_1D; break; + case V_DCT: + assert(mode == H_PRED); + tx_type_1d = DCT_1D; + break; + case H_DCT: + assert(mode == V_PRED); + tx_type_1d = DCT_1D; + break; + default: assert(0); + } + switch (mode) { + case V_PRED: +#if CONFIG_HIGHBITDEPTH + if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { + hbd_process_block_dpcm_vert(tx_size, tx_type_1d, xd->bd, dqcoeff, dst, + dst_stride); + } else { +#endif // CONFIG_HIGHBITDEPTH + process_block_dpcm_vert(tx_size, tx_type_1d, dqcoeff, dst, dst_stride); +#if CONFIG_HIGHBITDEPTH + } +#endif // CONFIG_HIGHBITDEPTH + break; + case H_PRED: +#if CONFIG_HIGHBITDEPTH + if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { + hbd_process_block_dpcm_horz(tx_size, tx_type_1d, xd->bd, dqcoeff, dst, + dst_stride); + } else { +#endif // CONFIG_HIGHBITDEPTH + process_block_dpcm_horz(tx_size, tx_type_1d, dqcoeff, dst, dst_stride); +#if CONFIG_HIGHBITDEPTH + } +#endif // CONFIG_HIGHBITDEPTH + break; + default: assert(0); + } + memset(dqcoeff, 0, (scan_line + 1) * sizeof(dqcoeff[0])); +} +#endif // CONFIG_DPCM_INTRA + static void predict_and_reconstruct_intra_block( AV1_COMMON *cm, MACROBLOCKD *const xd, aom_reader *const r, MB_MODE_INFO *const mbmi, int plane, int row, int col, TX_SIZE tx_size) { @@ -564,8 +676,22 @@ static void predict_and_reconstruct_intra_block( if (eob) { uint8_t *dst = &pd->dst.buf[(row * pd->dst.stride + col) << tx_size_wide_log2[0]]; - inverse_transform_block(xd, plane, tx_type, tx_size, dst, pd->dst.stride, - max_scan_line, eob); +#if CONFIG_DPCM_INTRA + const int block_raster_idx = + av1_block_index_to_raster_order(tx_size, block_idx); + const PREDICTION_MODE mode = (plane == 0) + ? 
get_y_mode(xd->mi[0], block_raster_idx) + : mbmi->uv_mode; + if (av1_use_dpcm_intra(plane, mode, tx_type, mbmi)) { + inverse_transform_block_dpcm(xd, plane, mode, tx_size, tx_type, dst, + pd->dst.stride, max_scan_line); + } else { +#endif // CONFIG_DPCM_INTRA + inverse_transform_block(xd, plane, tx_type, tx_size, dst, + pd->dst.stride, max_scan_line, eob); +#if CONFIG_DPCM_INTRA + } +#endif // CONFIG_DPCM_INTRA } #else TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx, tx_size); @@ -605,10 +731,9 @@ static void decode_reconstruct_tx(AV1_COMMON *cm, MACROBLOCKD *const xd, PLANE_TYPE plane_type = get_plane_type(plane); int block_idx = get_block_idx(xd, plane, blk_row, blk_col); #if CONFIG_LV_MAP - (void)segment_id; int16_t max_scan_line = 0; int eob; - av1_read_coeffs_txb_facade(cm, xd, r, row, col, block_idx, plane, + av1_read_coeffs_txb_facade(cm, xd, r, blk_row, blk_col, block_idx, plane, pd->dqcoeff, &max_scan_line, &eob); // tx_type will be read out in av1_read_coeffs_txb_facade TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx, plane_tx_size); @@ -628,6 +753,7 @@ static void decode_reconstruct_tx(AV1_COMMON *cm, MACROBLOCKD *const xd, } else { const TX_SIZE sub_txs = sub_tx_size_map[tx_size]; const int bsl = tx_size_wide_unit[sub_txs]; + assert(sub_txs < tx_size); int i; assert(bsl > 0); @@ -890,13 +1016,13 @@ static void dec_predict_b_extend( (c >> xd->plane[2].subsampling_x); if (!b_sub8x8) - av1_build_inter_predictors_sb_extend(xd, + av1_build_inter_predictors_sb_extend(&pbi->common, xd, #if CONFIG_EXT_INTER mi_row_ori, mi_col_ori, #endif // CONFIG_EXT_INTER mi_row_pred, mi_col_pred, bsize_pred); else - av1_build_inter_predictors_sb_sub8x8_extend(xd, + av1_build_inter_predictors_sb_sub8x8_extend(&pbi->common, xd, #if CONFIG_EXT_INTER mi_row_ori, mi_col_ori, #endif // CONFIG_EXT_INTER @@ -1584,9 +1710,11 @@ static void decode_token_and_recon_block(AV1Decoder *const pbi, #endif #if CONFIG_CB4X4 - if (mbmi->skip) reset_skip_context(xd, bsize); + if (mbmi->skip) av1_reset_skip_context(xd, mi_row, mi_col, bsize); #else - if (mbmi->skip) reset_skip_context(xd, AOMMAX(BLOCK_8X8, bsize)); + if (mbmi->skip) { + av1_reset_skip_context(xd, mi_row, mi_col, AOMMAX(BLOCK_8X8, bsize)); + } #endif #if CONFIG_COEF_INTERLEAVE @@ -1660,7 +1788,7 @@ static void decode_token_and_recon_block(AV1Decoder *const pbi, } } else { // Prediction - av1_build_inter_predictors_sb(xd, mi_row, mi_col, NULL, + av1_build_inter_predictors_sb(cm, xd, mi_row, mi_col, NULL, AOMMAX(bsize, BLOCK_8X8)); // Reconstruction @@ -1770,9 +1898,9 @@ static void decode_token_and_recon_block(AV1Decoder *const pbi, } #if CONFIG_CB4X4 - av1_build_inter_predictors_sb(xd, mi_row, mi_col, NULL, bsize); + av1_build_inter_predictors_sb(cm, xd, mi_row, mi_col, NULL, bsize); #else - av1_build_inter_predictors_sb(xd, mi_row, mi_col, NULL, + av1_build_inter_predictors_sb(cm, xd, mi_row, mi_col, NULL, AOMMAX(bsize, BLOCK_8X8)); #endif @@ -1966,38 +2094,23 @@ static PARTITION_TYPE read_partition(AV1_COMMON *cm, MACROBLOCKD *xd, #if CONFIG_EC_ADAPT FRAME_CONTEXT *ec_ctx = xd->tile_ctx; (void)cm; -#elif CONFIG_EC_MULTISYMBOL +#else FRAME_CONTEXT *ec_ctx = cm->fc; #endif -#if CONFIG_EC_MULTISYMBOL aom_cdf_prob *partition_cdf = (ctx >= 0) ? 
ec_ctx->partition_cdf[ctx] : NULL; -#endif if (has_rows && has_cols) #if CONFIG_EXT_PARTITION_TYPES if (bsize <= BLOCK_8X8) -#if CONFIG_EC_MULTISYMBOL p = (PARTITION_TYPE)aom_read_symbol(r, partition_cdf, PARTITION_TYPES, ACCT_STR); -#else - p = (PARTITION_TYPE)aom_read_tree(r, av1_partition_tree, probs, ACCT_STR); -#endif else -#if CONFIG_EC_MULTISYMBOL p = (PARTITION_TYPE)aom_read_symbol(r, partition_cdf, EXT_PARTITION_TYPES, ACCT_STR); #else - p = (PARTITION_TYPE)aom_read_tree(r, av1_ext_partition_tree, probs, - ACCT_STR); -#endif -#else -#if CONFIG_EC_MULTISYMBOL p = (PARTITION_TYPE)aom_read_symbol(r, partition_cdf, PARTITION_TYPES, ACCT_STR); -#else - p = (PARTITION_TYPE)aom_read_tree(r, av1_partition_tree, probs, ACCT_STR); -#endif #endif // CONFIG_EXT_PARTITION_TYPES else if (!has_rows && has_cols) p = aom_read(r, probs[1], ACCT_STR) ? PARTITION_SPLIT : PARTITION_HORZ; @@ -2283,7 +2396,7 @@ static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd, set_skip_context(xd, mi_row, mi_col); skip = read_skip(cm, xd, xd->mi[0]->mbmi.segment_id_supertx, r); if (skip) { - reset_skip_context(xd, bsize); + av1_reset_skip_context(xd, mi_row, mi_col, bsize); } else { #if CONFIG_EXT_TX if (get_ext_tx_types(supertx_size, bsize, 1, cm->reduced_tx_set_used) > @@ -2353,19 +2466,7 @@ static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd, #endif // CONFIG_EXT_PARTITION_TYPES #if CONFIG_CDEF -#if CONFIG_EXT_PARTITION - if (cm->sb_size == BLOCK_128X128 && bsize == BLOCK_128X128) { - if (!sb_all_skip(cm, mi_row, mi_col)) { - cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]->mbmi.cdef_strength = - aom_read_literal(r, cm->cdef_bits, ACCT_STR); - } else { - cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]->mbmi.cdef_strength = - 0; - } - } else if (cm->sb_size == BLOCK_64X64 && bsize == BLOCK_64X64) { -#else - if (bsize == BLOCK_64X64) { -#endif + if (bsize == cm->sb_size) { if (!sb_all_skip(cm, mi_row, mi_col)) { cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]->mbmi.cdef_strength = aom_read_literal(r, cm->cdef_bits, ACCT_STR); @@ -2400,7 +2501,7 @@ static void setup_bool_decoder(const uint8_t *data, const uint8_t *data_end, "Failed to allocate bool decoder %d", 1); } -#if !CONFIG_PVQ && !(CONFIG_EC_ADAPT && CONFIG_NEW_TOKENSET) && !CONFIG_LV_MAP +#if !CONFIG_PVQ && !CONFIG_EC_ADAPT && !CONFIG_LV_MAP static void read_coef_probs_common(av1_coeff_probs_model *coef_probs, aom_reader *r) { int i, j, k, l, m; @@ -2488,8 +2589,13 @@ static void decode_restoration_mode(AV1_COMMON *cm, aom_rb_read_bit(rb) ? RESTORE_SWITCHABLE : RESTORE_NONE; } for (p = 1; p < MAX_MB_PLANE; ++p) { - cm->rst_info[p].frame_restoration_type = - aom_rb_read_bit(rb) ? RESTORE_WIENER : RESTORE_NONE; + rsi = &cm->rst_info[p]; + if (aom_rb_read_bit(rb)) { + rsi->frame_restoration_type = + aom_rb_read_bit(rb) ? 
RESTORE_SGRPROJ : RESTORE_WIENER; + } else { + rsi->frame_restoration_type = RESTORE_NONE; + } } cm->rst_info[0].restoration_tilesize = RESTORATION_TILESIZE_MAX; @@ -2514,19 +2620,19 @@ static void read_wiener_filter(WienerInfo *wiener_info, aom_read_primitive_refsubexpfin( rb, WIENER_FILT_TAP0_MAXV - WIENER_FILT_TAP0_MINV + 1, WIENER_FILT_TAP0_SUBEXP_K, - ref_wiener_info->vfilter[0] - WIENER_FILT_TAP0_MINV) + + ref_wiener_info->vfilter[0] - WIENER_FILT_TAP0_MINV, ACCT_STR) + WIENER_FILT_TAP0_MINV; wiener_info->vfilter[1] = wiener_info->vfilter[WIENER_WIN - 2] = aom_read_primitive_refsubexpfin( rb, WIENER_FILT_TAP1_MAXV - WIENER_FILT_TAP1_MINV + 1, WIENER_FILT_TAP1_SUBEXP_K, - ref_wiener_info->vfilter[1] - WIENER_FILT_TAP1_MINV) + + ref_wiener_info->vfilter[1] - WIENER_FILT_TAP1_MINV, ACCT_STR) + WIENER_FILT_TAP1_MINV; wiener_info->vfilter[2] = wiener_info->vfilter[WIENER_WIN - 3] = aom_read_primitive_refsubexpfin( rb, WIENER_FILT_TAP2_MAXV - WIENER_FILT_TAP2_MINV + 1, WIENER_FILT_TAP2_SUBEXP_K, - ref_wiener_info->vfilter[2] - WIENER_FILT_TAP2_MINV) + + ref_wiener_info->vfilter[2] - WIENER_FILT_TAP2_MINV, ACCT_STR) + WIENER_FILT_TAP2_MINV; // The central element has an implicit +WIENER_FILT_STEP wiener_info->vfilter[WIENER_HALFWIN] = @@ -2537,19 +2643,19 @@ static void read_wiener_filter(WienerInfo *wiener_info, aom_read_primitive_refsubexpfin( rb, WIENER_FILT_TAP0_MAXV - WIENER_FILT_TAP0_MINV + 1, WIENER_FILT_TAP0_SUBEXP_K, - ref_wiener_info->hfilter[0] - WIENER_FILT_TAP0_MINV) + + ref_wiener_info->hfilter[0] - WIENER_FILT_TAP0_MINV, ACCT_STR) + WIENER_FILT_TAP0_MINV; wiener_info->hfilter[1] = wiener_info->hfilter[WIENER_WIN - 2] = aom_read_primitive_refsubexpfin( rb, WIENER_FILT_TAP1_MAXV - WIENER_FILT_TAP1_MINV + 1, WIENER_FILT_TAP1_SUBEXP_K, - ref_wiener_info->hfilter[1] - WIENER_FILT_TAP1_MINV) + + ref_wiener_info->hfilter[1] - WIENER_FILT_TAP1_MINV, ACCT_STR) + WIENER_FILT_TAP1_MINV; wiener_info->hfilter[2] = wiener_info->hfilter[WIENER_WIN - 3] = aom_read_primitive_refsubexpfin( rb, WIENER_FILT_TAP2_MAXV - WIENER_FILT_TAP2_MINV + 1, WIENER_FILT_TAP2_SUBEXP_K, - ref_wiener_info->hfilter[2] - WIENER_FILT_TAP2_MINV) + + ref_wiener_info->hfilter[2] - WIENER_FILT_TAP2_MINV, ACCT_STR) + WIENER_FILT_TAP2_MINV; // The central element has an implicit +WIENER_FILT_STEP wiener_info->hfilter[WIENER_HALFWIN] = @@ -2564,12 +2670,12 @@ static void read_sgrproj_filter(SgrprojInfo *sgrproj_info, sgrproj_info->xqd[0] = aom_read_primitive_refsubexpfin( rb, SGRPROJ_PRJ_MAX0 - SGRPROJ_PRJ_MIN0 + 1, SGRPROJ_PRJ_SUBEXP_K, - ref_sgrproj_info->xqd[0] - SGRPROJ_PRJ_MIN0) + + ref_sgrproj_info->xqd[0] - SGRPROJ_PRJ_MIN0, ACCT_STR) + SGRPROJ_PRJ_MIN0; sgrproj_info->xqd[1] = aom_read_primitive_refsubexpfin( rb, SGRPROJ_PRJ_MAX1 - SGRPROJ_PRJ_MIN1 + 1, SGRPROJ_PRJ_SUBEXP_K, - ref_sgrproj_info->xqd[1] - SGRPROJ_PRJ_MIN1) + + ref_sgrproj_info->xqd[1] - SGRPROJ_PRJ_MIN1, ACCT_STR) + SGRPROJ_PRJ_MIN1; memcpy(ref_sgrproj_info, sgrproj_info, sizeof(*sgrproj_info)); } @@ -2622,6 +2728,7 @@ static void decode_restoration(AV1_COMMON *cm, aom_reader *rb) { } for (p = 1; p < MAX_MB_PLANE; ++p) { set_default_wiener(&ref_wiener_info); + set_default_sgrproj(&ref_sgrproj_info); rsi = &cm->rst_info[p]; if (rsi->frame_restoration_type == RESTORE_WIENER) { for (i = 0; i < ntiles_uv; ++i) { @@ -2635,6 +2742,21 @@ static void decode_restoration(AV1_COMMON *cm, aom_reader *rb) { read_wiener_filter(&rsi->wiener_info[i], &ref_wiener_info, rb); } } + } else if (rsi->frame_restoration_type == RESTORE_SGRPROJ) { + for (i = 0; i < 
ntiles_uv; ++i) { + if (ntiles_uv > 1) + rsi->restoration_type[i] = + aom_read(rb, RESTORE_NONE_SGRPROJ_PROB, ACCT_STR) + ? RESTORE_SGRPROJ + : RESTORE_NONE; + else + rsi->restoration_type[i] = RESTORE_SGRPROJ; + if (rsi->restoration_type[i] == RESTORE_SGRPROJ) { + read_sgrproj_filter(&rsi->sgrproj_info[i], &ref_sgrproj_info, rb); + } + } + } else if (rsi->frame_restoration_type != RESTORE_NONE) { + assert(0); } } } @@ -2703,88 +2825,46 @@ static void setup_quantization(AV1_COMMON *const cm, #endif } +// Build y/uv dequant values based on segmentation. static void setup_segmentation_dequant(AV1_COMMON *const cm) { - // Build y/uv dequant values based on segmentation. - int i = 0; -#if CONFIG_AOM_QM - int lossless; - int j = 0; - int qmlevel; - int using_qm = cm->using_qmatrix; - int minqm = cm->min_qmlevel; - int maxqm = cm->max_qmlevel; -#endif -#if CONFIG_NEW_QUANT - int b; - int dq; -#endif // CONFIG_NEW_QUANT - if (cm->seg.enabled) { - for (i = 0; i < MAX_SEGMENTS; ++i) { - const int qindex = av1_get_qindex(&cm->seg, i, cm->base_qindex); - cm->y_dequant[i][0] = - av1_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth); - cm->y_dequant[i][1] = av1_ac_quant(qindex, 0, cm->bit_depth); - cm->uv_dequant[i][0] = - av1_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth); - cm->uv_dequant[i][1] = - av1_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth); #if CONFIG_AOM_QM - lossless = qindex == 0 && cm->y_dc_delta_q == 0 && - cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0; - // NB: depends on base index so there is only 1 set per frame - // No quant weighting when lossless or signalled not using QM - qmlevel = (lossless || using_qm == 0) - ? NUM_QM_LEVELS - 1 - : aom_get_qmlevel(cm->base_qindex, minqm, maxqm); - for (j = 0; j < TX_SIZES; ++j) { - cm->y_iqmatrix[i][1][j] = aom_iqmatrix(cm, qmlevel, 0, j, 1); - cm->y_iqmatrix[i][0][j] = aom_iqmatrix(cm, qmlevel, 0, j, 0); - cm->uv_iqmatrix[i][1][j] = aom_iqmatrix(cm, qmlevel, 1, j, 1); - cm->uv_iqmatrix[i][0][j] = aom_iqmatrix(cm, qmlevel, 1, j, 0); - } -#endif // CONFIG_AOM_QM -#if CONFIG_NEW_QUANT - for (dq = 0; dq < QUANT_PROFILES; dq++) { - for (b = 0; b < COEF_BANDS; ++b) { - av1_get_dequant_val_nuq(cm->y_dequant[i][b != 0], b, - cm->y_dequant_nuq[i][dq][b], NULL, dq); - av1_get_dequant_val_nuq(cm->uv_dequant[i][b != 0], b, - cm->uv_dequant_nuq[i][dq][b], NULL, dq); - } - } -#endif // CONFIG_NEW_QUANT - } - } else { - const int qindex = cm->base_qindex; - // When segmentation is disabled, only the first value is used. The - // remaining are don't cares. - cm->y_dequant[0][0] = av1_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth); - cm->y_dequant[0][1] = av1_ac_quant(qindex, 0, cm->bit_depth); - cm->uv_dequant[0][0] = + const int using_qm = cm->using_qmatrix; + const int minqm = cm->min_qmlevel; + const int maxqm = cm->max_qmlevel; +#endif + // When segmentation is disabled, only the first value is used. The + // remaining are don't cares. + const int max_segments = cm->seg.enabled ? 
MAX_SEGMENTS : 1; + for (int i = 0; i < max_segments; ++i) { + const int qindex = av1_get_qindex(&cm->seg, i, cm->base_qindex); + cm->y_dequant[i][0] = av1_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth); + cm->y_dequant[i][1] = av1_ac_quant(qindex, 0, cm->bit_depth); + cm->uv_dequant[i][0] = av1_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth); - cm->uv_dequant[0][1] = + cm->uv_dequant[i][1] = av1_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth); #if CONFIG_AOM_QM - lossless = qindex == 0 && cm->y_dc_delta_q == 0 && cm->uv_dc_delta_q == 0 && - cm->uv_ac_delta_q == 0; + const int lossless = qindex == 0 && cm->y_dc_delta_q == 0 && + cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0; + // NB: depends on base index so there is only 1 set per frame // No quant weighting when lossless or signalled not using QM - qmlevel = (lossless || using_qm == 0) - ? NUM_QM_LEVELS - 1 - : aom_get_qmlevel(cm->base_qindex, minqm, maxqm); - for (j = 0; j < TX_SIZES; ++j) { + const int qmlevel = (lossless || using_qm == 0) + ? NUM_QM_LEVELS - 1 + : aom_get_qmlevel(cm->base_qindex, minqm, maxqm); + for (int j = 0; j < TX_SIZES_ALL; ++j) { cm->y_iqmatrix[i][1][j] = aom_iqmatrix(cm, qmlevel, 0, j, 1); cm->y_iqmatrix[i][0][j] = aom_iqmatrix(cm, qmlevel, 0, j, 0); cm->uv_iqmatrix[i][1][j] = aom_iqmatrix(cm, qmlevel, 1, j, 1); cm->uv_iqmatrix[i][0][j] = aom_iqmatrix(cm, qmlevel, 1, j, 0); } -#endif +#endif // CONFIG_AOM_QM #if CONFIG_NEW_QUANT - for (dq = 0; dq < QUANT_PROFILES; dq++) { - for (b = 0; b < COEF_BANDS; ++b) { - av1_get_dequant_val_nuq(cm->y_dequant[0][b != 0], b, - cm->y_dequant_nuq[0][dq][b], NULL, dq); - av1_get_dequant_val_nuq(cm->uv_dequant[0][b != 0], b, - cm->uv_dequant_nuq[0][dq][b], NULL, dq); + for (int dq = 0; dq < QUANT_PROFILES; dq++) { + for (int b = 0; b < COEF_BANDS; ++b) { + av1_get_dequant_val_nuq(cm->y_dequant[i][b != 0], b, + cm->y_dequant_nuq[i][dq][b], NULL, dq); + av1_get_dequant_val_nuq(cm->uv_dequant[i][b != 0], b, + cm->uv_dequant_nuq[i][dq][b], NULL, dq); } } #endif // CONFIG_NEW_QUANT @@ -2808,12 +2888,7 @@ static void setup_render_size(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) { static void setup_superres_size(AV1_COMMON *const cm, struct aom_read_bit_buffer *rb, int *width, int *height) { - // TODO(afergs): Test this behaviour - // Frame superres is probably in compatible with this render resolution - assert(cm->width == cm->render_width && cm->height == cm->render_height); - - cm->superres_width = cm->width; - cm->superres_height = cm->height; + // TODO(afergs): Save input resolution - it's the upscaled resolution if (aom_rb_read_bit(rb)) { cm->superres_scale_numerator = (uint8_t)aom_rb_read_literal(rb, SUPERRES_SCALE_BITS); @@ -2824,10 +2899,9 @@ static void setup_superres_size(AV1_COMMON *const cm, // by default (ie. when it isn't sent)... // resize_context_buffers() will change cm->width to equal cm->render_width, // then they'll be the same again - *width = cm->render_width = - cm->width * cm->superres_scale_numerator / SUPERRES_SCALE_DENOMINATOR; - *height = cm->render_height = - cm->height * cm->superres_scale_numerator / SUPERRES_SCALE_DENOMINATOR; + *width = *width * cm->superres_scale_numerator / SUPERRES_SCALE_DENOMINATOR; + *height = + *width * cm->superres_scale_numerator / SUPERRES_SCALE_DENOMINATOR; } else { // 1:1 scaling - ie. 
no scaling, scale not provided cm->superres_scale_numerator = SUPERRES_SCALE_DENOMINATOR; @@ -2940,6 +3014,9 @@ static void setup_frame_size_with_refs(AV1_COMMON *cm, if (!found) { av1_read_frame_size(rb, &width, &height); setup_render_size(cm, rb); +#if CONFIG_FRAME_SUPERRES + setup_superres_size(cm, rb, &width, &height); +#endif // CONFIG_FRAME_SUPERRES } if (width <= 0 || height <= 0) @@ -3439,10 +3516,6 @@ static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data, #endif // CONFIG_EXT_TILE int tile_row, tile_col; -#if CONFIG_SUBFRAME_PROB_UPDATE - cm->do_subframe_update = n_tiles == 1; -#endif // CONFIG_SUBFRAME_PROB_UPDATE - if (cm->lf.filter_level && !cm->skip_loop_filter && pbi->lf_worker.data1 == NULL) { CHECK_MEM_ERROR(cm, pbi->lf_worker.data1, @@ -3594,19 +3667,6 @@ static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data, if (pbi->mb.corrupted) aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, "Failed to decode tile data"); -#if CONFIG_SUBFRAME_PROB_UPDATE - if (cm->do_subframe_update && - cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) { - const int mi_rows_per_update = - MI_SIZE * AOMMAX(cm->mi_rows / MI_SIZE / COEF_PROBS_BUFS, 1); - if ((mi_row + MI_SIZE) % mi_rows_per_update == 0 && - mi_row + MI_SIZE < cm->mi_rows && - cm->coef_probs_update_idx < COEF_PROBS_BUFS - 1) { - av1_partial_adapt_probs(cm, mi_row, mi_col); - ++cm->coef_probs_update_idx; - } - } -#endif // CONFIG_SUBFRAME_PROB_UPDATE } } @@ -3971,15 +4031,7 @@ static void read_bitdepth_colorspace_sampling(AV1_COMMON *cm, } #if CONFIG_HIGHBITDEPTH - if (cm->bit_depth > AOM_BITS_8) { - cm->use_highbitdepth = 1; - } else { -#if CONFIG_LOWBITDEPTH - cm->use_highbitdepth = 0; -#else - cm->use_highbitdepth = 1; -#endif - } + cm->use_highbitdepth = cm->bit_depth > AOM_BITS_8 || !CONFIG_LOWBITDEPTH; #endif cm->color_space = aom_rb_read_literal(rb, 3); @@ -4022,6 +4074,28 @@ void read_sequence_header(SequenceHeader *seq_params) { } #endif +#if CONFIG_EXT_INTER +static void read_compound_tools(AV1_COMMON *cm, + struct aom_read_bit_buffer *rb) { + (void)cm; + (void)rb; +#if CONFIG_INTERINTRA + if (!frame_is_intra_only(cm) && cm->reference_mode != COMPOUND_REFERENCE) { + cm->allow_interintra_compound = aom_rb_read_bit(rb); + } else { + cm->allow_interintra_compound = 0; + } +#endif // CONFIG_INTERINTRA +#if CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT + if (!frame_is_intra_only(cm) && cm->reference_mode != SINGLE_REFERENCE) { + cm->allow_masked_compound = aom_rb_read_bit(rb); + } else { + cm->allow_masked_compound = 0; + } +#endif // CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT +} +#endif // CONFIG_EXT_INTER + static size_t read_uncompressed_header(AV1Decoder *pbi, struct aom_read_bit_buffer *rb) { AV1_COMMON *const cm = &pbi->common; @@ -4163,14 +4237,20 @@ static size_t read_uncompressed_header(AV1Decoder *pbi, #if CONFIG_ANS && ANS_MAX_SYMBOLS cm->ans_window_size_log2 = aom_rb_read_literal(rb, 4) + 8; #endif // CONFIG_ANS && ANS_MAX_SYMBOLS -#if CONFIG_PALETTE +#if CONFIG_PALETTE || CONFIG_INTRABC cm->allow_screen_content_tools = aom_rb_read_bit(rb); -#endif // CONFIG_PALETTE +#endif // CONFIG_PALETTE || CONFIG_INTRABC +#if CONFIG_TEMPMV_SIGNALING + cm->use_prev_frame_mvs = 0; +#endif } else { cm->intra_only = cm->show_frame ? 
0 : aom_rb_read_bit(rb); -#if CONFIG_PALETTE +#if CONFIG_PALETTE || CONFIG_INTRABC if (cm->intra_only) cm->allow_screen_content_tools = aom_rb_read_bit(rb); -#endif // CONFIG_PALETTE +#endif // CONFIG_PALETTE || CONFIG_INTRABC +#if CONFIG_TEMPMV_SIGNALING + if (cm->intra_only || cm->error_resilient_mode) cm->use_prev_frame_mvs = 0; +#endif if (cm->error_resilient_mode) { cm->reset_frame_context = RESET_FRAME_CONTEXT_ALL; } else { @@ -4293,9 +4373,7 @@ static size_t read_uncompressed_header(AV1Decoder *pbi, } #endif -#if CONFIG_HIGHBITDEPTH get_frame_new_buffer(cm)->bit_depth = cm->bit_depth; -#endif get_frame_new_buffer(cm)->color_space = cm->color_space; get_frame_new_buffer(cm)->color_range = cm->color_range; get_frame_new_buffer(cm)->render_width = cm->render_width; @@ -4361,9 +4439,7 @@ static size_t read_uncompressed_header(AV1Decoder *pbi, decode_restoration_mode(cm, rb); #endif // CONFIG_LOOP_RESTORATION setup_quantization(cm, rb); -#if CONFIG_HIGHBITDEPTH xd->bd = (int)cm->bit_depth; -#endif #if CONFIG_Q_ADAPT_PROBS av1_default_coef_probs(cm); @@ -4427,6 +4503,9 @@ static size_t read_uncompressed_header(AV1Decoder *pbi, setup_segmentation_dequant(cm); cm->tx_mode = read_tx_mode(cm, xd, rb); cm->reference_mode = read_frame_reference_mode(cm, rb); +#if CONFIG_EXT_INTER + read_compound_tools(cm, rb); +#endif // CONFIG_EXT_INTER #if CONFIG_EXT_TX cm->reduced_tx_set_used = aom_rb_read_bit(rb); @@ -4505,37 +4584,41 @@ static void read_global_motion_params(WarpedMotionParams *params, params->wmmat[6] = aom_read_signed_primitive_refsubexpfin( r, GM_ROW3HOMO_MAX + 1, SUBEXPFIN_K, - (ref_params->wmmat[6] >> GM_ROW3HOMO_PREC_DIFF)) * + (ref_params->wmmat[6] >> GM_ROW3HOMO_PREC_DIFF), ACCT_STR) * GM_ROW3HOMO_DECODE_FACTOR; if (type != VERTRAPEZOID) params->wmmat[7] = aom_read_signed_primitive_refsubexpfin( r, GM_ROW3HOMO_MAX + 1, SUBEXPFIN_K, - (ref_params->wmmat[7] >> GM_ROW3HOMO_PREC_DIFF)) * + (ref_params->wmmat[7] >> GM_ROW3HOMO_PREC_DIFF), ACCT_STR) * GM_ROW3HOMO_DECODE_FACTOR; case AFFINE: case ROTZOOM: params->wmmat[2] = aom_read_signed_primitive_refsubexpfin( r, GM_ALPHA_MAX + 1, SUBEXPFIN_K, (ref_params->wmmat[2] >> GM_ALPHA_PREC_DIFF) - - (1 << GM_ALPHA_PREC_BITS)) * + (1 << GM_ALPHA_PREC_BITS), + ACCT_STR) * GM_ALPHA_DECODE_FACTOR + (1 << WARPEDMODEL_PREC_BITS); if (type != VERTRAPEZOID) - params->wmmat[3] = aom_read_signed_primitive_refsubexpfin( - r, GM_ALPHA_MAX + 1, SUBEXPFIN_K, - (ref_params->wmmat[3] >> GM_ALPHA_PREC_DIFF)) * - GM_ALPHA_DECODE_FACTOR; + params->wmmat[3] = + aom_read_signed_primitive_refsubexpfin( + r, GM_ALPHA_MAX + 1, SUBEXPFIN_K, + (ref_params->wmmat[3] >> GM_ALPHA_PREC_DIFF), ACCT_STR) * + GM_ALPHA_DECODE_FACTOR; if (type >= AFFINE) { if (type != HORTRAPEZOID) - params->wmmat[4] = aom_read_signed_primitive_refsubexpfin( - r, GM_ALPHA_MAX + 1, SUBEXPFIN_K, - (ref_params->wmmat[4] >> GM_ALPHA_PREC_DIFF)) * - GM_ALPHA_DECODE_FACTOR; + params->wmmat[4] = + aom_read_signed_primitive_refsubexpfin( + r, GM_ALPHA_MAX + 1, SUBEXPFIN_K, + (ref_params->wmmat[4] >> GM_ALPHA_PREC_DIFF), ACCT_STR) * + GM_ALPHA_DECODE_FACTOR; params->wmmat[5] = aom_read_signed_primitive_refsubexpfin( r, GM_ALPHA_MAX + 1, SUBEXPFIN_K, (ref_params->wmmat[5] >> GM_ALPHA_PREC_DIFF) - - (1 << GM_ALPHA_PREC_BITS)) * + (1 << GM_ALPHA_PREC_BITS), + ACCT_STR) * GM_ALPHA_DECODE_FACTOR + (1 << WARPEDMODEL_PREC_BITS); } else { @@ -4552,14 +4635,16 @@ static void read_global_motion_params(WarpedMotionParams *params, trans_prec_diff = (type == TRANSLATION) ? 
GM_TRANS_ONLY_PREC_DIFF + !allow_hp : GM_TRANS_PREC_DIFF; - params->wmmat[0] = aom_read_signed_primitive_refsubexpfin( - r, (1 << trans_bits) + 1, SUBEXPFIN_K, - (ref_params->wmmat[0] >> trans_prec_diff)) * - trans_dec_factor; - params->wmmat[1] = aom_read_signed_primitive_refsubexpfin( - r, (1 << trans_bits) + 1, SUBEXPFIN_K, - (ref_params->wmmat[1] >> trans_prec_diff)) * - trans_dec_factor; + params->wmmat[0] = + aom_read_signed_primitive_refsubexpfin( + r, (1 << trans_bits) + 1, SUBEXPFIN_K, + (ref_params->wmmat[0] >> trans_prec_diff), ACCT_STR) * + trans_dec_factor; + params->wmmat[1] = + aom_read_signed_primitive_refsubexpfin( + r, (1 << trans_bits) + 1, SUBEXPFIN_K, + (ref_params->wmmat[1] >> trans_prec_diff), ACCT_STR) * + trans_dec_factor; case IDENTITY: break; default: assert(0); } @@ -4621,14 +4706,18 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data, #if !CONFIG_EC_ADAPT if (cm->tx_mode == TX_MODE_SELECT) read_tx_size_probs(fc, &r); #endif +#if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT + if (cm->tx_mode == TX_MODE_SELECT) + av1_diff_update_prob(&r, &fc->quarter_tx_size_prob, ACCT_STR); +#endif // CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT #if CONFIG_LV_MAP av1_read_txb_probs(fc, cm->tx_mode, &r); #else // CONFIG_LV_MAP #if !CONFIG_PVQ -#if !(CONFIG_EC_ADAPT && CONFIG_NEW_TOKENSET) +#if !CONFIG_EC_ADAPT read_coef_probs(fc, cm->tx_mode, &r); -#endif // !(CONFIG_EC_ADAPT && CONFIG_NEW_TOKENSET) +#endif // !CONFIG_EC_ADAPT #endif // !CONFIG_PVQ #endif // CONFIG_LV_MAP @@ -4699,24 +4788,26 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data, if (frame_is_intra_only(cm)) { av1_copy(cm->kf_y_prob, av1_kf_y_mode_prob); -#if CONFIG_EC_MULTISYMBOL av1_copy(cm->fc->kf_y_cdf, av1_kf_y_mode_cdf); -#endif #if !CONFIG_EC_ADAPT for (k = 0; k < INTRA_MODES; k++) for (j = 0; j < INTRA_MODES; j++) for (i = 0; i < INTRA_MODES - 1; ++i) av1_diff_update_prob(&r, &cm->kf_y_prob[k][j][i], ACCT_STR); #endif - } else { -#if !CONFIG_REF_MV - nmv_context *const nmvc = &fc->nmvc; +#if CONFIG_INTRABC + if (cm->allow_screen_content_tools) { + av1_diff_update_prob(&r, &fc->intrabc_prob, ACCT_STR); + } #endif + } else { read_inter_mode_probs(fc, &r); #if CONFIG_EXT_INTER read_inter_compound_mode_probs(fc, &r); - if (cm->reference_mode != COMPOUND_REFERENCE) { +#if CONFIG_INTERINTRA + if (cm->reference_mode != COMPOUND_REFERENCE && + cm->allow_interintra_compound) { for (i = 0; i < BLOCK_SIZE_GROUPS; i++) { if (is_interintra_allowed_bsize_group(i)) { av1_diff_update_prob(&r, &fc->interintra_prob[i], ACCT_STR); @@ -4726,14 +4817,17 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data, for (j = 0; j < INTERINTRA_MODES - 1; j++) av1_diff_update_prob(&r, &fc->interintra_mode_prob[i][j], ACCT_STR); } +#if CONFIG_WEDGE for (i = 0; i < BLOCK_SIZES; i++) { if (is_interintra_allowed_bsize(i) && is_interintra_wedge_used(i)) { av1_diff_update_prob(&r, &fc->wedge_interintra_prob[i], ACCT_STR); } } +#endif // CONFIG_WEDGE } +#endif // CONFIG_INTERINTRA #if CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE - if (cm->reference_mode != SINGLE_REFERENCE) { + if (cm->reference_mode != SINGLE_REFERENCE && cm->allow_masked_compound) { for (i = 0; i < BLOCK_SIZES; i++) { for (j = 0; j < COMPOUND_TYPES - 1; j++) { av1_diff_update_prob(&r, &fc->compound_type_prob[i][j], ACCT_STR); @@ -4768,12 +4862,8 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data, } #endif -#if CONFIG_REF_MV for (i = 0; i < NMV_CONTEXTS; ++i) 
read_mv_probs(&fc->nmvc[i], cm->allow_high_precision_mv, &r); -#else - read_mv_probs(nmvc, cm->allow_high_precision_mv, &r); -#endif #if !CONFIG_EC_ADAPT read_ext_tx_probs(fc, &r); #endif // EC_ADAPT @@ -4782,21 +4872,15 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data, #endif #if CONFIG_GLOBAL_MOTION read_global_motion(cm, &r); -#endif // EC_ADAPT, DAALA_EC +#endif } -#if CONFIG_EC_MULTISYMBOL && !CONFIG_EC_ADAPT -#if CONFIG_NEW_TOKENSET +#if !CONFIG_EC_ADAPT av1_coef_head_cdfs(fc); -#endif /* Make tail distribution from head */ av1_coef_pareto_cdfs(fc); -#if CONFIG_REF_MV for (i = 0; i < NMV_CONTEXTS; ++i) av1_set_mv_cdfs(&fc->nmvc[i]); -#else - av1_set_mv_cdfs(&fc->nmvc); -#endif av1_set_mode_cdfs(cm); -#endif // CONFIG_EC_MULTISYMBOL && !CONFIG_EC_ADAPT +#endif // !CONFIG_EC_ADAPT return aom_reader_has_error(&r); } @@ -4820,10 +4904,8 @@ static void debug_check_frame_counts(const AV1_COMMON *const cm) { assert(!memcmp(cm->counts.coef, zero_counts.coef, sizeof(cm->counts.coef))); assert(!memcmp(cm->counts.eob_branch, zero_counts.eob_branch, sizeof(cm->counts.eob_branch))); -#if CONFIG_EC_MULTISYMBOL assert(!memcmp(cm->counts.blockz_count, zero_counts.blockz_count, sizeof(cm->counts.blockz_count))); -#endif assert(!memcmp(cm->counts.switchable_interp, zero_counts.switchable_interp, sizeof(cm->counts.switchable_interp))); assert(!memcmp(cm->counts.inter_mode, zero_counts.inter_mode, @@ -4832,10 +4914,14 @@ static void debug_check_frame_counts(const AV1_COMMON *const cm) { assert(!memcmp(cm->counts.inter_compound_mode, zero_counts.inter_compound_mode, sizeof(cm->counts.inter_compound_mode))); +#if CONFIG_INTERINTRA assert(!memcmp(cm->counts.interintra, zero_counts.interintra, sizeof(cm->counts.interintra))); +#if CONFIG_WEDGE assert(!memcmp(cm->counts.wedge_interintra, zero_counts.wedge_interintra, sizeof(cm->counts.wedge_interintra))); +#endif // CONFIG_WEDGE +#endif // CONFIG_INTERINTRA assert(!memcmp(cm->counts.compound_interinter, zero_counts.compound_interinter, sizeof(cm->counts.compound_interinter))); @@ -4859,14 +4945,10 @@ static void debug_check_frame_counts(const AV1_COMMON *const cm) { assert(!memcmp(&cm->counts.tx_size, &zero_counts.tx_size, sizeof(cm->counts.tx_size))); assert(!memcmp(cm->counts.skip, zero_counts.skip, sizeof(cm->counts.skip))); -#if CONFIG_REF_MV assert( !memcmp(&cm->counts.mv[0], &zero_counts.mv[0], sizeof(cm->counts.mv[0]))); assert( !memcmp(&cm->counts.mv[1], &zero_counts.mv[1], sizeof(cm->counts.mv[0]))); -#else - assert(!memcmp(&cm->counts.mv, &zero_counts.mv, sizeof(cm->counts.mv))); -#endif assert(!memcmp(cm->counts.inter_ext_tx, zero_counts.inter_ext_tx, sizeof(cm->counts.inter_ext_tx))); assert(!memcmp(cm->counts.intra_ext_tx, zero_counts.intra_ext_tx, @@ -4931,6 +5013,9 @@ void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data, uint8_t clear_data[MAX_AV1_HEADER_SIZE]; size_t first_partition_size; YV12_BUFFER_CONFIG *new_fb; +#if CONFIG_EXT_REFS || CONFIG_TEMPMV_SIGNALING + RefBuffer *last_fb_ref_buf = &cm->frame_refs[LAST_FRAME - LAST_FRAME]; +#endif // CONFIG_EXT_REFS || CONFIG_TEMPMV_SIGNALING #if CONFIG_ADAPT_SCAN av1_deliver_eob_threshold(cm, xd); @@ -4958,6 +5043,18 @@ void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data, #endif new_fb = get_frame_new_buffer(cm); xd->cur_buf = new_fb; +#if CONFIG_INTRABC +#if CONFIG_HIGHBITDEPTH + av1_setup_scale_factors_for_frame( + &xd->sf_identity, xd->cur_buf->y_crop_width, xd->cur_buf->y_crop_height, + xd->cur_buf->y_crop_width, xd->cur_buf->y_crop_height, + 
cm->use_highbitdepth); +#else + av1_setup_scale_factors_for_frame( + &xd->sf_identity, xd->cur_buf->y_crop_width, xd->cur_buf->y_crop_height, + xd->cur_buf->y_crop_width, xd->cur_buf->y_crop_height); +#endif // CONFIG_HIGHBITDEPTH +#endif // CONFIG_INTRABC #if CONFIG_GLOBAL_MOTION int i; for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) { @@ -4978,26 +5075,9 @@ void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data, aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, "Truncated packet or corrupt header length"); -#if CONFIG_REF_MV cm->setup_mi(cm); -#endif -#if CONFIG_TEMPMV_SIGNALING - if (cm->use_prev_frame_mvs) { - RefBuffer *last_fb_ref_buf = &cm->frame_refs[LAST_FRAME - LAST_FRAME]; - cm->prev_frame = &cm->buffer_pool->frame_bufs[last_fb_ref_buf->idx]; - assert(!cm->error_resilient_mode && - cm->width == last_fb_ref_buf->buf->y_width && - cm->height == last_fb_ref_buf->buf->y_height && - !cm->prev_frame->intra_only); - } -#else - cm->use_prev_frame_mvs = - !cm->error_resilient_mode && cm->width == cm->last_width && - cm->height == cm->last_height && !cm->last_intra_only && - cm->last_show_frame && (cm->last_frame_type != KEY_FRAME); -#endif -#if CONFIG_EXT_REFS +#if CONFIG_EXT_REFS || CONFIG_TEMPMV_SIGNALING // NOTE(zoeliu): As cm->prev_frame can take neither a frame of // show_exisiting_frame=1, nor can it take a frame not used as // a reference, it is probable that by the time it is being @@ -5008,16 +5088,33 @@ void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data, // (1) Simply disable the use of previous frame mvs; or // (2) Have cm->prev_frame point to one reference frame buffer, // e.g. LAST_FRAME. - if (cm->use_prev_frame_mvs && !dec_is_ref_frame_buf(pbi, cm->prev_frame)) { + if (!dec_is_ref_frame_buf(pbi, cm->prev_frame)) { // Reassign the LAST_FRAME buffer to cm->prev_frame. - RefBuffer *last_fb_ref_buf = &cm->frame_refs[LAST_FRAME - LAST_FRAME]; - cm->prev_frame = &cm->buffer_pool->frame_bufs[last_fb_ref_buf->idx]; + cm->prev_frame = last_fb_ref_buf->idx != INVALID_IDX + ? 
&cm->buffer_pool->frame_bufs[last_fb_ref_buf->idx] + : NULL; } -#endif // CONFIG_EXT_REFS +#endif // CONFIG_EXT_REFS || CONFIG_TEMPMV_SIGNALING + +#if CONFIG_TEMPMV_SIGNALING + if (cm->use_prev_frame_mvs) { + assert(!cm->error_resilient_mode && cm->prev_frame && + cm->width == last_fb_ref_buf->buf->y_width && + cm->height == last_fb_ref_buf->buf->y_height && + !cm->prev_frame->intra_only); + } +#else + cm->use_prev_frame_mvs = !cm->error_resilient_mode && cm->prev_frame && + cm->width == cm->prev_frame->buf.y_crop_width && + cm->height == cm->prev_frame->buf.y_crop_height && + !cm->last_intra_only && cm->last_show_frame && + (cm->last_frame_type != KEY_FRAME); +#endif // CONFIG_TEMPMV_SIGNALING av1_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y); *cm->fc = cm->frame_contexts[cm->frame_context_idx]; + cm->pre_fc = &cm->frame_contexts[cm->frame_context_idx]; if (!cm->fc->initialized) aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME, "Uninitialized entropy context."); @@ -5053,11 +5150,6 @@ void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data, av1_frameworker_unlock_stats(worker); } -#if CONFIG_SUBFRAME_PROB_UPDATE - av1_copy(cm->starting_coef_probs, cm->fc->coef_probs); - cm->coef_probs_update_idx = 0; -#endif // CONFIG_SUBFRAME_PROB_UPDATE - if (pbi->max_threads > 1 && !CONFIG_CB4X4 && #if CONFIG_EXT_TILE pbi->dec_tile_col < 0 && // Decoding all columns @@ -5105,10 +5197,6 @@ void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data, sizeof(&pbi->tile_data[0].tctx.partition_cdf[0][0])); make_update_tile_list_dec(pbi, cm->tile_rows, cm->tile_cols, tile_ctxs); #endif - -#if CONFIG_SUBFRAME_PROB_UPDATE - cm->partial_prob_update = 0; -#endif // CONFIG_SUBFRAME_PROB_UPDATE av1_adapt_coef_probs(cm); av1_adapt_intra_frame_probs(cm); #if CONFIG_EC_ADAPT diff --git a/third_party/aom/av1/decoder/decodemv.c b/third_party/aom/av1/decoder/decodemv.c index ec0f87751..b3ce86e49 100644 --- a/third_party/aom/av1/decoder/decodemv.c +++ b/third_party/aom/av1/decoder/decodemv.c @@ -45,16 +45,10 @@ static INLINE int read_uniform(aom_reader *r, int n) { } #endif // CONFIG_EXT_INTRA || CONFIG_FILTER_INTRA || CONFIG_PALETTE -#if CONFIG_EC_MULTISYMBOL static PREDICTION_MODE read_intra_mode(aom_reader *r, aom_cdf_prob *cdf) { return (PREDICTION_MODE) av1_intra_mode_inv[aom_read_symbol(r, cdf, INTRA_MODES, ACCT_STR)]; } -#else -static PREDICTION_MODE read_intra_mode(aom_reader *r, const aom_prob *p) { - return (PREDICTION_MODE)aom_read_tree(r, av1_intra_mode_tree, p, ACCT_STR); -} -#endif #if CONFIG_DELTA_Q static int read_delta_qindex(AV1_COMMON *cm, MACROBLOCKD *xd, aom_reader *r, @@ -75,16 +69,7 @@ static int read_delta_qindex(AV1_COMMON *cm, MACROBLOCKD *xd, aom_reader *r, #endif if ((bsize != BLOCK_LARGEST || mbmi->skip == 0) && read_delta_q_flag) { -#if !CONFIG_EC_MULTISYMBOL - int bit = 1; - abs = 0; - while (abs < DELTA_Q_SMALL && bit) { - bit = aom_read(r, ec_ctx->delta_q_prob[abs], ACCT_STR); - abs += bit; - } -#else abs = aom_read_symbol(r, ec_ctx->delta_q_cdf, DELTA_Q_PROBS + 1, ACCT_STR); -#endif smallval = (abs < DELTA_Q_SMALL); if (counts) { for (i = 0; i < abs; ++i) counts->delta_q[i][1]++; @@ -127,17 +112,8 @@ static int read_delta_lflevel(AV1_COMMON *cm, MACROBLOCKD *xd, aom_reader *r, #endif if ((bsize != BLOCK_64X64 || mbmi->skip == 0) && read_delta_lf_flag) { -#if !CONFIG_EC_MULTISYMBOL - int bit = 1; - abs = 0; - while (abs < DELTA_LF_SMALL && bit) { - bit = aom_read(r, ec_ctx->delta_lf_prob[abs], ACCT_STR); - abs += bit; - } -#else abs = aom_read_symbol(r, 
ec_ctx->delta_lf_cdf, DELTA_LF_PROBS + 1, ACCT_STR); -#endif smallval = (abs < DELTA_LF_SMALL); if (counts) { for (i = 0; i < abs; ++i) counts->delta_lf[i][1]++; @@ -162,52 +138,51 @@ static int read_delta_lflevel(AV1_COMMON *cm, MACROBLOCKD *xd, aom_reader *r, #endif #endif -static PREDICTION_MODE read_intra_mode_y(AV1_COMMON *cm, MACROBLOCKD *xd, +static PREDICTION_MODE read_intra_mode_y(FRAME_CONTEXT *ec_ctx, MACROBLOCKD *xd, aom_reader *r, int size_group) { -#if CONFIG_EC_ADAPT - FRAME_CONTEXT *ec_ctx = xd->tile_ctx; -#elif CONFIG_EC_MULTISYMBOL - FRAME_CONTEXT *ec_ctx = cm->fc; -#endif - const PREDICTION_MODE y_mode = -#if CONFIG_EC_MULTISYMBOL read_intra_mode(r, ec_ctx->y_mode_cdf[size_group]); -#else - read_intra_mode(r, cm->fc->y_mode_prob[size_group]); -#endif FRAME_COUNTS *counts = xd->counts; -#if CONFIG_EC_ADAPT - (void)cm; -#endif if (counts) ++counts->y_mode[size_group][y_mode]; return y_mode; } -static PREDICTION_MODE read_intra_mode_uv(AV1_COMMON *cm, MACROBLOCKD *xd, - aom_reader *r, +static PREDICTION_MODE read_intra_mode_uv(FRAME_CONTEXT *ec_ctx, + MACROBLOCKD *xd, aom_reader *r, PREDICTION_MODE y_mode) { -#if CONFIG_EC_ADAPT - FRAME_CONTEXT *ec_ctx = xd->tile_ctx; -#elif CONFIG_EC_MULTISYMBOL - FRAME_CONTEXT *ec_ctx = cm->fc; -#endif - const PREDICTION_MODE uv_mode = -#if CONFIG_EC_MULTISYMBOL read_intra_mode(r, ec_ctx->uv_mode_cdf[y_mode]); -#else - read_intra_mode(r, cm->fc->uv_mode_prob[y_mode]); -#endif FRAME_COUNTS *counts = xd->counts; -#if CONFIG_EC_ADAPT - (void)cm; -#endif if (counts) ++counts->uv_mode[y_mode][uv_mode]; return uv_mode; } -#if CONFIG_EXT_INTER +#if CONFIG_CFL +static int read_cfl_alphas(FRAME_CONTEXT *const ec_ctx, aom_reader *r, int skip, + CFL_SIGN_TYPE signs_out[CFL_PRED_PLANES]) { + if (skip) { + signs_out[CFL_PRED_U] = CFL_SIGN_POS; + signs_out[CFL_PRED_V] = CFL_SIGN_POS; + return 0; + } else { + const int ind = aom_read_symbol(r, ec_ctx->cfl_alpha_cdf, CFL_ALPHABET_SIZE, + "cfl:alpha"); + // Signs are only coded for nonzero values + // sign == 0 implies negative alpha + // sign == 1 implies positive alpha + signs_out[CFL_PRED_U] = cfl_alpha_codes[ind][CFL_PRED_U] + ? aom_read_bit(r, "cfl:sign") + : CFL_SIGN_POS; + signs_out[CFL_PRED_V] = cfl_alpha_codes[ind][CFL_PRED_V] + ? aom_read_bit(r, "cfl:sign") + : CFL_SIGN_POS; + + return ind; + } +} +#endif + +#if CONFIG_EXT_INTER && CONFIG_INTERINTRA static INTERINTRA_MODE read_interintra_mode(AV1_COMMON *cm, MACROBLOCKD *xd, aom_reader *r, int size_group) { const INTERINTRA_MODE ii_mode = (INTERINTRA_MODE)aom_read_tree( @@ -217,11 +192,10 @@ static INTERINTRA_MODE read_interintra_mode(AV1_COMMON *cm, MACROBLOCKD *xd, if (counts) ++counts->interintra_mode[size_group][ii_mode]; return ii_mode; } -#endif // CONFIG_EXT_INTER +#endif // CONFIG_EXT_INTER && CONFIG_INTERINTRA static PREDICTION_MODE read_inter_mode(FRAME_CONTEXT *ec_ctx, MACROBLOCKD *xd, aom_reader *r, int16_t ctx) { -#if CONFIG_REF_MV FRAME_COUNTS *counts = xd->counts; int16_t mode_ctx = ctx & NEWMV_CTX_MASK; aom_prob mode_prob = ec_ctx->newmv_prob[mode_ctx]; @@ -262,22 +236,8 @@ static PREDICTION_MODE read_inter_mode(FRAME_CONTEXT *ec_ctx, MACROBLOCKD *xd, // Invalid prediction mode. 
assert(0); -#else -#if CONFIG_EC_MULTISYMBOL - const int mode = av1_inter_mode_inv[aom_read_symbol( - r, ec_ctx->inter_mode_cdf[ctx], INTER_MODES, ACCT_STR)]; -#else - const int mode = aom_read_tree(r, av1_inter_mode_tree, - ec_ctx->inter_mode_probs[ctx], ACCT_STR); -#endif - FRAME_COUNTS *counts = xd->counts; - if (counts) ++counts->inter_mode[ctx][mode]; - - return NEARESTMV + mode; -#endif } -#if CONFIG_REF_MV static void read_drl_idx(const AV1_COMMON *cm, MACROBLOCKD *xd, MB_MODE_INFO *mbmi, aom_reader *r) { uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame); @@ -324,7 +284,6 @@ static void read_drl_idx(const AV1_COMMON *cm, MACROBLOCKD *xd, } } } -#endif #if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION static MOTION_MODE read_motion_mode(AV1_COMMON *cm, MACROBLOCKD *xd, @@ -373,11 +332,7 @@ static PREDICTION_MODE read_inter_compound_mode(AV1_COMMON *cm, MACROBLOCKD *xd, #endif // CONFIG_EXT_INTER static int read_segment_id(aom_reader *r, struct segmentation_probs *segp) { -#if CONFIG_EC_MULTISYMBOL return aom_read_symbol(r, segp->tree_cdf, MAX_SEGMENTS, ACCT_STR); -#else - return aom_read_tree(r, av1_segment_tree, segp->tree_probs, ACCT_STR); -#endif } #if CONFIG_VAR_TX @@ -390,8 +345,8 @@ static void read_tx_size_vartx(AV1_COMMON *cm, MACROBLOCKD *xd, const int tx_col = blk_col >> 1; const int max_blocks_high = max_block_high(xd, mbmi->sb_type, 0); const int max_blocks_wide = max_block_wide(xd, mbmi->sb_type, 0); - int ctx = txfm_partition_context(xd->above_txfm_context + tx_col, - xd->left_txfm_context + tx_row, + int ctx = txfm_partition_context(xd->above_txfm_context + blk_col, + xd->left_txfm_context + blk_row, mbmi->sb_type, tx_size); TX_SIZE(*const inter_tx_size) [MAX_MIB_SIZE] = @@ -407,8 +362,8 @@ static void read_tx_size_vartx(AV1_COMMON *cm, MACROBLOCKD *xd, mbmi->tx_size = tx_size; mbmi->min_tx_size = AOMMIN(mbmi->min_tx_size, get_min_tx_size(tx_size)); if (counts) ++counts->txfm_partition[ctx][0]; - txfm_partition_update(xd->above_txfm_context + tx_col, - xd->left_txfm_context + tx_row, tx_size, tx_size); + txfm_partition_update(xd->above_txfm_context + blk_col, + xd->left_txfm_context + blk_row, tx_size, tx_size); return; } @@ -429,8 +384,8 @@ static void read_tx_size_vartx(AV1_COMMON *cm, MACROBLOCKD *xd, inter_tx_size[idy][idx] = inter_tx_size[0][0]; mbmi->tx_size = sub_txs; mbmi->min_tx_size = get_min_tx_size(mbmi->tx_size); - txfm_partition_update(xd->above_txfm_context + tx_col, - xd->left_txfm_context + tx_row, sub_txs, tx_size); + txfm_partition_update(xd->above_txfm_context + blk_col, + xd->left_txfm_context + blk_row, sub_txs, tx_size); return; } @@ -450,8 +405,8 @@ static void read_tx_size_vartx(AV1_COMMON *cm, MACROBLOCKD *xd, mbmi->tx_size = tx_size; mbmi->min_tx_size = AOMMIN(mbmi->min_tx_size, get_min_tx_size(tx_size)); if (counts) ++counts->txfm_partition[ctx][0]; - txfm_partition_update(xd->above_txfm_context + tx_col, - xd->left_txfm_context + tx_row, tx_size, tx_size); + txfm_partition_update(xd->above_txfm_context + blk_col, + xd->left_txfm_context + blk_row, tx_size, tx_size); } } #endif @@ -467,14 +422,8 @@ static TX_SIZE read_selected_tx_size(AV1_COMMON *cm, MACROBLOCKD *xd, FRAME_CONTEXT *ec_ctx = cm->fc; #endif - const int depth = -#if CONFIG_EC_MULTISYMBOL - aom_read_symbol(r, ec_ctx->tx_size_cdf[tx_size_cat][ctx], tx_size_cat + 2, - ACCT_STR); -#else - aom_read_tree(r, av1_tx_size_tree[tx_size_cat], - ec_ctx->tx_size_probs[tx_size_cat][ctx], ACCT_STR); -#endif + const int depth = aom_read_symbol(r, ec_ctx->tx_size_cdf[tx_size_cat][ctx], + 
tx_size_cat + 2, ACCT_STR); const TX_SIZE tx_size = depth_to_tx_size(depth); #if CONFIG_RECT_TX assert(!is_rect_tx(tx_size)); @@ -498,14 +447,25 @@ static TX_SIZE read_tx_size(AV1_COMMON *cm, MACROBLOCKD *xd, int is_inter, : intra_tx_size_cat_lookup[bsize]; const TX_SIZE coded_tx_size = read_selected_tx_size(cm, xd, tx_size_cat, r); -#if CONFIG_EXT_TX && CONFIG_RECT_TX +#if CONFIG_RECT_TX && (CONFIG_EXT_TX || CONFIG_VAR_TX) if (coded_tx_size > max_txsize_lookup[bsize]) { assert(coded_tx_size == max_txsize_lookup[bsize] + 1); +#if CONFIG_EXT_TX && CONFIG_RECT_TX_EXT + if (is_quarter_tx_allowed(xd, &xd->mi[0]->mbmi, is_inter)) { + int quarter_tx = aom_read(r, cm->fc->quarter_tx_size_prob, ACCT_STR); + FRAME_COUNTS *counts = xd->counts; + + if (counts) ++counts->quarter_tx_size[quarter_tx]; + return quarter_tx ? quarter_txsize_lookup[bsize] + : max_txsize_rect_lookup[bsize]; + } +#endif // CONFIG_EXT_TX && CONFIG_RECT_TX_EXT + return max_txsize_rect_lookup[bsize]; } #else assert(coded_tx_size <= max_txsize_lookup[bsize]); -#endif // CONFIG_EXT_TX && CONFIG_RECT_TX +#endif // CONFIG_RECT_TX && (CONFIG_EXT_TX || CONFIG_VAR_TX) return coded_tx_size; } else { return tx_size_from_tx_mode(bsize, tx_mode, is_inter); @@ -636,6 +596,93 @@ static int read_skip(AV1_COMMON *cm, const MACROBLOCKD *xd, int segment_id, } #if CONFIG_PALETTE +#if CONFIG_PALETTE_DELTA_ENCODING +static int uint16_compare(const void *a, const void *b) { + const uint16_t va = *(const uint16_t *)a; + const uint16_t vb = *(const uint16_t *)b; + return va - vb; +} + +static void read_palette_colors_y(MACROBLOCKD *const xd, int bit_depth, + PALETTE_MODE_INFO *const pmi, aom_reader *r) { + uint16_t color_cache[2 * PALETTE_MAX_SIZE]; + const MODE_INFO *const above_mi = xd->above_mi; + const MODE_INFO *const left_mi = xd->left_mi; + const int n_cache = av1_get_palette_cache(above_mi, left_mi, 0, color_cache); + const int n = pmi->palette_size[0]; + int idx = 0; + for (int i = 0; i < n_cache && idx < n; ++i) + if (aom_read_bit(r, ACCT_STR)) pmi->palette_colors[idx++] = color_cache[i]; + if (idx < n) { + pmi->palette_colors[idx++] = aom_read_literal(r, bit_depth, ACCT_STR); + if (idx < n) { + const int min_bits = bit_depth - 3; + int bits = min_bits + aom_read_literal(r, 2, ACCT_STR); + int range = (1 << bit_depth) - pmi->palette_colors[idx - 1] - 1; + for (; idx < n; ++idx) { + const int delta = aom_read_literal(r, bits, ACCT_STR) + 1; + pmi->palette_colors[idx] = pmi->palette_colors[idx - 1] + delta; + range -= delta; + bits = AOMMIN(bits, av1_ceil_log2(range)); + } + } + } + qsort(pmi->palette_colors, n, sizeof(pmi->palette_colors[0]), uint16_compare); +} + +static void read_palette_colors_uv(MACROBLOCKD *const xd, int bit_depth, + PALETTE_MODE_INFO *const pmi, + aom_reader *r) { + const int n = pmi->palette_size[1]; + // U channel colors. 
+ uint16_t color_cache[2 * PALETTE_MAX_SIZE]; + const MODE_INFO *const above_mi = xd->above_mi; + const MODE_INFO *const left_mi = xd->left_mi; + const int n_cache = av1_get_palette_cache(above_mi, left_mi, 1, color_cache); + int idx = PALETTE_MAX_SIZE; + for (int i = 0; i < n_cache && idx < PALETTE_MAX_SIZE + n; ++i) + if (aom_read_bit(r, ACCT_STR)) pmi->palette_colors[idx++] = color_cache[i]; + if (idx < PALETTE_MAX_SIZE + n) { + pmi->palette_colors[idx++] = aom_read_literal(r, bit_depth, ACCT_STR); + if (idx < PALETTE_MAX_SIZE + n) { + const int min_bits = bit_depth - 3; + int bits = min_bits + aom_read_literal(r, 2, ACCT_STR); + int range = (1 << bit_depth) - pmi->palette_colors[idx - 1]; + for (; idx < PALETTE_MAX_SIZE + n; ++idx) { + const int delta = aom_read_literal(r, bits, ACCT_STR); + pmi->palette_colors[idx] = pmi->palette_colors[idx - 1] + delta; + range -= delta; + bits = AOMMIN(bits, av1_ceil_log2(range)); + } + } + } + qsort(pmi->palette_colors + PALETTE_MAX_SIZE, n, + sizeof(pmi->palette_colors[0]), uint16_compare); + + // V channel colors. + if (aom_read_bit(r, ACCT_STR)) { // Delta encoding. + const int min_bits_v = bit_depth - 4; + const int max_val = 1 << bit_depth; + int bits = min_bits_v + aom_read_literal(r, 2, ACCT_STR); + pmi->palette_colors[2 * PALETTE_MAX_SIZE] = + aom_read_literal(r, bit_depth, ACCT_STR); + for (int i = 1; i < n; ++i) { + int delta = aom_read_literal(r, bits, ACCT_STR); + if (delta && aom_read_bit(r, ACCT_STR)) delta = -delta; + int val = (int)pmi->palette_colors[2 * PALETTE_MAX_SIZE + i - 1] + delta; + if (val < 0) val += max_val; + if (val >= max_val) val -= max_val; + pmi->palette_colors[2 * PALETTE_MAX_SIZE + i] = val; + } + } else { + for (int i = 0; i < n; ++i) { + pmi->palette_colors[2 * PALETTE_MAX_SIZE + i] = + aom_read_literal(r, bit_depth, ACCT_STR); + } + } +} +#endif // CONFIG_PALETTE_DELTA_ENCODING + static void read_palette_mode_info(AV1_COMMON *const cm, MACROBLOCKD *const xd, aom_reader *r) { MODE_INFO *const mi = xd->mi[0]; @@ -643,7 +690,7 @@ static void read_palette_mode_info(AV1_COMMON *const cm, MACROBLOCKD *const xd, const MODE_INFO *const above_mi = xd->above_mi; const MODE_INFO *const left_mi = xd->left_mi; const BLOCK_SIZE bsize = mbmi->sb_type; - int i, n; + int n; PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info; if (mbmi->mode == DC_PRED) { @@ -664,16 +711,9 @@ static void read_palette_mode_info(AV1_COMMON *const cm, MACROBLOCKD *const xd, 2; n = pmi->palette_size[0]; #if CONFIG_PALETTE_DELTA_ENCODING - const int min_bits = cm->bit_depth - 3; - int bits = min_bits + aom_read_literal(r, 2, ACCT_STR); - pmi->palette_colors[0] = aom_read_literal(r, cm->bit_depth, ACCT_STR); - for (i = 1; i < n; ++i) { - pmi->palette_colors[i] = pmi->palette_colors[i - 1] + - aom_read_literal(r, bits, ACCT_STR) + 1; - bits = AOMMIN( - bits, av1_ceil_log2((1 << cm->bit_depth) - pmi->palette_colors[i])); - } + read_palette_colors_y(xd, cm->bit_depth, pmi, r); #else + int i; for (i = 0; i < n; ++i) pmi->palette_colors[i] = aom_read_literal(r, cm->bit_depth, ACCT_STR); #endif // CONFIG_PALETTE_DELTA_ENCODING @@ -693,42 +733,9 @@ static void read_palette_mode_info(AV1_COMMON *const cm, MACROBLOCKD *const xd, 2; n = pmi->palette_size[1]; #if CONFIG_PALETTE_DELTA_ENCODING - // U channel colors. 
- const int min_bits_u = cm->bit_depth - 3; - int bits = min_bits_u + aom_read_literal(r, 2, ACCT_STR); - pmi->palette_colors[PALETTE_MAX_SIZE] = - aom_read_literal(r, cm->bit_depth, ACCT_STR); - for (i = 1; i < n; ++i) { - pmi->palette_colors[PALETTE_MAX_SIZE + i] = - pmi->palette_colors[PALETTE_MAX_SIZE + i - 1] + - aom_read_literal(r, bits, ACCT_STR); - bits = AOMMIN(bits, - av1_ceil_log2(1 + (1 << cm->bit_depth) - - pmi->palette_colors[PALETTE_MAX_SIZE + i])); - } - // V channel colors. - if (aom_read_bit(r, ACCT_STR)) { // Delta encoding. - const int min_bits_v = cm->bit_depth - 4; - const int max_val = 1 << cm->bit_depth; - bits = min_bits_v + aom_read_literal(r, 2, ACCT_STR); - pmi->palette_colors[2 * PALETTE_MAX_SIZE] = - aom_read_literal(r, cm->bit_depth, ACCT_STR); - for (i = 1; i < n; ++i) { - int delta = aom_read_literal(r, bits, ACCT_STR); - if (delta && aom_read_bit(r, ACCT_STR)) delta = -delta; - int val = - (int)pmi->palette_colors[2 * PALETTE_MAX_SIZE + i - 1] + delta; - if (val < 0) val += max_val; - if (val >= max_val) val -= max_val; - pmi->palette_colors[2 * PALETTE_MAX_SIZE + i] = val; - } - } else { - for (i = 0; i < n; ++i) { - pmi->palette_colors[2 * PALETTE_MAX_SIZE + i] = - aom_read_literal(r, cm->bit_depth, ACCT_STR); - } - } + read_palette_colors_uv(xd, cm->bit_depth, pmi, r); #else + int i; for (i = 0; i < n; ++i) { pmi->palette_colors[PALETTE_MAX_SIZE + i] = aom_read_literal(r, cm->bit_depth, ACCT_STR); @@ -745,7 +752,8 @@ static void read_palette_mode_info(AV1_COMMON *const cm, MACROBLOCKD *const xd, #if CONFIG_FILTER_INTRA static void read_filter_intra_mode_info(AV1_COMMON *const cm, - MACROBLOCKD *const xd, aom_reader *r) { + MACROBLOCKD *const xd, int mi_row, + int mi_col, aom_reader *r) { MODE_INFO *const mi = xd->mi[0]; MB_MODE_INFO *const mbmi = &mi->mbmi; FRAME_COUNTS *counts = xd->counts; @@ -768,6 +776,17 @@ static void read_filter_intra_mode_info(AV1_COMMON *const cm, ->filter_intra[0][filter_intra_mode_info->use_filter_intra_mode[0]]; } } + +#if CONFIG_CB4X4 + if (!is_chroma_reference(mi_row, mi_col, mbmi->sb_type, + xd->plane[1].subsampling_x, + xd->plane[1].subsampling_y)) + return; +#else + (void)mi_row; + (void)mi_col; +#endif // CONFIG_CB4X4 + if (mbmi->uv_mode == DC_PRED #if CONFIG_PALETTE && mbmi->palette_mode_info.palette_size[1] == 0 @@ -812,13 +831,8 @@ static void read_intra_angle_info(AV1_COMMON *const cm, MACROBLOCKD *const xd, p_angle = mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP; if (av1_is_intra_filter_switchable(p_angle)) { FRAME_COUNTS *counts = xd->counts; -#if CONFIG_EC_MULTISYMBOL mbmi->intra_filter = aom_read_symbol(r, ec_ctx->intra_filter_cdf[ctx], INTRA_FILTERS, ACCT_STR); -#else - mbmi->intra_filter = aom_read_tree( - r, av1_intra_filter_tree, ec_ctx->intra_filter_probs[ctx], ACCT_STR); -#endif // CONFIG_EC_MULTISYMBOL if (counts) ++counts->intra_filter[ctx][mbmi->intra_filter]; } else { mbmi->intra_filter = INTRA_FILTER_LINEAR; @@ -880,29 +894,16 @@ void av1_read_tx_type(const AV1_COMMON *const cm, MACROBLOCKD *xd, if (inter_block) { if (eset > 0) { -#if CONFIG_EC_MULTISYMBOL *tx_type = av1_ext_tx_inter_inv[eset][aom_read_symbol( r, ec_ctx->inter_ext_tx_cdf[eset][square_tx_size], ext_tx_cnt_inter[eset], ACCT_STR)]; -#else - *tx_type = aom_read_tree( - r, av1_ext_tx_inter_tree[eset], - ec_ctx->inter_ext_tx_prob[eset][square_tx_size], ACCT_STR); -#endif if (counts) ++counts->inter_ext_tx[eset][square_tx_size][*tx_type]; } } else if (ALLOW_INTRA_EXT_TX) { if (eset > 0) { -#if CONFIG_EC_MULTISYMBOL 
*tx_type = av1_ext_tx_intra_inv[eset][aom_read_symbol( r, ec_ctx->intra_ext_tx_cdf[eset][square_tx_size][mbmi->mode], ext_tx_cnt_intra[eset], ACCT_STR)]; -#else - *tx_type = aom_read_tree( - r, av1_ext_tx_intra_tree[eset], - ec_ctx->intra_ext_tx_prob[eset][square_tx_size][mbmi->mode], - ACCT_STR); -#endif if (counts) ++counts->intra_ext_tx[eset][square_tx_size][mbmi->mode][*tx_type]; } @@ -923,25 +924,14 @@ void av1_read_tx_type(const AV1_COMMON *const cm, MACROBLOCKD *xd, FRAME_COUNTS *counts = xd->counts; if (inter_block) { -#if CONFIG_EC_MULTISYMBOL *tx_type = av1_ext_tx_inv[aom_read_symbol( r, ec_ctx->inter_ext_tx_cdf[tx_size], TX_TYPES, ACCT_STR)]; -#else - *tx_type = aom_read_tree(r, av1_ext_tx_tree, - ec_ctx->inter_ext_tx_prob[tx_size], ACCT_STR); -#endif if (counts) ++counts->inter_ext_tx[tx_size][*tx_type]; } else { const TX_TYPE tx_type_nom = intra_mode_to_tx_type_context[mbmi->mode]; -#if CONFIG_EC_MULTISYMBOL *tx_type = av1_ext_tx_inv[aom_read_symbol( r, ec_ctx->intra_ext_tx_cdf[tx_size][tx_type_nom], TX_TYPES, ACCT_STR)]; -#else - *tx_type = aom_read_tree( - r, av1_ext_tx_tree, ec_ctx->intra_ext_tx_prob[tx_size][tx_type_nom], - ACCT_STR); -#endif if (counts) ++counts->intra_ext_tx[tx_size][tx_type_nom][*tx_type]; } } else { @@ -972,7 +962,6 @@ static INLINE int assign_dv(AV1_COMMON *cm, MACROBLOCKD *xd, int_mv *mv, read_mv(r, &mv->as_mv, &ref_mv->as_mv, &ec_ctx->ndvc, dv_counts, 0); int valid = is_mv_valid(&mv->as_mv) && is_dv_valid(mv->as_mv, &xd->tile, mi_row, mi_col, bsize); - // TODO(aconverse@google.com): additional validation return valid; } #endif // CONFIG_INTRABC @@ -995,7 +984,7 @@ static void read_intra_frame_mode_info(AV1_COMMON *const cm, const int y_mis = AOMMIN(cm->mi_rows - mi_row, bh); #if CONFIG_EC_ADAPT FRAME_CONTEXT *ec_ctx = xd->tile_ctx; -#elif CONFIG_EC_MULTISYMBOL +#else FRAME_CONTEXT *ec_ctx = cm->fc; #endif @@ -1022,24 +1011,56 @@ static void read_intra_frame_mode_info(AV1_COMMON *const cm, } #endif - mbmi->tx_size = read_tx_size(cm, xd, 0, 1, r); mbmi->ref_frame[0] = INTRA_FRAME; mbmi->ref_frame[1] = NONE_FRAME; + mbmi->tx_size = read_tx_size(cm, xd, 0, 1, r); #if CONFIG_INTRABC if (bsize >= BLOCK_8X8 && cm->allow_screen_content_tools) { - mbmi->use_intrabc = aom_read(r, INTRABC_PROB, ACCT_STR); + mbmi->use_intrabc = aom_read(r, ec_ctx->intrabc_prob, ACCT_STR); if (mbmi->use_intrabc) { - int_mv dv_ref; mbmi->mode = mbmi->uv_mode = DC_PRED; #if CONFIG_DUAL_FILTER for (int idx = 0; idx < 4; ++idx) mbmi->interp_filter[idx] = BILINEAR; #else mbmi->interp_filter = BILINEAR; #endif - av1_find_ref_dv(&dv_ref, mi_row, mi_col); + + int16_t inter_mode_ctx[MODE_CTX_REF_FRAMES]; + int_mv ref_mvs[MAX_MV_REF_CANDIDATES] = {}; + + av1_find_mv_refs(cm, xd, mi, INTRA_FRAME, &xd->ref_mv_count[INTRA_FRAME], + xd->ref_mv_stack[INTRA_FRAME], +#if CONFIG_EXT_INTER + NULL, +#endif // CONFIG_EXT_INTER + ref_mvs, mi_row, mi_col, NULL, NULL, inter_mode_ctx); + + int_mv nearestmv, nearmv; + av1_find_best_ref_mvs(0, ref_mvs, &nearestmv, &nearmv); + + int_mv dv_ref = nearestmv.as_int == 0 ? 
nearmv : nearestmv; + if (dv_ref.as_int == 0) av1_find_ref_dv(&dv_ref, mi_row, mi_col); + xd->corrupted |= !assign_dv(cm, xd, &mbmi->mv[0], &dv_ref, mi_row, mi_col, bsize, r); +#if CONFIG_VAR_TX + // TODO(aconverse@google.com): Evaluate allowing VAR TX on intrabc blocks + const int width = block_size_wide[bsize] >> tx_size_wide_log2[0]; + const int height = block_size_high[bsize] >> tx_size_high_log2[0]; + int idx, idy; + for (idy = 0; idy < height; ++idy) + for (idx = 0; idx < width; ++idx) + mbmi->inter_tx_size[idy >> 1][idx >> 1] = mbmi->tx_size; + mbmi->min_tx_size = get_min_tx_size(mbmi->tx_size); +#endif // CONFIG_VAR_TX +#if CONFIG_EXT_TX && !CONFIG_TXK_SEL + av1_read_tx_type(cm, xd, +#if CONFIG_SUPERTX + 0, +#endif + r); +#endif // CONFIG_EXT_TX && !CONFIG_TXK_SEL return; } } @@ -1048,68 +1069,51 @@ static void read_intra_frame_mode_info(AV1_COMMON *const cm, #if CONFIG_CB4X4 (void)i; mbmi->mode = -#if CONFIG_EC_MULTISYMBOL read_intra_mode(r, get_y_mode_cdf(ec_ctx, mi, above_mi, left_mi, 0)); #else - read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, 0)); -#endif -#else switch (bsize) { case BLOCK_4X4: for (i = 0; i < 4; ++i) - mi->bmi[i].as_mode = -#if CONFIG_EC_MULTISYMBOL - read_intra_mode(r, - get_y_mode_cdf(ec_ctx, mi, above_mi, left_mi, i)); -#else - read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, i)); -#endif + mi->bmi[i].as_mode = read_intra_mode( + r, get_y_mode_cdf(ec_ctx, mi, above_mi, left_mi, i)); mbmi->mode = mi->bmi[3].as_mode; break; case BLOCK_4X8: mi->bmi[0].as_mode = mi->bmi[2].as_mode = -#if CONFIG_EC_MULTISYMBOL read_intra_mode(r, get_y_mode_cdf(ec_ctx, mi, above_mi, left_mi, 0)); -#else - read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, 0)); -#endif mi->bmi[1].as_mode = mi->bmi[3].as_mode = mbmi->mode = -#if CONFIG_EC_MULTISYMBOL read_intra_mode(r, get_y_mode_cdf(ec_ctx, mi, above_mi, left_mi, 1)); -#else - read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, 1)); -#endif break; case BLOCK_8X4: mi->bmi[0].as_mode = mi->bmi[1].as_mode = -#if CONFIG_EC_MULTISYMBOL read_intra_mode(r, get_y_mode_cdf(ec_ctx, mi, above_mi, left_mi, 0)); -#else - read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, 0)); -#endif mi->bmi[2].as_mode = mi->bmi[3].as_mode = mbmi->mode = -#if CONFIG_EC_MULTISYMBOL read_intra_mode(r, get_y_mode_cdf(ec_ctx, mi, above_mi, left_mi, 2)); -#else - read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, 2)); -#endif break; default: mbmi->mode = -#if CONFIG_EC_MULTISYMBOL read_intra_mode(r, get_y_mode_cdf(ec_ctx, mi, above_mi, left_mi, 0)); -#else - read_intra_mode(r, get_y_mode_probs(cm, mi, above_mi, left_mi, 0)); -#endif } #endif #if CONFIG_CB4X4 if (is_chroma_reference(mi_row, mi_col, bsize, xd->plane[1].subsampling_x, - xd->plane[1].subsampling_y)) - mbmi->uv_mode = read_intra_mode_uv(cm, xd, r, mbmi->mode); + xd->plane[1].subsampling_y)) { + mbmi->uv_mode = read_intra_mode_uv(ec_ctx, xd, r, mbmi->mode); #else - mbmi->uv_mode = read_intra_mode_uv(cm, xd, r, mbmi->mode); + mbmi->uv_mode = read_intra_mode_uv(ec_ctx, xd, r, mbmi->mode); +#endif + +#if CONFIG_CFL + // TODO(ltrudeau) support PALETTE + if (mbmi->uv_mode == DC_PRED) { + mbmi->cfl_alpha_idx = + read_cfl_alphas(ec_ctx, r, mbmi->skip, mbmi->cfl_alpha_signs); + } +#endif // CONFIG_CFL + +#if CONFIG_CB4X4 + } #endif #if CONFIG_EXT_INTRA @@ -1125,7 +1129,7 @@ static void read_intra_frame_mode_info(AV1_COMMON *const cm, mbmi->filter_intra_mode_info.use_filter_intra_mode[0] = 0; 
mbmi->filter_intra_mode_info.use_filter_intra_mode[1] = 0; if (bsize >= BLOCK_8X8 || CONFIG_CB4X4) - read_filter_intra_mode_info(cm, xd, r); + read_filter_intra_mode_info(cm, xd, mi_row, mi_col, r); #endif // CONFIG_FILTER_INTRA #if !CONFIG_TXK_SEL @@ -1141,11 +1145,7 @@ static int read_mv_component(aom_reader *r, nmv_component *mvcomp, int usehp) { int mag, d, fr, hp; const int sign = aom_read(r, mvcomp->sign, ACCT_STR); const int mv_class = -#if CONFIG_EC_MULTISYMBOL aom_read_symbol(r, mvcomp->class_cdf, MV_CLASSES, ACCT_STR); -#else - aom_read_tree(r, av1_mv_class_tree, mvcomp->classes, ACCT_STR); -#endif const int class0 = mv_class == MV_CLASS_0; // Integer part @@ -1161,14 +1161,9 @@ static int read_mv_component(aom_reader *r, nmv_component *mvcomp, int usehp) { mag = CLASS0_SIZE << (mv_class + 2); } -// Fractional part -#if CONFIG_EC_MULTISYMBOL + // Fractional part fr = aom_read_symbol(r, class0 ? mvcomp->class0_fp_cdf[d] : mvcomp->fp_cdf, MV_FP_SIZE, ACCT_STR); -#else - fr = aom_read_tree(r, av1_mv_fp_tree, - class0 ? mvcomp->class0_fp[d] : mvcomp->fp, ACCT_STR); -#endif // High precision part (if hp is not used, the default value of the hp is 1) hp = usehp ? aom_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp, ACCT_STR) @@ -1185,11 +1180,7 @@ static INLINE void read_mv(aom_reader *r, MV *mv, const MV *ref, MV_JOINT_TYPE joint_type; MV diff = { 0, 0 }; joint_type = -#if CONFIG_EC_MULTISYMBOL (MV_JOINT_TYPE)aom_read_symbol(r, ctx->joint_cdf, MV_JOINTS, ACCT_STR); -#else - (MV_JOINT_TYPE)aom_read_tree(r, av1_mv_joint_tree, ctx->joints, ACCT_STR); -#endif if (mv_joint_vertical(joint_type)) diff.row = read_mv_component(r, &ctx->comps[0], allow_hp); @@ -1207,7 +1198,7 @@ static REFERENCE_MODE read_block_reference_mode(AV1_COMMON *cm, const MACROBLOCKD *xd, aom_reader *r) { #if !SUB8X8_COMP_REF - if (xd->mi[0]->mbmi.sb_type < BLOCK_8X8) return SINGLE_REFERENCE; + if (xd->mi[0]->mbmi.sb_type == BLOCK_4X4) return SINGLE_REFERENCE; #endif if (cm->reference_mode == REFERENCE_MODE_SELECT) { const int ctx = av1_get_reference_mode_context(cm, xd); @@ -1236,7 +1227,7 @@ static void read_ref_frames(AV1_COMMON *const cm, MACROBLOCKD *const xd, const REFERENCE_MODE mode = read_block_reference_mode(cm, xd, r); // FIXME(rbultje) I'm pretty sure this breaks segmentation ref frame coding if (mode == COMPOUND_REFERENCE) { -#if CONFIG_LOWDELAY_COMPOUND // Normative in decoder (for low delay) +#if CONFIG_ONE_SIDED_COMPOUND // Normative in decoder (for low delay) const int idx = 1; #else #if CONFIG_EXT_REFS @@ -1356,16 +1347,10 @@ static INLINE void read_mb_interp_filter(AV1_COMMON *const cm, if (has_subpel_mv_component(xd->mi[0], xd, dir) || (mbmi->ref_frame[1] > INTRA_FRAME && has_subpel_mv_component(xd->mi[0], xd, dir + 2))) { -#if CONFIG_EC_MULTISYMBOL mbmi->interp_filter[dir] = (InterpFilter)av1_switchable_interp_inv[aom_read_symbol( r, ec_ctx->switchable_interp_cdf[ctx], SWITCHABLE_FILTERS, ACCT_STR)]; -#else - mbmi->interp_filter[dir] = (InterpFilter)aom_read_tree( - r, av1_switchable_interp_tree, ec_ctx->switchable_interp_prob[ctx], - ACCT_STR); -#endif if (counts) ++counts->switchable_interp[ctx][mbmi->interp_filter[dir]]; } } @@ -1375,21 +1360,15 @@ static INLINE void read_mb_interp_filter(AV1_COMMON *const cm, mbmi->interp_filter[2] = mbmi->interp_filter[0]; mbmi->interp_filter[3] = mbmi->interp_filter[1]; } -#else // CONFIG_DUAL_FILTER +#else // CONFIG_DUAL_FILTER if (cm->interp_filter != SWITCHABLE) { mbmi->interp_filter = cm->interp_filter; } else { const int ctx = 
av1_get_pred_context_switchable_interp(xd); -#if CONFIG_EC_MULTISYMBOL mbmi->interp_filter = (InterpFilter)av1_switchable_interp_inv[aom_read_symbol( r, ec_ctx->switchable_interp_cdf[ctx], SWITCHABLE_FILTERS, ACCT_STR)]; -#else - mbmi->interp_filter = (InterpFilter)aom_read_tree( - r, av1_switchable_interp_tree, ec_ctx->switchable_interp_prob[ctx], - ACCT_STR); -#endif if (counts) ++counts->switchable_interp[ctx][mbmi->interp_filter]; } #endif // CONFIG_DUAL_FILTER @@ -1405,41 +1384,66 @@ static void read_intra_block_mode_info(AV1_COMMON *const cm, const int mi_row, mbmi->ref_frame[0] = INTRA_FRAME; mbmi->ref_frame[1] = NONE_FRAME; +#if CONFIG_EC_ADAPT + FRAME_CONTEXT *ec_ctx = xd->tile_ctx; +#else + FRAME_CONTEXT *ec_ctx = cm->fc; +#endif + #if CONFIG_CB4X4 (void)i; - mbmi->mode = read_intra_mode_y(cm, xd, r, size_group_lookup[bsize]); + mbmi->mode = read_intra_mode_y(ec_ctx, xd, r, size_group_lookup[bsize]); #else switch (bsize) { case BLOCK_4X4: for (i = 0; i < 4; ++i) - mi->bmi[i].as_mode = read_intra_mode_y(cm, xd, r, 0); + mi->bmi[i].as_mode = read_intra_mode_y(ec_ctx, xd, r, 0); mbmi->mode = mi->bmi[3].as_mode; break; case BLOCK_4X8: - mi->bmi[0].as_mode = mi->bmi[2].as_mode = read_intra_mode_y(cm, xd, r, 0); + mi->bmi[0].as_mode = mi->bmi[2].as_mode = + read_intra_mode_y(ec_ctx, xd, r, 0); mi->bmi[1].as_mode = mi->bmi[3].as_mode = mbmi->mode = - read_intra_mode_y(cm, xd, r, 0); + read_intra_mode_y(ec_ctx, xd, r, 0); break; case BLOCK_8X4: - mi->bmi[0].as_mode = mi->bmi[1].as_mode = read_intra_mode_y(cm, xd, r, 0); + mi->bmi[0].as_mode = mi->bmi[1].as_mode = + read_intra_mode_y(ec_ctx, xd, r, 0); mi->bmi[2].as_mode = mi->bmi[3].as_mode = mbmi->mode = - read_intra_mode_y(cm, xd, r, 0); + read_intra_mode_y(ec_ctx, xd, r, 0); break; default: - mbmi->mode = read_intra_mode_y(cm, xd, r, size_group_lookup[bsize]); + mbmi->mode = read_intra_mode_y(ec_ctx, xd, r, size_group_lookup[bsize]); } #endif #if CONFIG_CB4X4 if (is_chroma_reference(mi_row, mi_col, bsize, xd->plane[1].subsampling_x, - xd->plane[1].subsampling_y)) - mbmi->uv_mode = read_intra_mode_uv(cm, xd, r, mbmi->mode); + xd->plane[1].subsampling_y)) { + mbmi->uv_mode = read_intra_mode_uv(ec_ctx, xd, r, mbmi->mode); #else - mbmi->uv_mode = read_intra_mode_uv(cm, xd, r, mbmi->mode); + mbmi->uv_mode = read_intra_mode_uv(ec_ctx, xd, r, mbmi->mode); (void)mi_row; (void)mi_col; #endif +#if CONFIG_CFL + // TODO(ltrudeau) support PALETTE + if (mbmi->uv_mode == DC_PRED) { + mbmi->cfl_alpha_idx = read_cfl_alphas( +#if CONFIG_EC_ADAPT + xd->tile_ctx, +#else + cm->fc, +#endif // CONFIG_EC_ADAPT + r, mbmi->skip, mbmi->cfl_alpha_signs); + } +#endif // CONFIG_CFL + +#if CONFIG_CB4X4 + } +#endif + #if CONFIG_EXT_INTRA read_intra_angle_info(cm, xd, r); #endif // CONFIG_EXT_INTRA @@ -1453,7 +1457,7 @@ static void read_intra_block_mode_info(AV1_COMMON *const cm, const int mi_row, mbmi->filter_intra_mode_info.use_filter_intra_mode[0] = 0; mbmi->filter_intra_mode_info.use_filter_intra_mode[1] = 0; if (bsize >= BLOCK_8X8 || CONFIG_CB4X4) - read_filter_intra_mode_info(cm, xd, r); + read_filter_intra_mode_info(cm, xd, mi_row, mi_col, r); #endif // CONFIG_FILTER_INTRA } @@ -1477,7 +1481,6 @@ static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd, FRAME_CONTEXT *ec_ctx = cm->fc; #endif BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type; -#if CONFIG_REF_MV MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi; #if CONFIG_CB4X4 int_mv *pred_mv = mbmi->pred_mv; @@ -1486,9 +1489,6 @@ static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd, int_mv *pred_mv = (bsize >= 
BLOCK_8X8) ? mbmi->pred_mv : xd->mi[0]->bmi[block].pred_mv; #endif // CONFIG_CB4X4 -#else - (void)block; -#endif // CONFIG_REF_MV (void)ref_frame; (void)cm; (void)mi_row; @@ -1498,12 +1498,7 @@ static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd, switch (mode) { case NEWMV: { FRAME_COUNTS *counts = xd->counts; -#if !CONFIG_REF_MV - nmv_context *const nmvc = &ec_ctx->nmvc; - nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL; -#endif for (i = 0; i < 1 + is_compound; ++i) { -#if CONFIG_REF_MV int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame); int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[rf_type], xd->ref_mv_stack[rf_type], i, @@ -1511,13 +1506,10 @@ static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd, nmv_context *const nmvc = &ec_ctx->nmvc[nmv_ctx]; nmv_context_counts *const mv_counts = counts ? &counts->mv[nmv_ctx] : NULL; -#endif read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, nmvc, mv_counts, allow_hp); ret = ret && is_mv_valid(&mv[i].as_mv); -#if CONFIG_REF_MV pred_mv[i].as_int = ref_mv[i].as_int; -#endif } break; } @@ -1525,20 +1517,16 @@ static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd, mv[0].as_int = nearest_mv[0].as_int; if (is_compound) mv[1].as_int = nearest_mv[1].as_int; -#if CONFIG_REF_MV pred_mv[0].as_int = nearest_mv[0].as_int; if (is_compound) pred_mv[1].as_int = nearest_mv[1].as_int; -#endif break; } case NEARMV: { mv[0].as_int = near_mv[0].as_int; if (is_compound) mv[1].as_int = near_mv[1].as_int; -#if CONFIG_REF_MV pred_mv[0].as_int = near_mv[0].as_int; if (is_compound) pred_mv[1].as_int = near_mv[1].as_int; -#endif break; } case ZEROMV: { @@ -1557,22 +1545,15 @@ static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd, if (is_compound) mv[1].as_int = 0; #endif // CONFIG_GLOBAL_MOTION -#if CONFIG_REF_MV pred_mv[0].as_int = mv[0].as_int; if (is_compound) pred_mv[1].as_int = mv[1].as_int; -#endif break; } #if CONFIG_EXT_INTER case NEW_NEWMV: { FRAME_COUNTS *counts = xd->counts; -#if !CONFIG_REF_MV - nmv_context *const nmvc = &ec_ctx->nmvc; - nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL; -#endif assert(is_compound); for (i = 0; i < 2; ++i) { -#if CONFIG_REF_MV int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame); int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[rf_type], xd->ref_mv_stack[rf_type], i, @@ -1580,7 +1561,6 @@ static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd, nmv_context *const nmvc = &ec_ctx->nmvc[nmv_ctx]; nmv_context_counts *const mv_counts = counts ? &counts->mv[nmv_ctx] : NULL; -#endif read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, nmvc, mv_counts, allow_hp); ret = ret && is_mv_valid(&mv[i].as_mv); } @@ -1592,18 +1572,6 @@ static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd, mv[1].as_int = nearest_mv[1].as_int; break; } - case NEAREST_NEARMV: { - assert(is_compound); - mv[0].as_int = nearest_mv[0].as_int; - mv[1].as_int = near_mv[1].as_int; - break; - } - case NEAR_NEARESTMV: { - assert(is_compound); - mv[0].as_int = near_mv[0].as_int; - mv[1].as_int = nearest_mv[1].as_int; - break; - } case NEAR_NEARMV: { assert(is_compound); mv[0].as_int = near_mv[0].as_int; @@ -1612,17 +1580,12 @@ static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd, } case NEW_NEARESTMV: { FRAME_COUNTS *counts = xd->counts; -#if CONFIG_REF_MV int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame); int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[rf_type], xd->ref_mv_stack[rf_type], 0, mbmi->ref_mv_idx); nmv_context *const nmvc = &ec_ctx->nmvc[nmv_ctx]; nmv_context_counts *const mv_counts = counts ? 
&counts->mv[nmv_ctx] : NULL; -#else - nmv_context *const nmvc = &ec_ctx->nmvc; - nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL; -#endif read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv, nmvc, mv_counts, allow_hp); assert(is_compound); ret = ret && is_mv_valid(&mv[0].as_mv); @@ -1631,17 +1594,12 @@ static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd, } case NEAREST_NEWMV: { FRAME_COUNTS *counts = xd->counts; -#if CONFIG_REF_MV int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame); int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[rf_type], xd->ref_mv_stack[rf_type], 1, mbmi->ref_mv_idx); nmv_context_counts *const mv_counts = counts ? &counts->mv[nmv_ctx] : NULL; nmv_context *const nmvc = &ec_ctx->nmvc[nmv_ctx]; -#else - nmv_context *const nmvc = &ec_ctx->nmvc; - nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL; -#endif mv[0].as_int = nearest_mv[0].as_int; read_mv(r, &mv[1].as_mv, &ref_mv[1].as_mv, nmvc, mv_counts, allow_hp); assert(is_compound); @@ -1650,17 +1608,12 @@ static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd, } case NEAR_NEWMV: { FRAME_COUNTS *counts = xd->counts; -#if CONFIG_REF_MV int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame); int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[rf_type], xd->ref_mv_stack[rf_type], 1, mbmi->ref_mv_idx); nmv_context *const nmvc = &ec_ctx->nmvc[nmv_ctx]; nmv_context_counts *const mv_counts = counts ? &counts->mv[nmv_ctx] : NULL; -#else - nmv_context *const nmvc = &ec_ctx->nmvc; - nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL; -#endif mv[0].as_int = near_mv[0].as_int; read_mv(r, &mv[1].as_mv, &ref_mv[1].as_mv, nmvc, mv_counts, allow_hp); assert(is_compound); @@ -1670,17 +1623,12 @@ static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd, } case NEW_NEARMV: { FRAME_COUNTS *counts = xd->counts; -#if CONFIG_REF_MV int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame); int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[rf_type], xd->ref_mv_stack[rf_type], 0, mbmi->ref_mv_idx); nmv_context *const nmvc = &ec_ctx->nmvc[nmv_ctx]; nmv_context_counts *const mv_counts = counts ? &counts->mv[nmv_ctx] : NULL; -#else - nmv_context *const nmvc = &ec_ctx->nmvc; - nmv_context_counts *const mv_counts = counts ? 
&counts->mv : NULL; -#endif read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv, nmvc, mv_counts, allow_hp); assert(is_compound); ret = ret && is_mv_valid(&mv[0].as_mv); @@ -1748,9 +1696,9 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi, int_mv ref_mvs[MODE_CTX_REF_FRAMES][MAX_MV_REF_CANDIDATES]; int ref, is_compound; int16_t inter_mode_ctx[MODE_CTX_REF_FRAMES]; -#if CONFIG_REF_MV && CONFIG_EXT_INTER +#if CONFIG_EXT_INTER int16_t compound_inter_mode_ctx[MODE_CTX_REF_FRAMES]; -#endif // CONFIG_REF_MV && CONFIG_EXT_INTER +#endif // CONFIG_EXT_INTER int16_t mode_ctx = 0; #if CONFIG_WARPED_MOTION int pts[SAMPLES_ARRAY_SIZE], pts_inref[SAMPLES_ARRAY_SIZE]; @@ -1761,6 +1709,8 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi, FRAME_CONTEXT *ec_ctx = cm->fc; #endif + assert(NELEMENTS(mode_2_counter) == MB_MODE_COUNT); + #if CONFIG_PALETTE mbmi->palette_mode_info.palette_size[0] = 0; mbmi->palette_mode_info.palette_size[1] = 0; @@ -1774,18 +1724,14 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi, for (ref = 0; ref < 1 + is_compound; ++ref) { MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref]; - av1_find_mv_refs(cm, xd, mi, frame, -#if CONFIG_REF_MV - &xd->ref_mv_count[frame], xd->ref_mv_stack[frame], + av1_find_mv_refs( + cm, xd, mi, frame, &xd->ref_mv_count[frame], xd->ref_mv_stack[frame], #if CONFIG_EXT_INTER - compound_inter_mode_ctx, + compound_inter_mode_ctx, #endif // CONFIG_EXT_INTER -#endif - ref_mvs[frame], mi_row, mi_col, fpm_sync, (void *)pbi, - inter_mode_ctx); + ref_mvs[frame], mi_row, mi_col, fpm_sync, (void *)pbi, inter_mode_ctx); } -#if CONFIG_REF_MV if (is_compound) { MV_REFERENCE_FRAME ref_frame = av1_ref_frame_type(mbmi->ref_frame); av1_find_mv_refs(cm, xd, mi, ref_frame, &xd->ref_mv_count[ref_frame], @@ -1833,9 +1779,6 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi, mode_ctx = av1_mode_context_analyzer(inter_mode_ctx, mbmi->ref_frame, bsize, -1); mbmi->ref_mv_idx = 0; -#else - mode_ctx = inter_mode_ctx[mbmi->ref_frame[0]]; -#endif if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) { mbmi->mode = ZEROMV; @@ -1852,7 +1795,6 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi, else #endif // CONFIG_EXT_INTER mbmi->mode = read_inter_mode(ec_ctx, xd, r, mode_ctx); -#if CONFIG_REF_MV #if CONFIG_EXT_INTER if (mbmi->mode == NEWMV || mbmi->mode == NEW_NEWMV || have_nearmv_in_inter_mode(mbmi->mode)) @@ -1860,7 +1802,6 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi, if (mbmi->mode == NEARMV || mbmi->mode == NEWMV) #endif read_drl_idx(cm, xd, mbmi, r); -#endif } } @@ -1876,7 +1817,6 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi, } } -#if CONFIG_REF_MV if (mbmi->ref_mv_idx > 0) { int_mv cur_mv = xd->ref_mv_stack[mbmi->ref_frame[0]][1 + mbmi->ref_mv_idx].this_mv; @@ -1905,10 +1845,10 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi, lower_mv_precision(&nearestmv[0].as_mv, allow_hp); lower_mv_precision(&nearestmv[1].as_mv, allow_hp); #if CONFIG_EXT_INTER - } else if (mbmi->mode == NEAREST_NEWMV || mbmi->mode == NEAREST_NEARMV) { + } else if (mbmi->mode == NEAREST_NEWMV) { nearestmv[0] = xd->ref_mv_stack[ref_frame_type][0].this_mv; lower_mv_precision(&nearestmv[0].as_mv, allow_hp); - } else if (mbmi->mode == NEW_NEARESTMV || mbmi->mode == NEAR_NEARESTMV) { + } else if (mbmi->mode == NEW_NEARESTMV) { nearestmv[1] = xd->ref_mv_stack[ref_frame_type][0].comp_mv; lower_mv_precision(&nearestmv[1].as_mv, allow_hp); } @@ -1938,7 +1878,6 @@ static void 
read_inter_block_mode_info(AV1Decoder *const pbi, } #endif // CONFIG_EXT_INTER } -#endif #if !CONFIG_DUAL_FILTER && !CONFIG_WARPED_MOTION && !CONFIG_GLOBAL_MOTION read_mb_interp_filter(cm, xd, mbmi, r); @@ -1958,13 +1897,11 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi, int_mv block[2]; const int j = idy * 2 + idx; int_mv ref_mv_s8[2]; -#if CONFIG_REF_MV #if CONFIG_EXT_INTER if (!is_compound) #endif // CONFIG_EXT_INTER mode_ctx = av1_mode_context_analyzer(inter_mode_ctx, mbmi->ref_frame, bsize, j); -#endif #if CONFIG_EXT_INTER if (is_compound) b_mode = read_inter_compound_mode(cm, xd, r, mode_ctx); @@ -1977,10 +1914,8 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi, #else if (b_mode != ZEROMV) { #endif // CONFIG_EXT_INTER -#if CONFIG_REF_MV CANDIDATE_MV ref_mv_stack[2][MAX_REF_MV_STACK_SIZE]; uint8_t ref_mv_count[2]; -#endif for (ref = 0; ref < 1 + is_compound; ++ref) #if CONFIG_EXT_INTER { @@ -1989,9 +1924,7 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi, j, mi_row, mi_col, NULL); #endif // CONFIG_EXT_INTER av1_append_sub8x8_mvs_for_idx(cm, xd, j, ref, mi_row, mi_col, -#if CONFIG_REF_MV ref_mv_stack[ref], &ref_mv_count[ref], -#endif #if CONFIG_EXT_INTER mv_ref_list, #endif // CONFIG_EXT_INTER @@ -2009,12 +1942,8 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi, } for (ref = 0; ref < 1 + is_compound && b_mode != ZEROMV; ++ref) { -#if CONFIG_REF_MV ref_mv_s8[ref] = nearest_sub8x8[ref]; lower_mv_precision(&ref_mv_s8[ref].as_mv, allow_hp); -#else - ref_mv_s8[ref] = nearestmv[ref]; -#endif } #if CONFIG_EXT_INTER (void)ref_mv_s8; @@ -2041,10 +1970,8 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi, } } -#if CONFIG_REF_MV mbmi->pred_mv[0].as_int = mi->bmi[3].pred_mv[0].as_int; mbmi->pred_mv[1].as_int = mi->bmi[3].pred_mv[1].as_int; -#endif mi->mbmi.mode = b_mode; mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int; @@ -2056,42 +1983,35 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi, #if CONFIG_EXT_INTER if (is_compound) { -#if CONFIG_REF_MV int ref_mv_idx = mbmi->ref_mv_idx; // Special case: NEAR_NEWMV and NEW_NEARMV modes use // 1 + mbmi->ref_mv_idx (like NEARMV) instead of // mbmi->ref_mv_idx (like NEWMV) if (mbmi->mode == NEAR_NEWMV || mbmi->mode == NEW_NEARMV) ref_mv_idx = 1 + mbmi->ref_mv_idx; -#endif if (compound_ref0_mode(mbmi->mode) == NEWMV) { -#if CONFIG_REF_MV uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame); if (xd->ref_mv_count[ref_frame_type] > 1) { ref_mv[0] = xd->ref_mv_stack[ref_frame_type][ref_mv_idx].this_mv; clamp_mv_ref(&ref_mv[0].as_mv, xd->n8_w << MI_SIZE_LOG2, xd->n8_h << MI_SIZE_LOG2, xd); } -#endif nearestmv[0] = ref_mv[0]; } if (compound_ref1_mode(mbmi->mode) == NEWMV) { -#if CONFIG_REF_MV uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame); if (xd->ref_mv_count[ref_frame_type] > 1) { ref_mv[1] = xd->ref_mv_stack[ref_frame_type][ref_mv_idx].comp_mv; clamp_mv_ref(&ref_mv[1].as_mv, xd->n8_w << MI_SIZE_LOG2, xd->n8_h << MI_SIZE_LOG2, xd); } -#endif nearestmv[1] = ref_mv[1]; } } else { #endif // CONFIG_EXT_INTER if (mbmi->mode == NEWMV) { for (ref = 0; ref < 1 + is_compound; ++ref) { -#if CONFIG_REF_MV uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame); if (xd->ref_mv_count[ref_frame_type] > 1) { ref_mv[ref] = @@ -2102,7 +2022,6 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi, clamp_mv_ref(&ref_mv[ref].as_mv, xd->n8_w << MI_SIZE_LOG2, xd->n8_h << MI_SIZE_LOG2, xd); } -#endif nearestmv[ref] = ref_mv[ref]; } } @@ -2116,13 +2035,13 @@ 
static void read_inter_block_mode_info(AV1Decoder *const pbi, aom_merge_corrupted_flag(&xd->corrupted, mv_corrupted_flag); } -#if CONFIG_EXT_INTER +#if CONFIG_EXT_INTER && CONFIG_INTERINTRA mbmi->use_wedge_interintra = 0; if (cm->reference_mode != COMPOUND_REFERENCE && #if CONFIG_SUPERTX !supertx_enabled && #endif - is_interintra_allowed(mbmi)) { + cm->allow_interintra_compound && is_interintra_allowed(mbmi)) { const int bsize_group = size_group_lookup[bsize]; const int interintra = aom_read(r, cm->fc->interintra_prob[bsize_group], ACCT_STR); @@ -2157,7 +2076,7 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi, } } } -#endif // CONFIG_EXT_INTER +#endif // CONFIG_EXT_INTER && CONFIG_INTERINTRA #if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION mbmi->motion_mode = SIMPLE_TRANSLATION; @@ -2201,22 +2120,24 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi, ) { if (is_any_masked_compound_used(bsize)) { #if CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE - mbmi->interinter_compound_type = - aom_read_tree(r, av1_compound_type_tree, - cm->fc->compound_type_prob[bsize], ACCT_STR); -#endif // CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE + if (cm->allow_masked_compound) { + mbmi->interinter_compound_type = + aom_read_tree(r, av1_compound_type_tree, + cm->fc->compound_type_prob[bsize], ACCT_STR); #if CONFIG_WEDGE - if (mbmi->interinter_compound_type == COMPOUND_WEDGE) { - mbmi->wedge_index = - aom_read_literal(r, get_wedge_bits_lookup(bsize), ACCT_STR); - mbmi->wedge_sign = aom_read_bit(r, ACCT_STR); - } + if (mbmi->interinter_compound_type == COMPOUND_WEDGE) { + mbmi->wedge_index = + aom_read_literal(r, get_wedge_bits_lookup(bsize), ACCT_STR); + mbmi->wedge_sign = aom_read_bit(r, ACCT_STR); + } #endif // CONFIG_WEDGE #if CONFIG_COMPOUND_SEGMENT - if (mbmi->interinter_compound_type == COMPOUND_SEG) { - mbmi->mask_type = aom_read_literal(r, MAX_SEG_MASK_BITS, ACCT_STR); - } + if (mbmi->interinter_compound_type == COMPOUND_SEG) { + mbmi->mask_type = aom_read_literal(r, MAX_SEG_MASK_BITS, ACCT_STR); + } #endif // CONFIG_COMPOUND_SEGMENT + } +#endif // CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE } else { mbmi->interinter_compound_type = COMPOUND_AVERAGE; } @@ -2278,9 +2199,10 @@ static void read_inter_frame_mode_info(AV1Decoder *const pbi, inter_block = read_is_inter_block(cm, xd, mbmi->segment_id, r); #if CONFIG_VAR_TX - xd->above_txfm_context = cm->above_txfm_context + mi_col; - xd->left_txfm_context = - xd->left_txfm_context_buffer + (mi_row & MAX_MIB_MASK); + xd->above_txfm_context = + cm->above_txfm_context + (mi_col << TX_UNIT_WIDE_LOG2); + xd->left_txfm_context = xd->left_txfm_context_buffer + + ((mi_row & MAX_MIB_MASK) << TX_UNIT_HIGH_LOG2); if (cm->tx_mode == TX_MODE_SELECT && #if CONFIG_CB4X4 @@ -2371,7 +2293,6 @@ void av1_read_mode_info(AV1Decoder *const pbi, MACROBLOCKD *xd, if (frame_is_intra_only(cm)) { read_intra_frame_mode_info(cm, xd, mi_row, mi_col, r); -#if CONFIG_REF_MV for (h = 0; h < y_mis; ++h) { MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols; for (w = 0; w < x_mis; ++w) { @@ -2380,7 +2301,6 @@ void av1_read_mode_info(AV1Decoder *const pbi, MACROBLOCKD *xd, mv->ref_frame[1] = NONE_FRAME; } } -#endif } else { read_inter_frame_mode_info(pbi, xd, #if CONFIG_SUPERTX @@ -2395,10 +2315,8 @@ void av1_read_mode_info(AV1Decoder *const pbi, MACROBLOCKD *xd, mv->ref_frame[1] = mi->mbmi.ref_frame[1]; mv->mv[0].as_int = mi->mbmi.mv[0].as_int; mv->mv[1].as_int = mi->mbmi.mv[1].as_int; -#if CONFIG_REF_MV mv->pred_mv[0].as_int = mi->mbmi.pred_mv[0].as_int; mv->pred_mv[1].as_int = 
mi->mbmi.pred_mv[1].as_int; -#endif } } } diff --git a/third_party/aom/av1/decoder/decoder.c b/third_party/aom/av1/decoder/decoder.c index 1bd91086e..b4f37d4e7 100644 --- a/third_party/aom/av1/decoder/decoder.c +++ b/third_party/aom/av1/decoder/decoder.c @@ -50,7 +50,6 @@ static void initialize_dec(void) { av1_init_wedge_masks(); #endif // CONFIG_EXT_INTER init_done = 1; -#if CONFIG_EC_MULTISYMBOL av1_indices_from_tree(av1_intra_mode_ind, av1_intra_mode_inv, av1_intra_mode_tree); av1_indices_from_tree(av1_switchable_interp_ind, av1_switchable_interp_inv, @@ -68,7 +67,6 @@ static void initialize_dec(void) { #endif av1_indices_from_tree(av1_inter_mode_ind, av1_inter_mode_inv, av1_inter_mode_tree); -#endif } } diff --git a/third_party/aom/av1/decoder/decoder.h b/third_party/aom/av1/decoder/decoder.h index 4a90b4ad5..139fde1c0 100644 --- a/third_party/aom/av1/decoder/decoder.h +++ b/third_party/aom/av1/decoder/decoder.h @@ -203,7 +203,7 @@ static INLINE void decrease_ref_count(int idx, RefCntBuffer *const frame_bufs, } } -#if CONFIG_EXT_REFS +#if CONFIG_EXT_REFS || CONFIG_TEMPMV_SIGNALING static INLINE int dec_is_ref_frame_buf(AV1Decoder *const pbi, RefCntBuffer *frame_buf) { AV1_COMMON *const cm = &pbi->common; diff --git a/third_party/aom/av1/decoder/decodetxb.c b/third_party/aom/av1/decoder/decodetxb.c index e1db09775..90685a18d 100644 --- a/third_party/aom/av1/decoder/decodetxb.c +++ b/third_party/aom/av1/decoder/decodetxb.c @@ -169,7 +169,7 @@ uint8_t av1_read_coeffs_txb(const AV1_COMMON *const cm, MACROBLOCKD *xd, sign = aom_read_bit(r, ACCT_STR); } - ctx = get_level_ctx(tcoeffs, scan[c], bwl); + ctx = get_br_ctx(tcoeffs, scan[c], bwl); if (cm->fc->coeff_lps[tx_size][plane_type][ctx] == 0) exit(0); diff --git a/third_party/aom/av1/decoder/detokenize.c b/third_party/aom/av1/decoder/detokenize.c index 494f1681f..2e3309c07 100644 --- a/third_party/aom/av1/decoder/detokenize.c +++ b/third_party/aom/av1/decoder/detokenize.c @@ -106,7 +106,7 @@ static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff, dequant_val_type_nuq *dq_val, #endif // CONFIG_NEW_QUANT #if CONFIG_AOM_QM - const qm_val_t *iqm[2][TX_SIZES], + const qm_val_t *iqm[2][TX_SIZES_ALL], #endif // CONFIG_AOM_QM int ctx, const int16_t *scan, const int16_t *nb, int16_t *max_scan_line, aom_reader *r) { @@ -123,7 +123,6 @@ static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff, #endif // CONFIG_AOM_QM int band, c = 0; const int tx_size_ctx = txsize_sqr_map[tx_size]; -#if CONFIG_NEW_TOKENSET aom_cdf_prob(*coef_head_cdfs)[COEFF_CONTEXTS][CDF_SIZE(ENTROPY_TOKENS)] = ec_ctx->coef_head_cdfs[tx_size_ctx][type][ref]; aom_cdf_prob(*coef_tail_cdfs)[COEFF_CONTEXTS][CDF_SIZE(ENTROPY_TOKENS)] = @@ -135,18 +134,6 @@ static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff, unsigned int(*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1] = NULL; unsigned int(*eob_branch_count)[COEFF_CONTEXTS] = NULL; #endif -#else - aom_prob(*coef_probs)[COEFF_CONTEXTS][UNCONSTRAINED_NODES] = - ec_ctx->coef_probs[tx_size_ctx][type][ref]; - const aom_prob *prob; -#if CONFIG_EC_MULTISYMBOL - aom_cdf_prob(*coef_cdfs)[COEFF_CONTEXTS][CDF_SIZE(ENTROPY_TOKENS)] = - ec_ctx->coef_cdfs[tx_size_ctx][type][ref]; - aom_cdf_prob(*cdf)[CDF_SIZE(ENTROPY_TOKENS)]; -#endif // CONFIG_EC_MULTISYMBOL - unsigned int(*coef_counts)[COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1] = NULL; - unsigned int(*eob_branch_count)[COEFF_CONTEXTS] = NULL; -#endif // CONFIG_NEW_TOKENSET uint8_t token_cache[MAX_TX_SQUARE]; const uint8_t 
*band_translate = get_band_translate(tx_size); int dq_shift; @@ -156,23 +143,17 @@ static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff, const tran_low_t *dqv_val = &dq_val[0][0]; #endif // CONFIG_NEW_QUANT (void)tx_type; -#if CONFIG_AOM_QM - (void)iqmatrix; -#endif // CONFIG_AOM_QM if (counts) { -#if !CONFIG_NEW_TOKENSET || !CONFIG_EC_ADAPT +#if !CONFIG_EC_ADAPT coef_counts = counts->coef[tx_size_ctx][type][ref]; eob_branch_count = counts->eob_branch[tx_size_ctx][type][ref]; -#endif -#if CONFIG_NEW_TOKENSET && !CONFIG_EC_ADAPT blockz_count = counts->blockz_count[tx_size_ctx][type][ref][ctx]; #endif } dq_shift = av1_get_tx_scale(tx_size); -#if CONFIG_NEW_TOKENSET band = *band_translate++; int more_data = 1; @@ -238,12 +219,7 @@ static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff, *max_scan_line = AOMMAX(*max_scan_line, scan[c]); token_cache[scan[c]] = av1_pt_energy_class[token]; - val = token_to_value(r, token, tx_size, -#if CONFIG_HIGHBITDEPTH - xd->bd); -#else - 8); -#endif // CONFIG_HIGHBITDEPTH + val = token_to_value(r, token, tx_size, xd->bd); #if CONFIG_NEW_QUANT v = av1_dequant_abscoeff_nuq(val, dqv, dqv_val); @@ -258,11 +234,7 @@ static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff, v = aom_read_bit(r, ACCT_STR) ? -v : v; #if CONFIG_COEFFICIENT_RANGE_CHECKING -#if CONFIG_HIGHBITDEPTH check_range(v, xd->bd); -#else - check_range(v, 8); -#endif // CONFIG_HIGHBITDEPTH #endif // CONFIG_COEFFICIENT_RANGE_CHECKING dqcoeff[scan[c]] = v; @@ -273,94 +245,6 @@ static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff, dqv = dq[1]; ctx = get_coef_context(nb, token_cache, c); band = *band_translate++; - -#else // CONFIG_NEW_TOKENSET - while (c < max_eob) { - int val = -1; - band = *band_translate++; - prob = coef_probs[band][ctx]; - if (counts) ++eob_branch_count[band][ctx]; - if (!aom_read(r, prob[EOB_CONTEXT_NODE], ACCT_STR)) { - INCREMENT_COUNT(EOB_MODEL_TOKEN); - break; - } - -#if CONFIG_NEW_QUANT - dqv_val = &dq_val[band][0]; -#endif // CONFIG_NEW_QUANT - - while (!aom_read(r, prob[ZERO_CONTEXT_NODE], ACCT_STR)) { - INCREMENT_COUNT(ZERO_TOKEN); - dqv = dq[1]; - token_cache[scan[c]] = 0; - ++c; - if (c >= max_eob) return c; // zero tokens at the end (no eob token) - ctx = get_coef_context(nb, token_cache, c); - band = *band_translate++; - prob = coef_probs[band][ctx]; -#if CONFIG_NEW_QUANT - dqv_val = &dq_val[band][0]; -#endif // CONFIG_NEW_QUANT - } - - *max_scan_line = AOMMAX(*max_scan_line, scan[c]); - -#if CONFIG_EC_MULTISYMBOL - cdf = &coef_cdfs[band][ctx]; - token = ONE_TOKEN + - aom_read_symbol(r, *cdf, CATEGORY6_TOKEN - ONE_TOKEN + 1, ACCT_STR); - INCREMENT_COUNT(ONE_TOKEN + (token > ONE_TOKEN)); - assert(token != ZERO_TOKEN); - val = token_to_value(r, token, tx_size, -#if CONFIG_HIGHBITDEPTH - xd->bd); -#else - 8); -#endif // CONFIG_HIGHBITDEPTH -#else // CONFIG_EC_MULTISYMBOL - if (!aom_read(r, prob[ONE_CONTEXT_NODE], ACCT_STR)) { - INCREMENT_COUNT(ONE_TOKEN); - token = ONE_TOKEN; - val = 1; - } else { - INCREMENT_COUNT(TWO_TOKEN); - token = aom_read_tree(r, av1_coef_con_tree, - av1_pareto8_full[prob[PIVOT_NODE] - 1], ACCT_STR); - assert(token != ZERO_TOKEN && token != ONE_TOKEN); - val = token_to_value(r, token, tx_size, -#if CONFIG_HIGHBITDEPTH - xd->bd); -#else - 8); -#endif // CONFIG_HIGHBITDEPTH - } -#endif // CONFIG_EC_MULTISYMBOL -#if CONFIG_NEW_QUANT - v = av1_dequant_abscoeff_nuq(val, dqv, dqv_val); - v = dq_shift ? 
ROUND_POWER_OF_TWO(v, dq_shift) : v;
-#else
-#if CONFIG_AOM_QM
-      dqv = ((iqmatrix[scan[c]] * (int)dqv) + (1 << (AOM_QM_BITS - 1))) >>
-            AOM_QM_BITS;
-#endif
-      v = (val * dqv) >> dq_shift;
-#endif  // CONFIG_NEW_QUANT
-
-#if CONFIG_COEFFICIENT_RANGE_CHECKING
-#if CONFIG_HIGHBITDEPTH
-      dqcoeff[scan[c]] =
-          highbd_check_range((aom_read_bit(r, ACCT_STR) ? -v : v), xd->bd);
-#else
-      dqcoeff[scan[c]] = check_range(aom_read_bit(r, ACCT_STR) ? -v : v, 8);
-#endif  // CONFIG_HIGHBITDEPTH
-#else
-      dqcoeff[scan[c]] = aom_read_bit(r, ACCT_STR) ? -v : v;
-#endif  // CONFIG_COEFFICIENT_RANGE_CHECKING
-      token_cache[scan[c]] = av1_pt_energy_class[token];
-      ++c;
-      ctx = get_coef_context(nb, token_cache, c);
-      dqv = dq[1];
-#endif  // CONFIG_NEW_TOKENSET
   }
 
   return c;
diff --git a/third_party/aom/av1/decoder/inspection.c b/third_party/aom/av1/decoder/inspection.c
index 2e8a61087..926c77e17 100644
--- a/third_party/aom/av1/decoder/inspection.c
+++ b/third_party/aom/av1/decoder/inspection.c
@@ -14,6 +14,9 @@
 #if CONFIG_CDEF
 #include "av1/common/cdef.h"
 #endif
+#if CONFIG_CFL
+#include "av1/common/cfl.h"
+#endif
 
 void ifd_init(insp_frame_data *fd, int frame_width, int frame_height) {
   fd->mi_cols = ALIGN_POWER_OF_TWO(frame_width, 3) >> MI_SIZE_LOG2;
@@ -97,6 +100,16 @@ int ifd_inspect(insp_frame_data *fd, void *decoder) {
           cm->cdef_strengths[mbmi->cdef_strength] % CLPF_STRENGTHS;
       mi->cdef_strength += mi->cdef_strength == 3;
 #endif
+#if CONFIG_CFL
+      if (mbmi->uv_mode == DC_PRED) {
+        mi->cfl_alpha_idx = mbmi->cfl_alpha_idx;
+        mi->cfl_alpha_sign = (mbmi->cfl_alpha_signs[CFL_PRED_V] << CFL_PRED_V) +
+                             mbmi->cfl_alpha_signs[CFL_PRED_U];
+      } else {
+        mi->cfl_alpha_idx = 0;
+        mi->cfl_alpha_sign = 0;
+      }
+#endif
     }
   }
   return 1;
diff --git a/third_party/aom/av1/decoder/inspection.h b/third_party/aom/av1/decoder/inspection.h
index d6cf4319a..06a94b737 100644
--- a/third_party/aom/av1/decoder/inspection.h
+++ b/third_party/aom/av1/decoder/inspection.h
@@ -15,6 +15,7 @@ extern "C" {
 #endif  // __cplusplus
 
+#include "av1/common/seg_common.h"
 #if CONFIG_ACCOUNTING
 #include "av1/decoder/accounting.h"
 #endif
@@ -49,6 +50,10 @@ struct insp_mi_data {
   int8_t cdef_level;
   int8_t cdef_strength;
 #endif
+#if CONFIG_CFL
+  int8_t cfl_alpha_idx;
+  int8_t cfl_alpha_sign;
+#endif
 };
 
 typedef struct insp_frame_data insp_frame_data;
diff --git a/third_party/aom/av1/decoder/laplace_decoder.c b/third_party/aom/av1/decoder/laplace_decoder.c
index b6cf50bc7..5cc080ea7 100644
--- a/third_party/aom/av1/decoder/laplace_decoder.c
+++ b/third_party/aom/av1/decoder/laplace_decoder.c
@@ -39,10 +39,10 @@ static int aom_decode_pvq_split_(aom_reader *r, od_pvq_codeword_ctx *adapt,
   count += msbs << shift;
   if (count > sum) {
     count = sum;
-#if CONFIG_DAALA_EC
+#if !CONFIG_ANS
     r->ec.error = 1;
 #else
-# error "CONFIG_PVQ currently requires CONFIG_DAALA_EC."
+# error "CONFIG_PVQ currently requires !CONFIG_ANS."
 #endif
   }
   return count;
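
Illustrative sketch (not part of the patch): the read_palette_colors_uv() path added above codes the V-channel colors either as raw literals or as wrap-around signed deltas from the previous color. The standalone C sketch below demonstrates only that delta scheme; the toy_* bit reader and all other names are hypothetical stand-ins for aom_reader/aom_read_literal, so this is not the libaom API or the normative decoder, and it omits the adaptive bit-width narrowing used on the Y/U paths.

/* Minimal sketch of wrap-around signed-delta color decoding, mirroring the
 * V-channel logic in read_palette_colors_uv() above. The toy_* names are
 * hypothetical; a real decoder would use aom_reader. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
  const uint8_t *bits; /* one bit per byte, each 0 or 1 */
  int pos;
} ToyReader;

static int toy_read_bit(ToyReader *r) { return r->bits[r->pos++]; }

/* Read an n-bit unsigned value, most significant bit first. */
static int toy_read_literal(ToyReader *r, int n) {
  int v = 0;
  for (int i = 0; i < n; ++i) v = (v << 1) | toy_read_bit(r);
  return v;
}

/* Decode n colors: a raw base color of bit_depth bits, then (n - 1) deltas of
 * 'bits' width, each followed by a sign bit when nonzero, with the running
 * value wrapped into [0, 1 << bit_depth). */
static void decode_wraparound_deltas(ToyReader *r, int bit_depth, int bits,
                                     int n, uint16_t *colors) {
  const int max_val = 1 << bit_depth;
  colors[0] = (uint16_t)toy_read_literal(r, bit_depth);
  for (int i = 1; i < n; ++i) {
    int delta = toy_read_literal(r, bits);
    if (delta && toy_read_bit(r)) delta = -delta;
    int val = (int)colors[i - 1] + delta;
    if (val < 0) val += max_val;
    if (val >= max_val) val -= max_val;
    colors[i] = (uint16_t)val;
  }
}

int main(void) {
  /* Hand-built bitstream: base = 200 (8 bits), then delta = 3 (4 bits, sign
   * bit 0 -> +3), then delta = 5 (4 bits, sign bit 1 -> -5). */
  const uint8_t stream[] = { 1, 1, 0, 0, 1, 0, 0, 0,  /* 200 */
                             0, 0, 1, 1, 0,           /* +3  */
                             0, 1, 0, 1, 1 };         /* -5  */
  ToyReader r = { stream, 0 };
  uint16_t colors[3];
  decode_wraparound_deltas(&r, 8, 4, 3, colors);
  for (int i = 0; i < 3; ++i) printf("color[%d] = %u\n", i, colors[i]);
  return 0; /* prints 200, 203, 198 */
}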