author     trav90 <travawine@palemoon.org>  2018-10-18 21:53:44 -0500
committer  trav90 <travawine@palemoon.org>  2018-10-18 21:53:44 -0500
commit     ec910d81405c736a4490383a250299a7837c2e64 (patch)
tree       4f27cc226f93a863121aef6c56313e4153a69b3e /third_party/aom/av1/decoder
parent     01eb57073ba97b2d6cbf20f745dfcc508197adc3 (diff)
Update aom to commit id e87fb2378f01103d5d6e477a4ef6892dc714e614
Diffstat (limited to 'third_party/aom/av1/decoder')
-rw-r--r--  third_party/aom/av1/decoder/decodeframe.c  2275
-rw-r--r--  third_party/aom/av1/decoder/decodeframe.h    21
-rw-r--r--  third_party/aom/av1/decoder/decodemv.c     1039
-rw-r--r--  third_party/aom/av1/decoder/decoder.c       139
-rw-r--r--  third_party/aom/av1/decoder/decoder.h        21
-rw-r--r--  third_party/aom/av1/decoder/decodetxb.c     418
-rw-r--r--  third_party/aom/av1/decoder/decodetxb.h       5
-rw-r--r--  third_party/aom/av1/decoder/detokenize.c    190
-rw-r--r--  third_party/aom/av1/decoder/detokenize.h      2
-rw-r--r--  third_party/aom/av1/decoder/dthread.c         5
-rw-r--r--  third_party/aom/av1/decoder/inspection.c     32
-rw-r--r--  third_party/aom/av1/decoder/symbolrate.h     88
12 files changed, 2638 insertions(+), 1597 deletions(-)
diff --git a/third_party/aom/av1/decoder/decodeframe.c b/third_party/aom/av1/decoder/decodeframe.c
index 247e60e04..9ec3b60eb 100644
--- a/third_party/aom/av1/decoder/decodeframe.c
+++ b/third_party/aom/av1/decoder/decodeframe.c
@@ -19,9 +19,9 @@
#include "aom/aom_codec.h"
#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/binary_codes_reader.h"
#include "aom_dsp/bitreader.h"
#include "aom_dsp/bitreader_buffer.h"
-#include "aom_dsp/binary_codes_reader.h"
#include "aom_mem/aom_mem.h"
#include "aom_ports/mem.h"
#include "aom_ports/mem_ops.h"
@@ -44,6 +44,7 @@
#include "av1/common/entropymode.h"
#include "av1/common/entropymv.h"
#include "av1/common/idct.h"
+#include "av1/common/mvref_common.h"
#include "av1/common/pred_common.h"
#include "av1/common/quant_common.h"
#include "av1/common/reconinter.h"
@@ -63,6 +64,7 @@
#endif
#include "av1/decoder/detokenize.h"
#include "av1/decoder/dsubexp.h"
+#include "av1/decoder/symbolrate.h"
#if CONFIG_WARPED_MOTION || CONFIG_GLOBAL_MOTION
#include "av1/common/warped_motion.h"
@@ -85,6 +87,17 @@
#include "av1/common/cfl.h"
#endif
+#if CONFIG_STRIPED_LOOP_RESTORATION && !CONFIG_LOOP_RESTORATION
+#error "striped_loop_restoration requires loop_restoration"
+#endif
+
+#if CONFIG_LOOP_RESTORATION
+static void loop_restoration_read_sb_coeffs(const AV1_COMMON *const cm,
+ MACROBLOCKD *xd,
+ aom_reader *const r, int plane,
+ int rtile_idx);
+#endif
+
static struct aom_read_bit_buffer *init_read_bit_buffer(
AV1Decoder *pbi, struct aom_read_bit_buffer *rb, const uint8_t *data,
const uint8_t *data_end, uint8_t clear_data[MAX_AV1_HEADER_SIZE]);
@@ -94,7 +107,7 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
struct aom_read_bit_buffer *rb);
static int is_compound_reference_allowed(const AV1_COMMON *cm) {
-#if CONFIG_ONE_SIDED_COMPOUND || CONFIG_EXT_COMP_REFS // Normative in decoder
+#if CONFIG_ONE_SIDED_COMPOUND // Normative in decoder
return !frame_is_intra_only(cm);
#else
int i;
@@ -103,7 +116,7 @@ static int is_compound_reference_allowed(const AV1_COMMON *cm) {
if (cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1]) return 1;
return 0;
-#endif // CONFIG_ONE_SIDED_COMPOUND || CONFIG_EXT_COMP_REFS
+#endif // CONFIG_ONE_SIDED_COMPOUND
}
static void setup_compound_reference_mode(AV1_COMMON *cm) {
@@ -114,12 +127,8 @@ static void setup_compound_reference_mode(AV1_COMMON *cm) {
cm->comp_fwd_ref[3] = GOLDEN_FRAME;
cm->comp_bwd_ref[0] = BWDREF_FRAME;
-#if CONFIG_ALTREF2
cm->comp_bwd_ref[1] = ALTREF2_FRAME;
cm->comp_bwd_ref[2] = ALTREF_FRAME;
-#else // !CONFIG_ALTREF2
- cm->comp_bwd_ref[1] = ALTREF_FRAME;
-#endif // CONFIG_ALTREF2
#else // !CONFIG_EXT_REFS
if (cm->ref_frame_sign_bias[LAST_FRAME] ==
cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
@@ -167,7 +176,7 @@ static TX_MODE read_tx_mode(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
#endif // CONFIG_VAR_TX_NO_TX_MODE
}
-#if !CONFIG_NEW_MULTISYMBOL
+#if !CONFIG_RESTRICT_COMPRESSED_HDR
static void read_inter_mode_probs(FRAME_CONTEXT *fc, aom_reader *r) {
int i;
for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i)
@@ -196,15 +205,11 @@ static REFERENCE_MODE read_frame_reference_mode(
}
}
+#if !CONFIG_RESTRICT_COMPRESSED_HDR
static void read_frame_reference_mode_probs(AV1_COMMON *cm, aom_reader *r) {
-#if CONFIG_NEW_MULTISYMBOL && !CONFIG_EXT_COMP_REFS
- (void)r;
-#else
FRAME_CONTEXT *const fc = cm->fc;
int i;
-#endif
-#if !CONFIG_NEW_MULTISYMBOL
if (cm->reference_mode == REFERENCE_MODE_SELECT)
for (i = 0; i < COMP_INTER_CONTEXTS; ++i)
av1_diff_update_prob(r, &fc->comp_inter_prob[i], ACCT_STR);
@@ -217,7 +222,6 @@ static void read_frame_reference_mode_probs(AV1_COMMON *cm, aom_reader *r) {
}
}
}
-#endif
if (cm->reference_mode != SINGLE_REFERENCE) {
#if CONFIG_EXT_COMP_REFS
@@ -231,7 +235,6 @@ static void read_frame_reference_mode_probs(AV1_COMMON *cm, aom_reader *r) {
}
#endif // CONFIG_EXT_COMP_REFS
-#if !CONFIG_NEW_MULTISYMBOL
for (i = 0; i < REF_CONTEXTS; ++i) {
int j;
#if CONFIG_EXT_REFS
@@ -244,11 +247,9 @@ static void read_frame_reference_mode_probs(AV1_COMMON *cm, aom_reader *r) {
av1_diff_update_prob(r, &fc->comp_ref_prob[i][j], ACCT_STR);
#endif // CONFIG_EXT_REFS
}
-#endif // CONFIG_NEW_MULTISYMBOL
}
}
-#if !CONFIG_NEW_MULTISYMBOL
static void update_mv_probs(aom_prob *p, int n, aom_reader *r) {
int i;
for (i = 0; i < n; ++i) av1_diff_update_prob(r, &p[i], ACCT_STR);
@@ -267,7 +268,7 @@ static void read_mv_probs(nmv_context *ctx, int allow_hp, aom_reader *r) {
#endif
static void inverse_transform_block(MACROBLOCKD *xd, int plane,
-#if CONFIG_LGT
+#if CONFIG_LGT_FROM_PRED
PREDICTION_MODE mode,
#endif
const TX_TYPE tx_type,
@@ -276,9 +277,12 @@ static void inverse_transform_block(MACROBLOCKD *xd, int plane,
struct macroblockd_plane *const pd = &xd->plane[plane];
tran_low_t *const dqcoeff = pd->dqcoeff;
av1_inverse_transform_block(xd, dqcoeff,
-#if CONFIG_LGT
+#if CONFIG_LGT_FROM_PRED
mode,
#endif
+#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
+ xd->mrc_mask,
+#endif // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
tx_type, tx_size, dst, stride, eob);
memset(dqcoeff, 0, (scan_line + 1) * sizeof(dqcoeff[0]));
}
@@ -337,10 +341,11 @@ static int av1_pvq_decode_helper(MACROBLOCKD *xd, tran_low_t *ref_coeff,
pvq_dc_quant = 1;
else {
if (use_activity_masking)
- pvq_dc_quant = OD_MAXI(
- 1, (quant[0] << (OD_COEFF_SHIFT - 3) >> hbd_downshift) *
- dec->state.pvq_qm_q4[pli][od_qm_get_index(bs, 0)] >>
- 4);
+ pvq_dc_quant =
+ OD_MAXI(1,
+ (quant[0] << (OD_COEFF_SHIFT - 3) >> hbd_downshift) *
+ dec->state.pvq_qm_q4[pli][od_qm_get_index(bs, 0)] >>
+ 4);
else
pvq_dc_quant =
OD_MAXI(1, quant[0] << (OD_COEFF_SHIFT - 3) >> hbd_downshift);
@@ -471,133 +476,6 @@ static int av1_pvq_decode_helper2(AV1_COMMON *cm, MACROBLOCKD *const xd,
}
#endif
-#if CONFIG_DPCM_INTRA
-static void process_block_dpcm_vert(TX_SIZE tx_size, TX_TYPE_1D tx_type_1d,
- const tran_low_t *dqcoeff, uint8_t *dst,
- int dst_stride) {
- const int tx1d_width = tx_size_wide[tx_size];
- const int tx1d_height = tx_size_high[tx_size];
- dpcm_inv_txfm_add_func inverse_tx =
- av1_get_dpcm_inv_txfm_add_func(tx1d_width);
- for (int r = 0; r < tx1d_height; ++r) {
- if (r > 0) memcpy(dst, dst - dst_stride, tx1d_width * sizeof(dst[0]));
- inverse_tx(dqcoeff, 1, tx_type_1d, dst);
- dqcoeff += tx1d_width;
- dst += dst_stride;
- }
-}
-
-static void process_block_dpcm_horz(TX_SIZE tx_size, TX_TYPE_1D tx_type_1d,
- const tran_low_t *dqcoeff, uint8_t *dst,
- int dst_stride) {
- const int tx1d_width = tx_size_wide[tx_size];
- const int tx1d_height = tx_size_high[tx_size];
- dpcm_inv_txfm_add_func inverse_tx =
- av1_get_dpcm_inv_txfm_add_func(tx1d_height);
- tran_low_t tx_buff[64];
- for (int c = 0; c < tx1d_width; ++c, ++dqcoeff, ++dst) {
- for (int r = 0; r < tx1d_height; ++r) {
- if (c > 0) dst[r * dst_stride] = dst[r * dst_stride - 1];
- tx_buff[r] = dqcoeff[r * tx1d_width];
- }
- inverse_tx(tx_buff, dst_stride, tx_type_1d, dst);
- }
-}
-
-#if CONFIG_HIGHBITDEPTH
-static void hbd_process_block_dpcm_vert(TX_SIZE tx_size, TX_TYPE_1D tx_type_1d,
- int bd, const tran_low_t *dqcoeff,
- uint8_t *dst8, int dst_stride) {
- uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
- const int tx1d_width = tx_size_wide[tx_size];
- const int tx1d_height = tx_size_high[tx_size];
- hbd_dpcm_inv_txfm_add_func inverse_tx =
- av1_get_hbd_dpcm_inv_txfm_add_func(tx1d_width);
- for (int r = 0; r < tx1d_height; ++r) {
- if (r > 0) memcpy(dst, dst - dst_stride, tx1d_width * sizeof(dst[0]));
- inverse_tx(dqcoeff, 1, tx_type_1d, bd, dst, 1);
- dqcoeff += tx1d_width;
- dst += dst_stride;
- }
-}
-
-static void hbd_process_block_dpcm_horz(TX_SIZE tx_size, TX_TYPE_1D tx_type_1d,
- int bd, const tran_low_t *dqcoeff,
- uint8_t *dst8, int dst_stride) {
- uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
- const int tx1d_width = tx_size_wide[tx_size];
- const int tx1d_height = tx_size_high[tx_size];
- hbd_dpcm_inv_txfm_add_func inverse_tx =
- av1_get_hbd_dpcm_inv_txfm_add_func(tx1d_height);
- tran_low_t tx_buff[64];
- switch (tx1d_height) {
- case 4: inverse_tx = av1_hbd_dpcm_inv_txfm_add_4_c; break;
- case 8: inverse_tx = av1_hbd_dpcm_inv_txfm_add_8_c; break;
- case 16: inverse_tx = av1_hbd_dpcm_inv_txfm_add_16_c; break;
- case 32: inverse_tx = av1_hbd_dpcm_inv_txfm_add_32_c; break;
- default: assert(0);
- }
-
- for (int c = 0; c < tx1d_width; ++c, ++dqcoeff, ++dst) {
- for (int r = 0; r < tx1d_height; ++r) {
- if (c > 0) dst[r * dst_stride] = dst[r * dst_stride - 1];
- tx_buff[r] = dqcoeff[r * tx1d_width];
- }
- inverse_tx(tx_buff, dst_stride, tx_type_1d, bd, dst, 0);
- }
-}
-#endif // CONFIG_HIGHBITDEPTH
-
-static void inverse_transform_block_dpcm(MACROBLOCKD *xd, int plane,
- PREDICTION_MODE mode, TX_SIZE tx_size,
- TX_TYPE tx_type, uint8_t *dst,
- int dst_stride, int16_t scan_line) {
- struct macroblockd_plane *const pd = &xd->plane[plane];
- tran_low_t *const dqcoeff = pd->dqcoeff;
- TX_TYPE_1D tx_type_1d = DCT_1D;
- switch (tx_type) {
- case IDTX: tx_type_1d = IDTX_1D; break;
- case V_DCT:
- assert(mode == H_PRED);
- tx_type_1d = DCT_1D;
- break;
- case H_DCT:
- assert(mode == V_PRED);
- tx_type_1d = DCT_1D;
- break;
- default: assert(0);
- }
- switch (mode) {
- case V_PRED:
-#if CONFIG_HIGHBITDEPTH
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- hbd_process_block_dpcm_vert(tx_size, tx_type_1d, xd->bd, dqcoeff, dst,
- dst_stride);
- } else {
-#endif // CONFIG_HIGHBITDEPTH
- process_block_dpcm_vert(tx_size, tx_type_1d, dqcoeff, dst, dst_stride);
-#if CONFIG_HIGHBITDEPTH
- }
-#endif // CONFIG_HIGHBITDEPTH
- break;
- case H_PRED:
-#if CONFIG_HIGHBITDEPTH
- if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
- hbd_process_block_dpcm_horz(tx_size, tx_type_1d, xd->bd, dqcoeff, dst,
- dst_stride);
- } else {
-#endif // CONFIG_HIGHBITDEPTH
- process_block_dpcm_horz(tx_size, tx_type_1d, dqcoeff, dst, dst_stride);
-#if CONFIG_HIGHBITDEPTH
- }
-#endif // CONFIG_HIGHBITDEPTH
- break;
- default: assert(0);
- }
- memset(dqcoeff, 0, (scan_line + 1) * sizeof(dqcoeff[0]));
-}
-#endif // CONFIG_DPCM_INTRA
-
static void predict_and_reconstruct_intra_block(
AV1_COMMON *cm, MACROBLOCKD *const xd, aom_reader *const r,
MB_MODE_INFO *const mbmi, int plane, int row, int col, TX_SIZE tx_size) {
@@ -606,7 +484,7 @@ static void predict_and_reconstruct_intra_block(
#if CONFIG_PVQ
(void)r;
#endif
- av1_predict_intra_block_facade(xd, plane, block_idx, col, row, tx_size);
+ av1_predict_intra_block_facade(cm, xd, plane, block_idx, col, row, tx_size);
if (!mbmi->skip) {
#if !CONFIG_PVQ
@@ -631,25 +509,12 @@ static void predict_and_reconstruct_intra_block(
if (eob) {
uint8_t *dst =
&pd->dst.buf[(row * pd->dst.stride + col) << tx_size_wide_log2[0]];
-#if CONFIG_DPCM_INTRA || CONFIG_LGT
- const PREDICTION_MODE mode =
- get_prediction_mode(xd->mi[0], plane, tx_size, block_idx);
-#if CONFIG_DPCM_INTRA
- if (av1_use_dpcm_intra(plane, mode, tx_type, mbmi)) {
- inverse_transform_block_dpcm(xd, plane, mode, tx_size, tx_type, dst,
- pd->dst.stride, max_scan_line);
- } else {
-#endif // CONFIG_DPCM_INTRA
-#endif // CONFIG_DPCM_INTRA || CONFIG_LGT
- inverse_transform_block(xd, plane,
-#if CONFIG_LGT
- mode,
-#endif
- tx_type, tx_size, dst, pd->dst.stride,
- max_scan_line, eob);
-#if CONFIG_DPCM_INTRA
- }
-#endif // CONFIG_DPCM_INTRA
+ inverse_transform_block(xd, plane,
+#if CONFIG_LGT_FROM_PRED
+ mbmi->mode,
+#endif
+ tx_type, tx_size, dst, pd->dst.stride,
+ max_scan_line, eob);
}
#else // !CONFIG_PVQ
const TX_TYPE tx_type =
@@ -658,21 +523,10 @@ static void predict_and_reconstruct_intra_block(
#endif // !CONFIG_PVQ
}
#if CONFIG_CFL
- if (plane == AOM_PLANE_Y) {
- struct macroblockd_plane *const pd = &xd->plane[plane];
-#if CONFIG_CHROMA_SUB8X8
- const BLOCK_SIZE plane_bsize =
- AOMMAX(BLOCK_4X4, get_plane_block_size(mbmi->sb_type, pd));
-#else
- const BLOCK_SIZE plane_bsize = get_plane_block_size(mbmi->sb_type, pd);
-#endif
- uint8_t *dst =
- &pd->dst.buf[(row * pd->dst.stride + col) << tx_size_wide_log2[0]];
- // TODO (ltrudeau) Store sub-8x8 inter blocks when bottom right block is
- // intra predicted.
- cfl_store(xd->cfl, dst, pd->dst.stride, row, col, tx_size, plane_bsize);
+ if (plane == AOM_PLANE_Y && xd->cfl->store_y) {
+ cfl_store_tx(xd, row, col, tx_size, mbmi->sb_type);
}
-#endif
+#endif // CONFIG_CFL
}
#if CONFIG_VAR_TX && !CONFIG_COEF_INTERLEAVE
@@ -714,7 +568,7 @@ static void decode_reconstruct_tx(AV1_COMMON *cm, MACROBLOCKD *const xd,
&max_scan_line, r, mbmi->segment_id);
#endif // CONFIG_LV_MAP
inverse_transform_block(xd, plane,
-#if CONFIG_LGT
+#if CONFIG_LGT_FROM_PRED
mbmi->mode,
#endif
tx_type, plane_tx_size,
@@ -729,7 +583,8 @@ static void decode_reconstruct_tx(AV1_COMMON *cm, MACROBLOCKD *const xd,
if (is_qttx) assert(blk_row == 0 && blk_col == 0 && block == 0);
#else
const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
- assert(sub_txs < tx_size);
+ assert(IMPLIES(tx_size <= TX_4X4, sub_txs == tx_size));
+ assert(IMPLIES(tx_size > TX_4X4, sub_txs < tx_size));
#endif
const int bsl = tx_size_wide_unit[sub_txs];
int sub_step = tx_size_wide_unit[sub_txs] * tx_size_high_unit[sub_txs];
@@ -801,7 +656,7 @@ static int reconstruct_inter_block(AV1_COMMON *cm, MACROBLOCKD *const xd,
&pd->dst.buf[(row * pd->dst.stride + col) << tx_size_wide_log2[0]];
if (eob)
inverse_transform_block(xd, plane,
-#if CONFIG_LGT
+#if CONFIG_LGT_FROM_PRED
xd->mi[0]->mbmi.mode,
#endif
tx_type, tx_size, dst, pd->dst.stride,
@@ -961,13 +816,13 @@ static void set_param_topblock(AV1_COMMON *const cm, MACROBLOCKD *const xd,
static void set_ref(AV1_COMMON *const cm, MACROBLOCKD *const xd, int idx,
int mi_row, int mi_col) {
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
RefBuffer *ref_buffer =
has_second_ref(mbmi) ? &cm->frame_refs[mbmi->ref_frame[idx] - LAST_FRAME]
: &cm->frame_refs[mbmi->ref_frame[0] - LAST_FRAME];
#else
RefBuffer *ref_buffer = &cm->frame_refs[mbmi->ref_frame[idx] - LAST_FRAME];
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
xd->block_refs[idx] = ref_buffer;
if (!av1_is_valid_scale(&ref_buffer->sf))
aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
@@ -1006,9 +861,9 @@ static void dec_predict_b_extend(
mi_row_ori, mi_col_ori);
set_ref(cm, xd, 0, mi_row_pred, mi_col_pred);
if (has_second_ref(&xd->mi[0]->mbmi)
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
|| is_inter_singleref_comp_mode(xd->mi[0]->mbmi.mode)
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
)
set_ref(cm, xd, 1, mi_row_pred, mi_col_pred);
if (!bextend) mbmi->tx_size = max_txsize_lookup[bsize_top];
@@ -1019,19 +874,13 @@ static void dec_predict_b_extend(
(c >> xd->plane[plane].subsampling_x);
if (!b_sub8x8)
- av1_build_inter_predictor_sb_extend(&pbi->common, xd,
-#if CONFIG_EXT_INTER
- mi_row_ori, mi_col_ori,
-#endif // CONFIG_EXT_INTER
- mi_row_pred, mi_col_pred, plane,
- bsize_pred);
+ av1_build_inter_predictor_sb_extend(&pbi->common, xd, mi_row_ori,
+ mi_col_ori, mi_row_pred, mi_col_pred,
+ plane, bsize_pred);
else
- av1_build_inter_predictor_sb_sub8x8_extend(&pbi->common, xd,
-#if CONFIG_EXT_INTER
- mi_row_ori, mi_col_ori,
-#endif // CONFIG_EXT_INTER
- mi_row_pred, mi_col_pred, plane,
- bsize_pred, block);
+ av1_build_inter_predictor_sb_sub8x8_extend(
+ &pbi->common, xd, mi_row_ori, mi_col_ori, mi_row_pred, mi_col_pred,
+ plane, bsize_pred, block);
}
static void dec_extend_dir(AV1Decoder *const pbi, MACROBLOCKD *const xd,
@@ -1556,6 +1405,9 @@ static void dec_predict_sb_complex(AV1Decoder *const pbi, MACROBLOCKD *const xd,
}
break;
#if CONFIG_EXT_PARTITION_TYPES
+#if CONFIG_EXT_PARTITION_TYPES_AB
+#error HORZ/VERT_A/B partitions not yet updated in superres code
+#endif
case PARTITION_HORZ_A:
dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, mi_col,
mi_row_top, mi_col_top, dst_buf, dst_stride,
@@ -1786,7 +1638,6 @@ static void decode_mbmi_block(AV1Decoder *const pbi, MACROBLOCKD *const xd,
#endif
av1_read_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis);
#endif // CONFIG_SUPERTX
-
if (bsize >= BLOCK_8X8 && (cm->subsampling_x || cm->subsampling_y)) {
const BLOCK_SIZE uv_subsize =
ss_size_lookup[bsize][cm->subsampling_x][cm->subsampling_y];
@@ -1803,6 +1654,94 @@ static void decode_mbmi_block(AV1Decoder *const pbi, MACROBLOCKD *const xd,
aom_merge_corrupted_flag(&xd->corrupted, reader_corrupted_flag);
}
+#if CONFIG_NCOBMC_ADAPT_WEIGHT
+static void set_mode_info_offsets(AV1_COMMON *const cm, MACROBLOCKD *const xd,
+ int mi_row, int mi_col) {
+ const int offset = mi_row * cm->mi_stride + mi_col;
+ xd->mi = cm->mi_grid_visible + offset;
+ xd->mi[0] = &cm->mi[offset];
+}
+
+static void get_ncobmc_recon(AV1_COMMON *const cm, MACROBLOCKD *xd, int mi_row,
+ int mi_col, int bsize, int mode) {
+ uint8_t *pred_buf[4][MAX_MB_PLANE];
+ int pred_stride[MAX_MB_PLANE] = { MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE };
+ // target block in pxl
+ int pxl_row = mi_row << MI_SIZE_LOG2;
+ int pxl_col = mi_col << MI_SIZE_LOG2;
+
+ int plane;
+#if CONFIG_HIGHBITDEPTH
+ if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
+ int len = sizeof(uint16_t);
+ ASSIGN_ALIGNED_PTRS_HBD(pred_buf[0], cm->ncobmcaw_buf[0], MAX_SB_SQUARE,
+ len);
+ ASSIGN_ALIGNED_PTRS_HBD(pred_buf[1], cm->ncobmcaw_buf[1], MAX_SB_SQUARE,
+ len);
+ ASSIGN_ALIGNED_PTRS_HBD(pred_buf[2], cm->ncobmcaw_buf[2], MAX_SB_SQUARE,
+ len);
+ ASSIGN_ALIGNED_PTRS_HBD(pred_buf[3], cm->ncobmcaw_buf[3], MAX_SB_SQUARE,
+ len);
+ } else {
+#endif // CONFIG_HIGHBITDEPTH
+ ASSIGN_ALIGNED_PTRS(pred_buf[0], cm->ncobmcaw_buf[0], MAX_SB_SQUARE);
+ ASSIGN_ALIGNED_PTRS(pred_buf[1], cm->ncobmcaw_buf[1], MAX_SB_SQUARE);
+ ASSIGN_ALIGNED_PTRS(pred_buf[2], cm->ncobmcaw_buf[2], MAX_SB_SQUARE);
+ ASSIGN_ALIGNED_PTRS(pred_buf[3], cm->ncobmcaw_buf[3], MAX_SB_SQUARE);
+#if CONFIG_HIGHBITDEPTH
+ }
+#endif
+ av1_get_ext_blk_preds(cm, xd, bsize, mi_row, mi_col, pred_buf, pred_stride);
+ av1_get_ori_blk_pred(cm, xd, bsize, mi_row, mi_col, pred_buf[3], pred_stride);
+ for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+ build_ncobmc_intrpl_pred(cm, xd, plane, pxl_row, pxl_col, bsize, pred_buf,
+ pred_stride, mode);
+ }
+}
+
+static void av1_get_ncobmc_recon(AV1_COMMON *const cm, MACROBLOCKD *const xd,
+ int bsize, const int mi_row, const int mi_col,
+ const NCOBMC_MODE modes) {
+ const int mi_width = mi_size_wide[bsize];
+ const int mi_height = mi_size_high[bsize];
+
+ assert(bsize >= BLOCK_8X8);
+
+ reset_xd_boundary(xd, mi_row, mi_height, mi_col, mi_width, cm->mi_rows,
+ cm->mi_cols);
+ get_ncobmc_recon(cm, xd, mi_row, mi_col, bsize, modes);
+}
+
+static void recon_ncobmc_intrpl_pred(AV1_COMMON *const cm,
+ MACROBLOCKD *const xd, int mi_row,
+ int mi_col, BLOCK_SIZE bsize) {
+ MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+ const int mi_width = mi_size_wide[bsize];
+ const int mi_height = mi_size_high[bsize];
+ const int hbs = AOMMAX(mi_size_wide[bsize] / 2, mi_size_high[bsize] / 2);
+ const BLOCK_SIZE sqr_blk = bsize_2_sqr_bsize[bsize];
+ if (mi_width > mi_height) {
+ // horizontal partition
+ av1_get_ncobmc_recon(cm, xd, sqr_blk, mi_row, mi_col, mbmi->ncobmc_mode[0]);
+ xd->mi += hbs;
+ av1_get_ncobmc_recon(cm, xd, sqr_blk, mi_row, mi_col + hbs,
+ mbmi->ncobmc_mode[1]);
+ } else if (mi_height > mi_width) {
+ // vertical partition
+ av1_get_ncobmc_recon(cm, xd, sqr_blk, mi_row, mi_col, mbmi->ncobmc_mode[0]);
+ xd->mi += hbs * xd->mi_stride;
+ av1_get_ncobmc_recon(cm, xd, sqr_blk, mi_row + hbs, mi_col,
+ mbmi->ncobmc_mode[1]);
+ } else {
+ av1_get_ncobmc_recon(cm, xd, sqr_blk, mi_row, mi_col, mbmi->ncobmc_mode[0]);
+ }
+ set_mode_info_offsets(cm, xd, mi_row, mi_col);
+ // restore dst buffer and mode info
+ av1_setup_dst_planes(xd->plane, bsize, get_frame_new_buffer(cm), mi_row,
+ mi_col);
+}
+#endif // CONFIG_NCOBMC_ADAPT_WEIGHT
+
static void decode_token_and_recon_block(AV1Decoder *const pbi,
MACROBLOCKD *const xd, int mi_row,
int mi_col, aom_reader *r,
@@ -1815,46 +1754,33 @@ static void decode_token_and_recon_block(AV1Decoder *const pbi,
set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis, y_mis);
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
+#if CONFIG_CFL && CONFIG_CHROMA_SUB8X8
+ CFL_CTX *const cfl = xd->cfl;
+ cfl->is_chroma_reference = is_chroma_reference(
+ mi_row, mi_col, bsize, cfl->subsampling_x, cfl->subsampling_y);
+#endif // CONFIG_CFL && CONFIG_CHROMA_SUB8X8
-#if CONFIG_DELTA_Q
if (cm->delta_q_present_flag) {
int i;
for (i = 0; i < MAX_SEGMENTS; i++) {
#if CONFIG_EXT_DELTA_Q
- xd->plane[0].seg_dequant[i][0] =
- av1_dc_quant(av1_get_qindex(&cm->seg, i, xd->current_qindex),
- cm->y_dc_delta_q, cm->bit_depth);
- xd->plane[0].seg_dequant[i][1] = av1_ac_quant(
- av1_get_qindex(&cm->seg, i, xd->current_qindex), 0, cm->bit_depth);
- xd->plane[1].seg_dequant[i][0] =
- av1_dc_quant(av1_get_qindex(&cm->seg, i, xd->current_qindex),
- cm->uv_dc_delta_q, cm->bit_depth);
- xd->plane[1].seg_dequant[i][1] =
- av1_ac_quant(av1_get_qindex(&cm->seg, i, xd->current_qindex),
- cm->uv_ac_delta_q, cm->bit_depth);
- xd->plane[2].seg_dequant[i][0] =
- av1_dc_quant(av1_get_qindex(&cm->seg, i, xd->current_qindex),
- cm->uv_dc_delta_q, cm->bit_depth);
- xd->plane[2].seg_dequant[i][1] =
- av1_ac_quant(av1_get_qindex(&cm->seg, i, xd->current_qindex),
- cm->uv_ac_delta_q, cm->bit_depth);
+ const int current_qindex =
+ av1_get_qindex(&cm->seg, i, xd->current_qindex);
#else
- xd->plane[0].seg_dequant[i][0] =
- av1_dc_quant(xd->current_qindex, cm->y_dc_delta_q, cm->bit_depth);
- xd->plane[0].seg_dequant[i][1] =
- av1_ac_quant(xd->current_qindex, 0, cm->bit_depth);
- xd->plane[1].seg_dequant[i][0] =
- av1_dc_quant(xd->current_qindex, cm->uv_dc_delta_q, cm->bit_depth);
- xd->plane[1].seg_dequant[i][1] =
- av1_ac_quant(xd->current_qindex, cm->uv_ac_delta_q, cm->bit_depth);
- xd->plane[2].seg_dequant[i][0] =
- av1_dc_quant(xd->current_qindex, cm->uv_dc_delta_q, cm->bit_depth);
- xd->plane[2].seg_dequant[i][1] =
- av1_ac_quant(xd->current_qindex, cm->uv_ac_delta_q, cm->bit_depth);
-#endif
+ const int current_qindex = xd->current_qindex;
+#endif // CONFIG_EXT_DELTA_Q
+ int j;
+ for (j = 0; j < MAX_MB_PLANE; ++j) {
+ const int dc_delta_q = j == 0 ? cm->y_dc_delta_q : cm->uv_dc_delta_q;
+ const int ac_delta_q = j == 0 ? 0 : cm->uv_ac_delta_q;
+
+ xd->plane[j].seg_dequant[i][0] =
+ av1_dc_quant(current_qindex, dc_delta_q, cm->bit_depth);
+ xd->plane[j].seg_dequant[i][1] =
+ av1_ac_quant(current_qindex, ac_delta_q, cm->bit_depth);
+ }
}
}
-#endif
#if CONFIG_CB4X4
if (mbmi->skip) av1_reset_skip_context(xd, mi_row, mi_col, bsize);
@@ -1898,12 +1824,13 @@ static void decode_token_and_recon_block(AV1Decoder *const pbi,
int row_y, col_y, row_c, col_c;
int plane;
-#if CONFIG_PALETTE
+// TODO(anybody) : remove this flag when PVQ supports pallete coding tool
+#if !CONFIG_PVQ
for (plane = 0; plane <= 1; ++plane) {
if (mbmi->palette_mode_info.palette_size[plane])
av1_decode_palette_tokens(xd, plane, r);
}
-#endif
+#endif // !CONFIG_PVQ
for (row_y = 0; row_y < tu_num_h_y; row_y++) {
for (col_y = 0; col_y < tu_num_w_y; col_y++) {
@@ -1983,12 +1910,15 @@ static void decode_token_and_recon_block(AV1Decoder *const pbi,
#else // CONFIG_COEF_INTERLEAVE
if (!is_inter_block(mbmi)) {
int plane;
-#if CONFIG_PALETTE
+
+// TODO(anybody) : remove this flag when PVQ supports pallete coding tool
+#if !CONFIG_PVQ
for (plane = 0; plane <= 1; ++plane) {
if (mbmi->palette_mode_info.palette_size[plane])
av1_decode_palette_tokens(xd, plane, r);
}
-#endif // CONFIG_PALETTE
+#endif // #if !CONFIG_PVQ
+
for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
const TX_SIZE tx_size = av1_get_tx_size(plane, xd);
@@ -2035,14 +1965,18 @@ static void decode_token_and_recon_block(AV1Decoder *const pbi,
} else {
int ref;
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
- for (ref = 0; ref < 1 + is_inter_anyref_comp_mode(mbmi->mode); ++ref) {
+#if CONFIG_COMPOUND_SINGLEREF
+ for (ref = 0; ref < 1 + is_inter_anyref_comp_mode(mbmi->mode); ++ref)
+#else
+ for (ref = 0; ref < 1 + has_second_ref(mbmi); ++ref)
+#endif // CONFIG_COMPOUND_SINGLEREF
+ {
const MV_REFERENCE_FRAME frame =
+#if CONFIG_COMPOUND_SINGLEREF
has_second_ref(mbmi) ? mbmi->ref_frame[ref] : mbmi->ref_frame[0];
#else
- for (ref = 0; ref < 1 + has_second_ref(mbmi); ++ref) {
- const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+ mbmi->ref_frame[ref];
+#endif // CONFIG_COMPOUND_SINGLEREF
if (frame < LAST_FRAME) {
#if CONFIG_INTRABC
assert(is_intrabc_block(mbmi));
@@ -2079,7 +2013,15 @@ static void decode_token_and_recon_block(AV1Decoder *const pbi,
#endif
}
#endif // CONFIG_MOTION_VAR
-
+#if CONFIG_NCOBMC_ADAPT_WEIGHT
+ if (mbmi->motion_mode == NCOBMC_ADAPT_WEIGHT) {
+ int plane;
+ recon_ncobmc_intrpl_pred(cm, xd, mi_row, mi_col, bsize);
+ for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
+ get_pred_from_intrpl_buf(xd, mi_row, mi_col, bsize, plane);
+ }
+ }
+#endif
// Reconstruction
if (!mbmi->skip) {
int eobtotal = 0;
@@ -2093,8 +2035,8 @@ static void decode_token_and_recon_block(AV1Decoder *const pbi,
#elif CONFIG_CB4X4
const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
#else
- const BLOCK_SIZE plane_bsize =
- get_plane_block_size(AOMMAX(BLOCK_8X8, bsize), pd);
+ const BLOCK_SIZE plane_bsize =
+ get_plane_block_size(AOMMAX(BLOCK_8X8, bsize), pd);
#endif
const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane);
const int max_blocks_high = max_block_high(xd, plane_bsize, plane);
@@ -2116,7 +2058,8 @@ static void decode_token_and_recon_block(AV1Decoder *const pbi,
mu_blocks_wide = AOMMIN(max_blocks_wide, mu_blocks_wide);
mu_blocks_high = AOMMIN(max_blocks_high, mu_blocks_high);
- const TX_SIZE max_tx_size = get_vartx_max_txsize(mbmi, plane_bsize);
+ const TX_SIZE max_tx_size = get_vartx_max_txsize(
+ mbmi, plane_bsize, pd->subsampling_x || pd->subsampling_y);
const int bh_var_tx = tx_size_high_unit[max_tx_size];
const int bw_var_tx = tx_size_wide_unit[max_tx_size];
int block = 0;
@@ -2152,13 +2095,25 @@ static void decode_token_and_recon_block(AV1Decoder *const pbi,
}
}
}
+#if CONFIG_CFL && CONFIG_CHROMA_SUB8X8
+ if (mbmi->uv_mode != UV_CFL_PRED) {
+#if CONFIG_DEBUG
+ if (cfl->is_chroma_reference) {
+ cfl_clear_sub8x8_val(cfl);
+ }
+#endif
+ if (!cfl->is_chroma_reference && is_inter_block(mbmi)) {
+ cfl_store_block(xd, mbmi->sb_type, mbmi->tx_size);
+ }
+ }
+#endif // CONFIG_CFL && CONFIG_CHROMA_SUB8X8
#endif // CONFIG_COEF_INTERLEAVE
int reader_corrupted_flag = aom_reader_has_error(r);
aom_merge_corrupted_flag(&xd->corrupted, reader_corrupted_flag);
}
-#if (CONFIG_NCOBMC || CONFIG_NCOBMC_ADAPT_WEIGHT) && CONFIG_MOTION_VAR
+#if NC_MODE_INFO && CONFIG_MOTION_VAR
static void detoken_and_recon_sb(AV1Decoder *const pbi, MACROBLOCKD *const xd,
int mi_row, int mi_col, aom_reader *r,
BLOCK_SIZE bsize) {
@@ -2210,6 +2165,9 @@ static void detoken_and_recon_sb(AV1Decoder *const pbi, MACROBLOCKD *const xd,
detoken_and_recon_sb(pbi, xd, mi_row + hbs, mi_col + hbs, r, subsize);
break;
#if CONFIG_EXT_PARTITION_TYPES
+#if CONFIG_EXT_PARTITION_TYPES_AB
+#error NC_MODE_INFO+MOTION_VAR not yet supported for new HORZ/VERT_AB partitions
+#endif
case PARTITION_HORZ_A:
decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, bsize2);
decode_token_and_recon_block(pbi, xd, mi_row, mi_col + hbs, r, bsize2);
@@ -2258,7 +2216,7 @@ static void decode_block(AV1Decoder *const pbi, MACROBLOCKD *const xd,
#endif
bsize);
-#if !(CONFIG_MOTION_VAR && (CONFIG_NCOBMC || CONFIG_NCOBMC_ADAPT_WEIGHT))
+#if !(CONFIG_MOTION_VAR && NC_MODE_INFO)
#if CONFIG_SUPERTX
if (!supertx_enabled)
#endif // CONFIG_SUPERTX
@@ -2273,13 +2231,8 @@ static PARTITION_TYPE read_partition(AV1_COMMON *cm, MACROBLOCKD *xd,
#if CONFIG_UNPOISON_PARTITION_CTX
const int ctx =
partition_plane_context(xd, mi_row, mi_col, has_rows, has_cols, bsize);
- const aom_prob *const probs =
- ctx < PARTITION_CONTEXTS ? cm->fc->partition_prob[ctx] : NULL;
- FRAME_COUNTS *const counts = ctx < PARTITION_CONTEXTS ? xd->counts : NULL;
#else
const int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
- const aom_prob *const probs = cm->fc->partition_prob[ctx];
- FRAME_COUNTS *const counts = xd->counts;
#endif
PARTITION_TYPE p;
FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
@@ -2287,26 +2240,33 @@ static PARTITION_TYPE read_partition(AV1_COMMON *cm, MACROBLOCKD *xd,
aom_cdf_prob *partition_cdf = (ctx >= 0) ? ec_ctx->partition_cdf[ctx] : NULL;
- if (has_rows && has_cols)
+ if (has_rows && has_cols) {
#if CONFIG_EXT_PARTITION_TYPES
- if (bsize <= BLOCK_8X8)
- p = (PARTITION_TYPE)aom_read_symbol(r, partition_cdf, PARTITION_TYPES,
- ACCT_STR);
- else
- p = (PARTITION_TYPE)aom_read_symbol(r, partition_cdf, EXT_PARTITION_TYPES,
- ACCT_STR);
+ const int num_partition_types =
+ (mi_width_log2_lookup[bsize] > mi_width_log2_lookup[BLOCK_8X8])
+ ? EXT_PARTITION_TYPES
+ : PARTITION_TYPES;
#else
- p = (PARTITION_TYPE)aom_read_symbol(r, partition_cdf, PARTITION_TYPES,
- ACCT_STR);
+ const int num_partition_types = PARTITION_TYPES;
#endif // CONFIG_EXT_PARTITION_TYPES
- else if (!has_rows && has_cols)
- p = aom_read(r, probs[1], ACCT_STR) ? PARTITION_SPLIT : PARTITION_HORZ;
- else if (has_rows && !has_cols)
- p = aom_read(r, probs[2], ACCT_STR) ? PARTITION_SPLIT : PARTITION_VERT;
- else
+ p = (PARTITION_TYPE)aom_read_symbol(r, partition_cdf, num_partition_types,
+ ACCT_STR);
+ } else if (!has_rows && has_cols) {
+ assert(bsize > BLOCK_8X8);
+ aom_cdf_prob cdf[2];
+ partition_gather_vert_alike(cdf, partition_cdf);
+ assert(cdf[1] == AOM_ICDF(CDF_PROB_TOP));
+ p = aom_read_cdf(r, cdf, 2, ACCT_STR) ? PARTITION_SPLIT : PARTITION_HORZ;
+ // gather cols
+ } else if (has_rows && !has_cols) {
+ assert(bsize > BLOCK_8X8);
+ aom_cdf_prob cdf[2];
+ partition_gather_horz_alike(cdf, partition_cdf);
+ assert(cdf[1] == AOM_ICDF(CDF_PROB_TOP));
+ p = aom_read_cdf(r, cdf, 2, ACCT_STR) ? PARTITION_SPLIT : PARTITION_VERT;
+ } else {
p = PARTITION_SPLIT;
-
- if (counts) ++counts->partition[ctx][p];
+ }
return p;
}
@@ -2341,6 +2301,9 @@ static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd,
AV1_COMMON *const cm = &pbi->common;
const int num_8x8_wh = mi_size_wide[bsize];
const int hbs = num_8x8_wh >> 1;
+#if CONFIG_EXT_PARTITION_TYPES && CONFIG_EXT_PARTITION_TYPES_AB
+ const int qbs = num_8x8_wh >> 2;
+#endif
#if CONFIG_CB4X4
const int unify_bsize = 1;
#else
@@ -2349,9 +2312,11 @@ static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd,
PARTITION_TYPE partition;
BLOCK_SIZE subsize;
#if CONFIG_EXT_PARTITION_TYPES
- BLOCK_SIZE bsize2 = get_subsize(bsize, PARTITION_SPLIT);
const int quarter_step = num_8x8_wh / 4;
int i;
+#if !CONFIG_EXT_PARTITION_TYPES_AB
+ BLOCK_SIZE bsize2 = get_subsize(bsize, PARTITION_SPLIT);
+#endif
#endif
const int has_rows = (mi_row + hbs) < cm->mi_rows;
const int has_cols = (mi_col + hbs) < cm->mi_cols;
@@ -2370,6 +2335,15 @@ static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd,
has_rows, has_cols, bsize);
subsize = subsize_lookup[partition][bsize]; // get_subsize(bsize, partition);
+ // Check the bitstream is conformant: if there is subsampling on the
+ // chroma planes, subsize must subsample to a valid block size.
+ const struct macroblockd_plane *const pd_u = &xd->plane[1];
+ if (get_plane_block_size(subsize, pd_u) == BLOCK_INVALID) {
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
+ "Block size %dx%d invalid with this subsampling mode",
+ block_size_wide[subsize], block_size_high[subsize]);
+ }
+
#if CONFIG_PVQ
assert(partition < PARTITION_TYPES);
assert(subsize < BLOCK_SIZES_ALL);
@@ -2387,187 +2361,105 @@ static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd,
#endif
}
#endif // CONFIG_SUPERTX
+
+#if CONFIG_SUPERTX
+#define DEC_BLOCK_STX_ARG supertx_enabled,
+#else
+#define DEC_BLOCK_STX_ARG
+#endif
+#if CONFIG_EXT_PARTITION_TYPES
+#define DEC_BLOCK_EPT_ARG partition,
+#else
+#define DEC_BLOCK_EPT_ARG
+#endif
+#define DEC_BLOCK(db_r, db_c, db_subsize) \
+ decode_block(pbi, xd, DEC_BLOCK_STX_ARG(db_r), (db_c), r, \
+ DEC_BLOCK_EPT_ARG(db_subsize))
+#define DEC_PARTITION(db_r, db_c, db_subsize) \
+ decode_partition(pbi, xd, DEC_BLOCK_STX_ARG(db_r), (db_c), r, (db_subsize))
+
if (!hbs && !unify_bsize) {
// calculate bmode block dimensions (log 2)
xd->bmode_blocks_wl = 1 >> !!(partition & PARTITION_VERT);
xd->bmode_blocks_hl = 1 >> !!(partition & PARTITION_HORZ);
- decode_block(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif // CONFIG_SUPERTX
- mi_row, mi_col, r,
-#if CONFIG_EXT_PARTITION_TYPES
- partition,
-#endif // CONFIG_EXT_PARTITION_TYPES
- subsize);
+ DEC_BLOCK(mi_row, mi_col, subsize);
} else {
switch (partition) {
- case PARTITION_NONE:
- decode_block(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif // CONFIG_SUPERTX
- mi_row, mi_col, r,
-#if CONFIG_EXT_PARTITION_TYPES
- partition,
-#endif // CONFIG_EXT_PARTITION_TYPES
- subsize);
- break;
+ case PARTITION_NONE: DEC_BLOCK(mi_row, mi_col, subsize); break;
case PARTITION_HORZ:
- decode_block(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif // CONFIG_SUPERTX
- mi_row, mi_col, r,
-#if CONFIG_EXT_PARTITION_TYPES
- partition,
-#endif // CONFIG_EXT_PARTITION_TYPES
- subsize);
- if (has_rows)
- decode_block(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif // CONFIG_SUPERTX
- mi_row + hbs, mi_col, r,
-#if CONFIG_EXT_PARTITION_TYPES
- partition,
-#endif // CONFIG_EXT_PARTITION_TYPES
- subsize);
+ DEC_BLOCK(mi_row, mi_col, subsize);
+ if (has_rows) DEC_BLOCK(mi_row + hbs, mi_col, subsize);
break;
case PARTITION_VERT:
- decode_block(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif // CONFIG_SUPERTX
- mi_row, mi_col, r,
-#if CONFIG_EXT_PARTITION_TYPES
- partition,
-#endif // CONFIG_EXT_PARTITION_TYPES
- subsize);
- if (has_cols)
- decode_block(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif // CONFIG_SUPERTX
- mi_row, mi_col + hbs, r,
-#if CONFIG_EXT_PARTITION_TYPES
- partition,
-#endif // CONFIG_EXT_PARTITION_TYPES
- subsize);
+ DEC_BLOCK(mi_row, mi_col, subsize);
+ if (has_cols) DEC_BLOCK(mi_row, mi_col + hbs, subsize);
break;
case PARTITION_SPLIT:
- decode_partition(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif // CONFIG_SUPERTX
- mi_row, mi_col, r, subsize);
- decode_partition(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif // CONFIG_SUPERTX
- mi_row, mi_col + hbs, r, subsize);
- decode_partition(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif // CONFIG_SUPERTX
- mi_row + hbs, mi_col, r, subsize);
- decode_partition(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif // CONFIG_SUPERTX
- mi_row + hbs, mi_col + hbs, r, subsize);
+ DEC_PARTITION(mi_row, mi_col, subsize);
+ DEC_PARTITION(mi_row, mi_col + hbs, subsize);
+ DEC_PARTITION(mi_row + hbs, mi_col, subsize);
+ DEC_PARTITION(mi_row + hbs, mi_col + hbs, subsize);
break;
#if CONFIG_EXT_PARTITION_TYPES
+#if CONFIG_EXT_PARTITION_TYPES_AB
case PARTITION_HORZ_A:
- decode_block(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif
- mi_row, mi_col, r, partition, bsize2);
- decode_block(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif
- mi_row, mi_col + hbs, r, partition, bsize2);
- decode_block(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif
- mi_row + hbs, mi_col, r, partition, subsize);
+ DEC_BLOCK(mi_row, mi_col, get_subsize(bsize, PARTITION_HORZ_4));
+ DEC_BLOCK(mi_row + qbs, mi_col, get_subsize(bsize, PARTITION_HORZ_4));
+ DEC_BLOCK(mi_row + hbs, mi_col, subsize);
break;
case PARTITION_HORZ_B:
- decode_block(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif
- mi_row, mi_col, r, partition, subsize);
- decode_block(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif
- mi_row + hbs, mi_col, r, partition, bsize2);
- decode_block(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif
- mi_row + hbs, mi_col + hbs, r, partition, bsize2);
+ DEC_BLOCK(mi_row, mi_col, subsize);
+ DEC_BLOCK(mi_row + hbs, mi_col, get_subsize(bsize, PARTITION_HORZ_4));
+ if (mi_row + 3 * qbs < cm->mi_rows)
+ DEC_BLOCK(mi_row + 3 * qbs, mi_col,
+ get_subsize(bsize, PARTITION_HORZ_4));
break;
case PARTITION_VERT_A:
- decode_block(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif
- mi_row, mi_col, r, partition, bsize2);
- decode_block(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif
- mi_row + hbs, mi_col, r, partition, bsize2);
- decode_block(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif
- mi_row, mi_col + hbs, r, partition, subsize);
+ DEC_BLOCK(mi_row, mi_col, get_subsize(bsize, PARTITION_VERT_4));
+ DEC_BLOCK(mi_row, mi_col + qbs, get_subsize(bsize, PARTITION_VERT_4));
+ DEC_BLOCK(mi_row, mi_col + hbs, subsize);
break;
case PARTITION_VERT_B:
- decode_block(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif
- mi_row, mi_col, r, partition, subsize);
- decode_block(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif
- mi_row, mi_col + hbs, r, partition, bsize2);
- decode_block(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif
- mi_row + hbs, mi_col + hbs, r, partition, bsize2);
+ DEC_BLOCK(mi_row, mi_col, subsize);
+ DEC_BLOCK(mi_row, mi_col + hbs, get_subsize(bsize, PARTITION_VERT_4));
+ if (mi_col + 3 * qbs < cm->mi_cols)
+ DEC_BLOCK(mi_row, mi_col + 3 * qbs,
+ get_subsize(bsize, PARTITION_VERT_4));
break;
+#else
+ case PARTITION_HORZ_A:
+ DEC_BLOCK(mi_row, mi_col, bsize2);
+ DEC_BLOCK(mi_row, mi_col + hbs, bsize2);
+ DEC_BLOCK(mi_row + hbs, mi_col, subsize);
+ break;
+ case PARTITION_HORZ_B:
+ DEC_BLOCK(mi_row, mi_col, subsize);
+ DEC_BLOCK(mi_row + hbs, mi_col, bsize2);
+ DEC_BLOCK(mi_row + hbs, mi_col + hbs, bsize2);
+ break;
+ case PARTITION_VERT_A:
+ DEC_BLOCK(mi_row, mi_col, bsize2);
+ DEC_BLOCK(mi_row + hbs, mi_col, bsize2);
+ DEC_BLOCK(mi_row, mi_col + hbs, subsize);
+ break;
+ case PARTITION_VERT_B:
+ DEC_BLOCK(mi_row, mi_col, subsize);
+ DEC_BLOCK(mi_row, mi_col + hbs, bsize2);
+ DEC_BLOCK(mi_row + hbs, mi_col + hbs, bsize2);
+ break;
+#endif
case PARTITION_HORZ_4:
for (i = 0; i < 4; ++i) {
int this_mi_row = mi_row + i * quarter_step;
if (i > 0 && this_mi_row >= cm->mi_rows) break;
-
- decode_block(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif
- this_mi_row, mi_col, r, partition, subsize);
+ DEC_BLOCK(this_mi_row, mi_col, subsize);
}
break;
case PARTITION_VERT_4:
for (i = 0; i < 4; ++i) {
int this_mi_col = mi_col + i * quarter_step;
if (i > 0 && this_mi_col >= cm->mi_cols) break;
-
- decode_block(pbi, xd,
-#if CONFIG_SUPERTX
- supertx_enabled,
-#endif
- mi_row, this_mi_col, r, partition, subsize);
+ DEC_BLOCK(mi_row, this_mi_col, subsize);
}
break;
#endif // CONFIG_EXT_PARTITION_TYPES
@@ -2575,6 +2467,11 @@ static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd,
}
}
+#undef DEC_PARTITION
+#undef DEC_BLOCK
+#undef DEC_BLOCK_EPT_ARG
+#undef DEC_BLOCK_STX_ARG
+
#if CONFIG_SUPERTX
if (supertx_enabled && read_token) {
uint8_t *dst_buf[3];
@@ -2583,24 +2480,20 @@ static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd,
set_segment_id_supertx(cm, mi_row, mi_col, bsize);
-#if CONFIG_DELTA_Q
if (cm->delta_q_present_flag) {
for (i = 0; i < MAX_SEGMENTS; i++) {
- xd->plane[0].seg_dequant[i][0] =
- av1_dc_quant(xd->current_qindex, cm->y_dc_delta_q, cm->bit_depth);
- xd->plane[0].seg_dequant[i][1] =
- av1_ac_quant(xd->current_qindex, 0, cm->bit_depth);
- xd->plane[1].seg_dequant[i][0] =
- av1_dc_quant(xd->current_qindex, cm->uv_dc_delta_q, cm->bit_depth);
- xd->plane[1].seg_dequant[i][1] =
- av1_ac_quant(xd->current_qindex, cm->uv_ac_delta_q, cm->bit_depth);
- xd->plane[2].seg_dequant[i][0] =
- av1_dc_quant(xd->current_qindex, cm->uv_dc_delta_q, cm->bit_depth);
- xd->plane[2].seg_dequant[i][1] =
- av1_ac_quant(xd->current_qindex, cm->uv_ac_delta_q, cm->bit_depth);
+ int j;
+ for (j = 0; j < MAX_MB_PLANE; ++j) {
+ const int dc_delta_q = j == 0 ? cm->y_dc_delta_q : cm->uv_dc_delta_q;
+ const int ac_delta_q = j == 0 ? 0 : cm->uv_ac_delta_q;
+
+ xd->plane[j].seg_dequant[i][0] =
+ av1_dc_quant(xd->current_qindex, dc_delta_q, cm->bit_depth);
+ xd->plane[j].seg_dequant[i][1] =
+ av1_ac_quant(xd->current_qindex, ac_delta_q, cm->bit_depth);
+ }
}
}
-#endif
xd->mi = cm->mi_grid_visible + offset;
xd->mi[0] = cm->mi + offset;
@@ -2622,18 +2515,24 @@ static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd,
const int eset =
get_ext_tx_set(supertx_size, bsize, 1, cm->reduced_tx_set_used);
if (eset > 0) {
+ const TxSetType tx_set_type = get_ext_tx_set_type(
+ supertx_size, bsize, 1, cm->reduced_tx_set_used);
const int packed_sym =
aom_read_symbol(r, ec_ctx->inter_ext_tx_cdf[eset][supertx_size],
- ext_tx_cnt_inter[eset], ACCT_STR);
- txfm = av1_ext_tx_inter_inv[eset][packed_sym];
+ av1_num_ext_tx_set[tx_set_type], ACCT_STR);
+ txfm = av1_ext_tx_inv[tx_set_type][packed_sym];
+#if CONFIG_ENTROPY_STATS
if (xd->counts) ++xd->counts->inter_ext_tx[eset][supertx_size][txfm];
+#endif // CONFIG_ENTROPY_STATS
}
}
#else
if (supertx_size < TX_32X32) {
txfm = aom_read_symbol(r, ec_ctx->inter_ext_tx_cdf[supertx_size],
TX_TYPES, ACCT_STR);
+#if CONFIG_ENTROPY_STATS
if (xd->counts) ++xd->counts->inter_ext_tx[supertx_size][txfm];
+#endif // CONFIG_ENTROPY_STATS
}
#endif // CONFIG_EXT_TX
}
@@ -2684,6 +2583,63 @@ static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd,
update_partition_context(xd, mi_row, mi_col, subsize, bsize);
#endif // CONFIG_EXT_PARTITION_TYPES
+#if CONFIG_LPF_SB
+ if (bsize == cm->sb_size) {
+ int filt_lvl;
+ if (mi_row == 0 && mi_col == 0) {
+ filt_lvl = aom_read_literal(r, 6, ACCT_STR);
+ cm->mi_grid_visible[0]->mbmi.reuse_sb_lvl = 0;
+ cm->mi_grid_visible[0]->mbmi.delta = 0;
+ cm->mi_grid_visible[0]->mbmi.sign = 0;
+ } else {
+ int prev_mi_row, prev_mi_col;
+ if (mi_col - MAX_MIB_SIZE < 0) {
+ prev_mi_row = mi_row - MAX_MIB_SIZE;
+ prev_mi_col = mi_col;
+ } else {
+ prev_mi_row = mi_row;
+ prev_mi_col = mi_col - MAX_MIB_SIZE;
+ }
+
+ MB_MODE_INFO *curr_mbmi =
+ &cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]->mbmi;
+ MB_MODE_INFO *prev_mbmi =
+ &cm->mi_grid_visible[prev_mi_row * cm->mi_stride + prev_mi_col]->mbmi;
+ const uint8_t prev_lvl = prev_mbmi->filt_lvl;
+
+ const int reuse_ctx = prev_mbmi->reuse_sb_lvl;
+ const int reuse_prev_lvl = aom_read_symbol(
+ r, xd->tile_ctx->lpf_reuse_cdf[reuse_ctx], 2, ACCT_STR);
+ curr_mbmi->reuse_sb_lvl = reuse_prev_lvl;
+
+ if (reuse_prev_lvl) {
+ filt_lvl = prev_lvl;
+ curr_mbmi->delta = 0;
+ curr_mbmi->sign = 0;
+ } else {
+ const int delta_ctx = prev_mbmi->delta;
+ unsigned int delta = aom_read_symbol(
+ r, xd->tile_ctx->lpf_delta_cdf[delta_ctx], DELTA_RANGE, ACCT_STR);
+ curr_mbmi->delta = delta;
+ delta *= LPF_STEP;
+
+ if (delta) {
+ const int sign_ctx = prev_mbmi->sign;
+ const int sign = aom_read_symbol(
+ r, xd->tile_ctx->lpf_sign_cdf[reuse_ctx][sign_ctx], 2, ACCT_STR);
+ curr_mbmi->sign = sign;
+ filt_lvl = sign ? prev_lvl + delta : prev_lvl - delta;
+ } else {
+ filt_lvl = prev_lvl;
+ curr_mbmi->sign = 0;
+ }
+ }
+ }
+
+ av1_loop_filter_sb_level_init(cm, mi_row, mi_col, filt_lvl);
+ }
+#endif
+
#if CONFIG_CDEF
if (bsize == cm->sb_size) {
int width_step = mi_size_wide[BLOCK_64X64];
@@ -2704,6 +2660,21 @@ static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd,
}
}
#endif // CONFIG_CDEF
+#if CONFIG_LOOP_RESTORATION
+ for (int plane = 0; plane < MAX_MB_PLANE; ++plane) {
+ int rcol0, rcol1, rrow0, rrow1, nhtiles;
+ if (av1_loop_restoration_corners_in_sb(cm, plane, mi_row, mi_col, bsize,
+ &rcol0, &rcol1, &rrow0, &rrow1,
+ &nhtiles)) {
+ for (int rrow = rrow0; rrow < rrow1; ++rrow) {
+ for (int rcol = rcol0; rcol < rcol1; ++rcol) {
+ int rtile_idx = rcol + rrow * nhtiles;
+ loop_restoration_read_sb_coeffs(cm, xd, r, plane, rtile_idx);
+ }
+ }
+ }
+ }
+#endif
}
static void setup_bool_decoder(const uint8_t *data, const uint8_t *data_end,
@@ -2736,6 +2707,7 @@ static void setup_segmentation(AV1_COMMON *const cm,
seg->update_map = 0;
seg->update_data = 0;
+ seg->temporal_update = 0;
seg->enabled = aom_rb_read_bit(rb);
if (!seg->enabled) return;
@@ -2820,16 +2792,26 @@ static void decode_restoration_mode(AV1_COMMON *cm,
cm->rst_info[1].restoration_tilesize = cm->rst_info[0].restoration_tilesize;
}
cm->rst_info[2].restoration_tilesize = cm->rst_info[1].restoration_tilesize;
+
+ cm->rst_info[0].procunit_width = cm->rst_info[0].procunit_height =
+ RESTORATION_PROC_UNIT_SIZE;
+ cm->rst_info[1].procunit_width = cm->rst_info[2].procunit_width =
+ RESTORATION_PROC_UNIT_SIZE >> cm->subsampling_x;
+ cm->rst_info[1].procunit_height = cm->rst_info[2].procunit_height =
+ RESTORATION_PROC_UNIT_SIZE >> cm->subsampling_y;
}
-static void read_wiener_filter(WienerInfo *wiener_info,
+static void read_wiener_filter(int wiener_win, WienerInfo *wiener_info,
WienerInfo *ref_wiener_info, aom_reader *rb) {
- wiener_info->vfilter[0] = wiener_info->vfilter[WIENER_WIN - 1] =
- aom_read_primitive_refsubexpfin(
- rb, WIENER_FILT_TAP0_MAXV - WIENER_FILT_TAP0_MINV + 1,
- WIENER_FILT_TAP0_SUBEXP_K,
- ref_wiener_info->vfilter[0] - WIENER_FILT_TAP0_MINV, ACCT_STR) +
- WIENER_FILT_TAP0_MINV;
+ if (wiener_win == WIENER_WIN)
+ wiener_info->vfilter[0] = wiener_info->vfilter[WIENER_WIN - 1] =
+ aom_read_primitive_refsubexpfin(
+ rb, WIENER_FILT_TAP0_MAXV - WIENER_FILT_TAP0_MINV + 1,
+ WIENER_FILT_TAP0_SUBEXP_K,
+ ref_wiener_info->vfilter[0] - WIENER_FILT_TAP0_MINV, ACCT_STR) +
+ WIENER_FILT_TAP0_MINV;
+ else
+ wiener_info->vfilter[0] = wiener_info->vfilter[WIENER_WIN - 1] = 0;
wiener_info->vfilter[1] = wiener_info->vfilter[WIENER_WIN - 2] =
aom_read_primitive_refsubexpfin(
rb, WIENER_FILT_TAP1_MAXV - WIENER_FILT_TAP1_MINV + 1,
@@ -2847,12 +2829,15 @@ static void read_wiener_filter(WienerInfo *wiener_info,
-2 * (wiener_info->vfilter[0] + wiener_info->vfilter[1] +
wiener_info->vfilter[2]);
- wiener_info->hfilter[0] = wiener_info->hfilter[WIENER_WIN - 1] =
- aom_read_primitive_refsubexpfin(
- rb, WIENER_FILT_TAP0_MAXV - WIENER_FILT_TAP0_MINV + 1,
- WIENER_FILT_TAP0_SUBEXP_K,
- ref_wiener_info->hfilter[0] - WIENER_FILT_TAP0_MINV, ACCT_STR) +
- WIENER_FILT_TAP0_MINV;
+ if (wiener_win == WIENER_WIN)
+ wiener_info->hfilter[0] = wiener_info->hfilter[WIENER_WIN - 1] =
+ aom_read_primitive_refsubexpfin(
+ rb, WIENER_FILT_TAP0_MAXV - WIENER_FILT_TAP0_MINV + 1,
+ WIENER_FILT_TAP0_SUBEXP_K,
+ ref_wiener_info->hfilter[0] - WIENER_FILT_TAP0_MINV, ACCT_STR) +
+ WIENER_FILT_TAP0_MINV;
+ else
+ wiener_info->hfilter[0] = wiener_info->hfilter[WIENER_WIN - 1] = 0;
wiener_info->hfilter[1] = wiener_info->hfilter[WIENER_WIN - 2] =
aom_read_primitive_refsubexpfin(
rb, WIENER_FILT_TAP1_MAXV - WIENER_FILT_TAP1_MINV + 1,
@@ -2888,90 +2873,43 @@ static void read_sgrproj_filter(SgrprojInfo *sgrproj_info,
memcpy(ref_sgrproj_info, sgrproj_info, sizeof(*sgrproj_info));
}
-static void decode_restoration(AV1_COMMON *cm, aom_reader *rb) {
- int i, p;
-#if CONFIG_FRAME_SUPERRES
- const int width = cm->superres_upscaled_width;
- const int height = cm->superres_upscaled_height;
-#else
- const int width = cm->width;
- const int height = cm->height;
-#endif // CONFIG_FRAME_SUPERRES
- SgrprojInfo ref_sgrproj_info;
- WienerInfo ref_wiener_info;
- set_default_wiener(&ref_wiener_info);
- set_default_sgrproj(&ref_sgrproj_info);
- const int ntiles =
- av1_get_rest_ntiles(width, height, cm->rst_info[0].restoration_tilesize,
- NULL, NULL, NULL, NULL);
- const int ntiles_uv = av1_get_rest_ntiles(
- ROUND_POWER_OF_TWO(width, cm->subsampling_x),
- ROUND_POWER_OF_TWO(height, cm->subsampling_y),
- cm->rst_info[1].restoration_tilesize, NULL, NULL, NULL, NULL);
- RestorationInfo *rsi = &cm->rst_info[0];
- if (rsi->frame_restoration_type != RESTORE_NONE) {
- if (rsi->frame_restoration_type == RESTORE_SWITCHABLE) {
- for (i = 0; i < ntiles; ++i) {
- rsi->restoration_type[i] =
- aom_read_tree(rb, av1_switchable_restore_tree,
- cm->fc->switchable_restore_prob, ACCT_STR);
- if (rsi->restoration_type[i] == RESTORE_WIENER) {
- read_wiener_filter(&rsi->wiener_info[i], &ref_wiener_info, rb);
- } else if (rsi->restoration_type[i] == RESTORE_SGRPROJ) {
- read_sgrproj_filter(&rsi->sgrproj_info[i], &ref_sgrproj_info, rb);
- }
- }
- } else if (rsi->frame_restoration_type == RESTORE_WIENER) {
- for (i = 0; i < ntiles; ++i) {
- if (aom_read(rb, RESTORE_NONE_WIENER_PROB, ACCT_STR)) {
- rsi->restoration_type[i] = RESTORE_WIENER;
- read_wiener_filter(&rsi->wiener_info[i], &ref_wiener_info, rb);
- } else {
- rsi->restoration_type[i] = RESTORE_NONE;
- }
- }
- } else if (rsi->frame_restoration_type == RESTORE_SGRPROJ) {
- for (i = 0; i < ntiles; ++i) {
- if (aom_read(rb, RESTORE_NONE_SGRPROJ_PROB, ACCT_STR)) {
- rsi->restoration_type[i] = RESTORE_SGRPROJ;
- read_sgrproj_filter(&rsi->sgrproj_info[i], &ref_sgrproj_info, rb);
- } else {
- rsi->restoration_type[i] = RESTORE_NONE;
- }
- }
+static void loop_restoration_read_sb_coeffs(const AV1_COMMON *const cm,
+ MACROBLOCKD *xd,
+ aom_reader *const r, int plane,
+ int rtile_idx) {
+ const RestorationInfo *rsi = cm->rst_info + plane;
+ if (rsi->frame_restoration_type == RESTORE_NONE) return;
+
+ const int wiener_win = (plane > 0) ? WIENER_WIN_CHROMA : WIENER_WIN;
+ WienerInfo *wiener_info = xd->wiener_info + plane;
+ SgrprojInfo *sgrproj_info = xd->sgrproj_info + plane;
+
+ if (rsi->frame_restoration_type == RESTORE_SWITCHABLE) {
+ assert(plane == 0);
+ rsi->restoration_type[rtile_idx] =
+ aom_read_tree(r, av1_switchable_restore_tree,
+ cm->fc->switchable_restore_prob, ACCT_STR);
+
+ if (rsi->restoration_type[rtile_idx] == RESTORE_WIENER) {
+ read_wiener_filter(wiener_win, &rsi->wiener_info[rtile_idx], wiener_info,
+ r);
+ } else if (rsi->restoration_type[rtile_idx] == RESTORE_SGRPROJ) {
+ read_sgrproj_filter(&rsi->sgrproj_info[rtile_idx], sgrproj_info, r);
}
- }
- for (p = 1; p < MAX_MB_PLANE; ++p) {
- set_default_wiener(&ref_wiener_info);
- set_default_sgrproj(&ref_sgrproj_info);
- rsi = &cm->rst_info[p];
- if (rsi->frame_restoration_type == RESTORE_WIENER) {
- for (i = 0; i < ntiles_uv; ++i) {
- if (ntiles_uv > 1)
- rsi->restoration_type[i] =
- aom_read(rb, RESTORE_NONE_WIENER_PROB, ACCT_STR) ? RESTORE_WIENER
- : RESTORE_NONE;
- else
- rsi->restoration_type[i] = RESTORE_WIENER;
- if (rsi->restoration_type[i] == RESTORE_WIENER) {
- read_wiener_filter(&rsi->wiener_info[i], &ref_wiener_info, rb);
- }
- }
- } else if (rsi->frame_restoration_type == RESTORE_SGRPROJ) {
- for (i = 0; i < ntiles_uv; ++i) {
- if (ntiles_uv > 1)
- rsi->restoration_type[i] =
- aom_read(rb, RESTORE_NONE_SGRPROJ_PROB, ACCT_STR)
- ? RESTORE_SGRPROJ
- : RESTORE_NONE;
- else
- rsi->restoration_type[i] = RESTORE_SGRPROJ;
- if (rsi->restoration_type[i] == RESTORE_SGRPROJ) {
- read_sgrproj_filter(&rsi->sgrproj_info[i], &ref_sgrproj_info, rb);
- }
- }
- } else if (rsi->frame_restoration_type != RESTORE_NONE) {
- assert(0);
+ } else if (rsi->frame_restoration_type == RESTORE_WIENER) {
+ if (aom_read(r, RESTORE_NONE_WIENER_PROB, ACCT_STR)) {
+ rsi->restoration_type[rtile_idx] = RESTORE_WIENER;
+ read_wiener_filter(wiener_win, &rsi->wiener_info[rtile_idx], wiener_info,
+ r);
+ } else {
+ rsi->restoration_type[rtile_idx] = RESTORE_NONE;
+ }
+ } else if (rsi->frame_restoration_type == RESTORE_SGRPROJ) {
+ if (aom_read(r, RESTORE_NONE_SGRPROJ_PROB, ACCT_STR)) {
+ rsi->restoration_type[rtile_idx] = RESTORE_SGRPROJ;
+ read_sgrproj_filter(&rsi->sgrproj_info[rtile_idx], sgrproj_info, r);
+ } else {
+ rsi->restoration_type[rtile_idx] = RESTORE_NONE;
}
}
}
@@ -2979,13 +2917,18 @@ static void decode_restoration(AV1_COMMON *cm, aom_reader *rb) {
static void setup_loopfilter(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
struct loopfilter *lf = &cm->lf;
- lf->filter_level = aom_rb_read_literal(rb, 6);
-#if CONFIG_UV_LVL
- if (lf->filter_level > 0) {
+#if !CONFIG_LPF_SB
+#if CONFIG_LOOPFILTER_LEVEL
+ lf->filter_level[0] = aom_rb_read_literal(rb, 6);
+ lf->filter_level[1] = aom_rb_read_literal(rb, 6);
+ if (lf->filter_level[0] || lf->filter_level[1]) {
lf->filter_level_u = aom_rb_read_literal(rb, 6);
lf->filter_level_v = aom_rb_read_literal(rb, 6);
}
+#else
+ lf->filter_level = aom_rb_read_literal(rb, 6);
#endif
+#endif // CONFIG_LPF_SB
lf->sharpness_level = aom_rb_read_literal(rb, 3);
// Read in loop filter deltas applied at the MB level based on mode or ref
@@ -3012,13 +2955,19 @@ static void setup_loopfilter(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
#if CONFIG_CDEF
static void setup_cdef(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
int i;
- cm->cdef_dering_damping = aom_rb_read_literal(rb, 1) + 5;
- cm->cdef_clpf_damping = aom_rb_read_literal(rb, 2) + 3;
+#if CONFIG_CDEF_SINGLEPASS
+ cm->cdef_pri_damping = cm->cdef_sec_damping = aom_rb_read_literal(rb, 2) + 3;
+#else
+ cm->cdef_pri_damping = aom_rb_read_literal(rb, 1) + 5;
+ cm->cdef_sec_damping = aom_rb_read_literal(rb, 2) + 3;
+#endif
cm->cdef_bits = aom_rb_read_literal(rb, 2);
cm->nb_cdef_strengths = 1 << cm->cdef_bits;
for (i = 0; i < cm->nb_cdef_strengths; i++) {
cm->cdef_strengths[i] = aom_rb_read_literal(rb, CDEF_STRENGTH_BITS);
- cm->cdef_uv_strengths[i] = aom_rb_read_literal(rb, CDEF_STRENGTH_BITS);
+ cm->cdef_uv_strengths[i] = cm->subsampling_x == cm->subsampling_y
+ ? aom_rb_read_literal(rb, CDEF_STRENGTH_BITS)
+ : 0;
}
}
#endif // CONFIG_CDEF
@@ -3116,28 +3065,20 @@ static void setup_superres(AV1_COMMON *const cm, struct aom_read_bit_buffer *rb,
cm->superres_upscaled_width = *width;
cm->superres_upscaled_height = *height;
if (aom_rb_read_bit(rb)) {
- cm->superres_scale_numerator =
+ cm->superres_scale_denominator =
(uint8_t)aom_rb_read_literal(rb, SUPERRES_SCALE_BITS);
- cm->superres_scale_numerator += SUPERRES_SCALE_NUMERATOR_MIN;
+ cm->superres_scale_denominator += SUPERRES_SCALE_DENOMINATOR_MIN;
// Don't edit cm->width or cm->height directly, or the buffers won't get
// resized correctly
- av1_calculate_scaled_size(width, height, cm->superres_scale_numerator);
+ av1_calculate_scaled_superres_size(width, height,
+ cm->superres_scale_denominator);
} else {
// 1:1 scaling - ie. no scaling, scale not provided
- cm->superres_scale_numerator = SCALE_DENOMINATOR;
+ cm->superres_scale_denominator = SCALE_NUMERATOR;
}
}
#endif // CONFIG_FRAME_SUPERRES
-static void resize_mv_buffer(AV1_COMMON *cm) {
- aom_free(cm->cur_frame->mvs);
- cm->cur_frame->mi_rows = cm->mi_rows;
- cm->cur_frame->mi_cols = cm->mi_cols;
- CHECK_MEM_ERROR(cm, cm->cur_frame->mvs,
- (MV_REF *)aom_calloc(cm->mi_rows * cm->mi_cols,
- sizeof(*cm->cur_frame->mvs)));
-}
-
static void resize_context_buffers(AV1_COMMON *cm, int width, int height) {
#if CONFIG_SIZE_LIMIT
if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT)
@@ -3164,10 +3105,10 @@ static void resize_context_buffers(AV1_COMMON *cm, int width, int height) {
cm->width = width;
cm->height = height;
}
- if (cm->cur_frame->mvs == NULL || cm->mi_rows > cm->cur_frame->mi_rows ||
- cm->mi_cols > cm->cur_frame->mi_cols) {
- resize_mv_buffer(cm);
- }
+
+ ensure_mv_buffer(cm->cur_frame, cm);
+ cm->cur_frame->width = cm->width;
+ cm->cur_frame->height = cm->height;
}
static void setup_frame_size(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
@@ -3211,6 +3152,15 @@ static void setup_frame_size(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
}
+static void setup_sb_size(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
+ (void)rb;
+#if CONFIG_EXT_PARTITION
+ set_sb_size(cm, aom_rb_read_bit(rb) ? BLOCK_128X128 : BLOCK_64X64);
+#else
+ set_sb_size(cm, BLOCK_64X64);
+#endif // CONFIG_EXT_PARTITION
+}
+
static INLINE int valid_ref_frame_img_fmt(aom_bit_depth_t ref_bit_depth,
int ref_xss, int ref_yss,
aom_bit_depth_t this_bit_depth,
@@ -3306,6 +3256,89 @@ static void setup_frame_size_with_refs(AV1_COMMON *cm,
pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
}
+static void read_tile_group_range(AV1Decoder *pbi,
+ struct aom_read_bit_buffer *const rb) {
+ AV1_COMMON *const cm = &pbi->common;
+ const int num_bits = cm->log2_tile_rows + cm->log2_tile_cols;
+ const int num_tiles =
+ cm->tile_rows * cm->tile_cols; // Note: May be < (1<<num_bits)
+ pbi->tg_start = aom_rb_read_literal(rb, num_bits);
+ pbi->tg_size = 1 + aom_rb_read_literal(rb, num_bits);
+ if (pbi->tg_start + pbi->tg_size > num_tiles)
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
+ "Tile group extends past last tile in frame");
+}
+
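A small sketch of how the two fields read above are used later in this file (see the start/end-tile checks in get_tile_buffers() and decode_tiles()): a tile with raster index t belongs to the current tile group exactly when it falls in the half-open range [tg_start, tg_start + tg_size).

/* Sketch only, mirroring the range checks used further down in this file. */
static int tile_in_current_tile_group(const AV1Decoder *pbi, int tile_idx) {
  return tile_idx >= pbi->tg_start && tile_idx < pbi->tg_start + pbi->tg_size;
}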
+#if CONFIG_MAX_TILE
+
+// Same function as av1_read_uniform, but reading from the uncompressed
+// header's bit buffer (rb)
+static int rb_read_uniform(struct aom_read_bit_buffer *const rb, int n) {
+ const int l = get_unsigned_bits(n);
+ const int m = (1 << l) - n;
+ const int v = aom_rb_read_literal(rb, l - 1);
+ assert(l != 0);
+ if (v < m)
+ return v;
+ else
+ return (v << 1) - m + aom_rb_read_literal(rb, 1);
+}
+
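For example, with n = 5 the code above gives l = 3 and m = 3, so the values 0..2 are read from 2 bits and 3..4 from 3 bits. Below is a sketch of the matching writer side, patterned on the encoder's av1_write_uniform() but targeting a write bit buffer; it is a hypothetical helper, not part of this patch.

/* Quasi-uniform writer sketch: values below m use l - 1 bits, the rest use
 * l bits, so rb_read_uniform() above recovers v exactly. */
static void wb_write_uniform_sketch(struct aom_write_bit_buffer *wb, int n,
                                    int v) {
  const int l = get_unsigned_bits(n);
  const int m = (1 << l) - n;
  if (l == 0) return;
  if (v < m) {
    aom_wb_write_literal(wb, v, l - 1);
  } else {
    aom_wb_write_literal(wb, m + ((v - m) >> 1), l - 1);
    aom_wb_write_literal(wb, (v - m) & 1, 1);
  }
}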
+static void read_tile_info_max_tile(AV1_COMMON *const cm,
+ struct aom_read_bit_buffer *const rb) {
+ int width_mi = ALIGN_POWER_OF_TWO(cm->mi_cols, MAX_MIB_SIZE_LOG2);
+ int height_mi = ALIGN_POWER_OF_TWO(cm->mi_rows, MAX_MIB_SIZE_LOG2);
+ int width_sb = width_mi >> MAX_MIB_SIZE_LOG2;
+ int height_sb = height_mi >> MAX_MIB_SIZE_LOG2;
+ int start_sb, size_sb, i;
+
+ av1_get_tile_limits(cm);
+ cm->uniform_tile_spacing_flag = aom_rb_read_bit(rb);
+
+ // Read tile columns
+ if (cm->uniform_tile_spacing_flag) {
+ cm->log2_tile_cols = cm->min_log2_tile_cols;
+ while (cm->log2_tile_cols < cm->max_log2_tile_cols) {
+ if (!aom_rb_read_bit(rb)) {
+ break;
+ }
+ cm->log2_tile_cols++;
+ }
+ } else {
+ for (i = 0, start_sb = 0; width_sb > 0 && i < MAX_TILE_COLS; i++) {
+ size_sb = 1 + rb_read_uniform(rb, AOMMIN(width_sb, MAX_TILE_WIDTH_SB));
+ cm->tile_col_start_sb[i] = start_sb;
+ start_sb += size_sb;
+ width_sb -= size_sb;
+ }
+ cm->tile_cols = i;
+ cm->tile_col_start_sb[i] = start_sb + width_sb;
+ }
+ av1_calculate_tile_cols(cm);
+
+ // Read tile rows
+ if (cm->uniform_tile_spacing_flag) {
+ cm->log2_tile_rows = cm->min_log2_tile_rows;
+ while (cm->log2_tile_rows < cm->max_log2_tile_rows) {
+ if (!aom_rb_read_bit(rb)) {
+ break;
+ }
+ cm->log2_tile_rows++;
+ }
+ } else {
+ for (i = 0, start_sb = 0; height_sb > 0 && i < MAX_TILE_ROWS; i++) {
+ size_sb =
+ 1 + rb_read_uniform(rb, AOMMIN(height_sb, cm->max_tile_height_sb));
+ cm->tile_row_start_sb[i] = start_sb;
+ start_sb += size_sb;
+ height_sb -= size_sb;
+ }
+ cm->tile_rows = i;
+ cm->tile_row_start_sb[i] = start_sb + height_sb;
+ }
+ av1_calculate_tile_rows(cm);
+}
+#endif
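In the explicit (non-uniform) case above, tile_col_start_sb[] and tile_row_start_sb[] end up holding cumulative superblock offsets with a sentinel entry at index tile_cols / tile_rows, so individual tile sizes fall out by differencing adjacent entries (the last column or row absorbs any leftover superblocks). A minimal sketch, assuming only the CONFIG_MAX_TILE fields shown above:

/* Sketch only: per-column widths, in superblocks, from the offsets above. */
static void tile_col_widths_sb_sketch(const AV1_COMMON *cm,
                                      int widths[MAX_TILE_COLS]) {
  for (int i = 0; i < cm->tile_cols; ++i)
    widths[i] = cm->tile_col_start_sb[i + 1] - cm->tile_col_start_sb[i];
}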
+
static void read_tile_info(AV1Decoder *const pbi,
struct aom_read_bit_buffer *const rb) {
AV1_COMMON *const cm = &pbi->common;
@@ -3357,23 +3390,34 @@ static void read_tile_info(AV1Decoder *const pbi,
#endif
} else {
#endif // CONFIG_EXT_TILE
- int min_log2_tile_cols, max_log2_tile_cols, max_ones;
- av1_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
- // columns
- max_ones = max_log2_tile_cols - min_log2_tile_cols;
- cm->log2_tile_cols = min_log2_tile_cols;
- while (max_ones-- && aom_rb_read_bit(rb)) cm->log2_tile_cols++;
+#if CONFIG_MAX_TILE
+ read_tile_info_max_tile(cm, rb);
+#else
+ int min_log2_tile_cols, max_log2_tile_cols, max_ones;
+ av1_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+
+ // columns
+ max_ones = max_log2_tile_cols - min_log2_tile_cols;
+ cm->log2_tile_cols = min_log2_tile_cols;
+ while (max_ones-- && aom_rb_read_bit(rb)) cm->log2_tile_cols++;
- if (cm->log2_tile_cols > 6)
- aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
- "Invalid number of tile columns");
+ if (cm->log2_tile_cols > 6)
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
+ "Invalid number of tile columns");
- // rows
- cm->log2_tile_rows = aom_rb_read_bit(rb);
- if (cm->log2_tile_rows) cm->log2_tile_rows += aom_rb_read_bit(rb);
+ // rows
+ cm->log2_tile_rows = aom_rb_read_bit(rb);
+ if (cm->log2_tile_rows) cm->log2_tile_rows += aom_rb_read_bit(rb);
+
+ cm->tile_width =
+ get_tile_size(cm->mi_cols, cm->log2_tile_cols, &cm->tile_cols);
+ cm->tile_height =
+ get_tile_size(cm->mi_rows, cm->log2_tile_rows, &cm->tile_rows);
+
+#endif // CONFIG_MAX_TILE
#if CONFIG_DEPENDENT_HORZTILES
- if (cm->log2_tile_rows != 0)
+ if (cm->tile_rows > 1)
cm->dependent_horz_tiles = aom_rb_read_bit(rb);
else
cm->dependent_horz_tiles = 0;
@@ -3382,33 +3426,18 @@ static void read_tile_info(AV1Decoder *const pbi,
cm->loop_filter_across_tiles_enabled = aom_rb_read_bit(rb);
#endif // CONFIG_LOOPFILTERING_ACROSS_TILES
- cm->tile_cols = 1 << cm->log2_tile_cols;
- cm->tile_rows = 1 << cm->log2_tile_rows;
-
- cm->tile_width = ALIGN_POWER_OF_TWO(cm->mi_cols, MAX_MIB_SIZE_LOG2);
- cm->tile_width >>= cm->log2_tile_cols;
- cm->tile_height = ALIGN_POWER_OF_TWO(cm->mi_rows, MAX_MIB_SIZE_LOG2);
- cm->tile_height >>= cm->log2_tile_rows;
-
- // round to integer multiples of superblock size
- cm->tile_width = ALIGN_POWER_OF_TWO(cm->tile_width, MAX_MIB_SIZE_LOG2);
- cm->tile_height = ALIGN_POWER_OF_TWO(cm->tile_height, MAX_MIB_SIZE_LOG2);
-
// tile size magnitude
pbi->tile_size_bytes = aom_rb_read_literal(rb, 2) + 1;
#if CONFIG_EXT_TILE
}
#endif // CONFIG_EXT_TILE
+// Each tile group header is in its own tile group OBU.
+#if !CONFIG_OBU
// Store an index to the location of the tile group information
pbi->tg_size_bit_offset = rb->bit_offset;
- pbi->tg_size = 1 << (cm->log2_tile_rows + cm->log2_tile_cols);
- if (cm->log2_tile_rows + cm->log2_tile_cols > 0) {
- pbi->tg_start =
- aom_rb_read_literal(rb, cm->log2_tile_rows + cm->log2_tile_cols);
- pbi->tg_size =
- 1 + aom_rb_read_literal(rb, cm->log2_tile_rows + cm->log2_tile_cols);
- }
+ read_tile_group_range(pbi, rb);
+#endif
}
static int mem_get_varsize(const uint8_t *src, int sz) {
@@ -3605,9 +3634,10 @@ static void get_tile_buffer(const uint8_t *const data_end,
*data += size;
}
-static void get_tile_buffers(
- AV1Decoder *pbi, const uint8_t *data, const uint8_t *data_end,
- TileBufferDec (*const tile_buffers)[MAX_TILE_COLS]) {
+static void get_tile_buffers(AV1Decoder *pbi, const uint8_t *data,
+ const uint8_t *data_end,
+ TileBufferDec (*const tile_buffers)[MAX_TILE_COLS],
+ int startTile, int endTile) {
AV1_COMMON *const cm = &pbi->common;
int r, c;
const int tile_cols = cm->tile_cols;
@@ -3616,10 +3646,13 @@ static void get_tile_buffers(
int first_tile_in_tg = 0;
struct aom_read_bit_buffer rb_tg_hdr;
uint8_t clear_data[MAX_AV1_HEADER_SIZE];
- const int num_tiles = tile_rows * tile_cols;
- const int num_bits = OD_ILOG(num_tiles) - 1;
+#if !CONFIG_OBU
const size_t hdr_size = pbi->uncomp_hdr_size + pbi->first_partition_size;
const int tg_size_bit_offset = pbi->tg_size_bit_offset;
+#else
+ const int tg_size_bit_offset = 0;
+#endif
+
#if CONFIG_DEPENDENT_HORZTILES
int tile_group_start_col = 0;
int tile_group_start_row = 0;
@@ -3628,21 +3661,28 @@ static void get_tile_buffers(
for (r = 0; r < tile_rows; ++r) {
for (c = 0; c < tile_cols; ++c, ++tc) {
TileBufferDec *const buf = &tile_buffers[r][c];
+#if CONFIG_OBU
+ const int is_last = (tc == endTile);
+ const size_t hdr_offset = 0;
+#else
const int is_last = (r == tile_rows - 1) && (c == tile_cols - 1);
const size_t hdr_offset = (tc && tc == first_tile_in_tg) ? hdr_size : 0;
+#endif
+
+ if (tc < startTile || tc > endTile) continue;
+ if (data + hdr_offset >= data_end)
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
+ "Data ended before all tiles were read.");
buf->col = c;
if (hdr_offset) {
init_read_bit_buffer(pbi, &rb_tg_hdr, data, data_end, clear_data);
rb_tg_hdr.bit_offset = tg_size_bit_offset;
- if (num_tiles) {
- pbi->tg_start = aom_rb_read_literal(&rb_tg_hdr, num_bits);
- pbi->tg_size = 1 + aom_rb_read_literal(&rb_tg_hdr, num_bits);
+ read_tile_group_range(pbi, &rb_tg_hdr);
#if CONFIG_DEPENDENT_HORZTILES
- tile_group_start_row = r;
- tile_group_start_col = c;
+ tile_group_start_row = r;
+ tile_group_start_col = c;
#endif
- }
}
first_tile_in_tg += tc == first_tile_in_tg ? pbi->tg_size : 0;
data += hdr_offset;
@@ -3665,10 +3705,6 @@ static void daala_dec_init(AV1_COMMON *const cm, daala_dec_ctx *daala_dec,
  // TODO(yushin) : activity masking info needs to be signaled by a bitstream
daala_dec->use_activity_masking = AV1_PVQ_ENABLE_ACTIVITY_MASKING;
-#if !CONFIG_DAALA_DIST
- daala_dec->use_activity_masking = 0;
-#endif
-
if (daala_dec->use_activity_masking)
daala_dec->qm = OD_HVS_QM;
else
@@ -3707,8 +3743,22 @@ static void daala_dec_init(AV1_COMMON *const cm, daala_dec_ctx *daala_dec,
}
#endif // #if CONFIG_PVQ
+#if CONFIG_LOOPFILTERING_ACROSS_TILES
+static void dec_setup_across_tile_boundary_info(
+ const AV1_COMMON *const cm, const TileInfo *const tile_info) {
+ if (tile_info->mi_row_start >= tile_info->mi_row_end ||
+ tile_info->mi_col_start >= tile_info->mi_col_end)
+ return;
+
+ if (!cm->loop_filter_across_tiles_enabled) {
+ av1_setup_across_tile_boundary_info(cm, tile_info);
+ }
+}
+#endif // CONFIG_LOOPFILTERING_ACROSS_TILES
+
static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data,
- const uint8_t *data_end) {
+ const uint8_t *data_end, int startTile,
+ int endTile) {
AV1_COMMON *const cm = &pbi->common;
const AVxWorkerInterface *const winterface = aom_get_worker_interface();
const int tile_cols = cm->tile_cols;
@@ -3776,7 +3826,7 @@ static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data,
get_ls_tile_buffers(pbi, data, data_end, tile_buffers);
else
#endif // CONFIG_EXT_TILE
- get_tile_buffers(pbi, data, data_end, tile_buffers);
+ get_tile_buffers(pbi, data, data_end, tile_buffers, startTile, endTile);
if (pbi->tile_data == NULL || n_tiles != pbi->allocated_tiles) {
aom_free(pbi->tile_data);
@@ -3795,6 +3845,10 @@ static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data,
const TileBufferDec *const buf = &tile_buffers[tile_row][tile_col];
TileData *const td = pbi->tile_data + tile_cols * tile_row + tile_col;
+ if (tile_row * cm->tile_cols + tile_col < startTile ||
+ tile_row * cm->tile_cols + tile_col > endTile)
+ continue;
+
td->cm = cm;
td->xd = pbi->mb;
td->xd.corrupted = 0;
@@ -3838,10 +3892,11 @@ static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data,
td->xd.daala_dec.state.adapt = &td->tctx.pvq_context;
#endif
-#if CONFIG_PALETTE
td->xd.plane[0].color_index_map = td->color_index_map[0];
td->xd.plane[1].color_index_map = td->color_index_map[1];
-#endif // CONFIG_PALETTE
+#if CONFIG_MRC_TX
+ td->xd.mrc_mask = td->mrc_mask;
+#endif // CONFIG_MRC_TX
}
}
@@ -3855,6 +3910,11 @@ static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data,
for (tile_col = tile_cols_start; tile_col < tile_cols_end; ++tile_col) {
const int col = inv_col_order ? tile_cols - 1 - tile_col : tile_col;
TileData *const td = pbi->tile_data + tile_cols * row + col;
+
+ if (tile_row * cm->tile_cols + tile_col < startTile ||
+ tile_row * cm->tile_cols + tile_col > endTile)
+ continue;
+
#if CONFIG_ACCOUNTING
if (pbi->acct_enabled) {
td->bit_reader.accounting->last_tell_frac =
@@ -3874,8 +3934,16 @@ static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data,
#else
av1_zero_above_context(cm, tile_info.mi_col_start, tile_info.mi_col_end);
#endif
+#if CONFIG_LOOP_RESTORATION
+ for (int p = 0; p < MAX_MB_PLANE; ++p) {
+ set_default_wiener(td->xd.wiener_info + p);
+ set_default_sgrproj(td->xd.sgrproj_info + p);
+ }
+#endif // CONFIG_LOOP_RESTORATION
- av1_setup_across_tile_boundary_info(cm, &tile_info);
+#if CONFIG_LOOPFILTERING_ACROSS_TILES
+ dec_setup_across_tile_boundary_info(cm, &tile_info);
+#endif // CONFIG_LOOPFILTERING_ACROSS_TILES
for (mi_row = tile_info.mi_row_start; mi_row < tile_info.mi_row_end;
mi_row += cm->mib_size) {
@@ -3885,15 +3953,22 @@ static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data,
for (mi_col = tile_info.mi_col_start; mi_col < tile_info.mi_col_end;
mi_col += cm->mib_size) {
+#if CONFIG_NCOBMC_ADAPT_WEIGHT
+ alloc_ncobmc_pred_buffer(&td->xd);
+ set_sb_mi_boundaries(cm, &td->xd, mi_row, mi_col);
+#endif
decode_partition(pbi, &td->xd,
#if CONFIG_SUPERTX
0,
#endif // CONFIG_SUPERTX
mi_row, mi_col, &td->bit_reader, cm->sb_size);
-#if (CONFIG_NCOBMC || CONFIG_NCOBMC_ADAPT_WEIGHT) && CONFIG_MOTION_VAR
+#if NC_MODE_INFO && CONFIG_MOTION_VAR
detoken_and_recon_sb(pbi, &td->xd, mi_row, mi_col, &td->bit_reader,
cm->sb_size);
#endif
+#if CONFIG_NCOBMC_ADAPT_WEIGHT
+ free_ncobmc_pred_buffer(&td->xd);
+#endif
}
aom_merge_corrupted_flag(&pbi->mb.corrupted, td->xd.corrupted);
if (pbi->mb.corrupted)
@@ -3902,7 +3977,9 @@ static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data,
}
}
+#if !CONFIG_OBU
assert(mi_row > 0);
+#endif
  // When parallel deblocking is enabled, deblocking should not
// be interleaved with decoding. Instead, deblocking should be done
@@ -3942,19 +4019,27 @@ static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data,
#if CONFIG_VAR_TX || CONFIG_CB4X4
// Loopfilter the whole frame.
-#if CONFIG_UV_LVL
- if (cm->lf.filter_level > 0) {
+#if CONFIG_LPF_SB
+ av1_loop_filter_frame(get_frame_new_buffer(cm), cm, &pbi->mb,
+ cm->lf.filter_level, 0, 0, 0, 0);
+#else
+#if CONFIG_LOOPFILTER_LEVEL
+ if (cm->lf.filter_level[0] || cm->lf.filter_level[1]) {
av1_loop_filter_frame(get_frame_new_buffer(cm), cm, &pbi->mb,
- cm->lf.filter_level, 0, 0);
+ cm->lf.filter_level[0], cm->lf.filter_level[1], 0, 0);
av1_loop_filter_frame(get_frame_new_buffer(cm), cm, &pbi->mb,
- cm->lf.filter_level_u, 1, 0);
+ cm->lf.filter_level_u, cm->lf.filter_level_u, 1, 0);
av1_loop_filter_frame(get_frame_new_buffer(cm), cm, &pbi->mb,
- cm->lf.filter_level_v, 2, 0);
+ cm->lf.filter_level_v, cm->lf.filter_level_v, 2, 0);
}
#else
- av1_loop_filter_frame(get_frame_new_buffer(cm), cm, &pbi->mb,
- cm->lf.filter_level, 0, 0);
-#endif // CONFIG_UV_LVL
+#if CONFIG_OBU
+ if (endTile == cm->tile_rows * cm->tile_cols - 1)
+#endif
+ av1_loop_filter_frame(get_frame_new_buffer(cm), cm, &pbi->mb,
+ cm->lf.filter_level, 0, 0);
+#endif // CONFIG_LOOPFILTER_LEVEL
+#endif // CONFIG_LPF_SB
#else
#if CONFIG_PARALLEL_DEBLOCKING
  // Loopfilter all rows in the frame.
@@ -3997,11 +4082,16 @@ static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data,
#if CONFIG_ANS
return data_end;
#else
+#if !CONFIG_OBU
{
// Get last tile data.
TileData *const td = pbi->tile_data + tile_cols * tile_rows - 1;
return aom_reader_find_end(&td->bit_reader);
}
+#else
+ TileData *const td = pbi->tile_data + endTile;
+ return aom_reader_find_end(&td->bit_reader);
+#endif
#endif // CONFIG_ANS
#if CONFIG_EXT_TILE
}
@@ -4041,7 +4131,7 @@ static int tile_worker_hook(TileWorkerData *const tile_data,
0,
#endif
mi_row, mi_col, &tile_data->bit_reader, cm->sb_size);
-#if (CONFIG_NCOBMC || CONFIG_NCOBMC_ADAPT_WEIGHT) && CONFIG_MOTION_VAR
+#if NC_MODE_INFO && CONFIG_MOTION_VAR
detoken_and_recon_sb(pbi, &tile_data->xd, mi_row, mi_col,
&tile_data->bit_reader, cm->sb_size);
#endif
@@ -4152,7 +4242,8 @@ static const uint8_t *decode_tiles_mt(AV1Decoder *pbi, const uint8_t *data,
get_ls_tile_buffers(pbi, data, data_end, tile_buffers);
else
#endif // CONFIG_EXT_TILE
- get_tile_buffers(pbi, data, data_end, tile_buffers);
+ get_tile_buffers(pbi, data, data_end, tile_buffers, 0,
+ cm->tile_rows * cm->tile_cols - 1);
for (tile_row = tile_rows_start; tile_row < tile_rows_end; ++tile_row) {
// Sort the buffers in this tile row based on size in descending order.
@@ -4197,7 +4288,9 @@ static const uint8_t *decode_tiles_mt(AV1Decoder *pbi, const uint8_t *data,
av1_tile_init(tile_info, cm, tile_row, buf->col);
av1_tile_init(&twd->xd.tile, cm, tile_row, buf->col);
- av1_setup_across_tile_boundary_info(cm, tile_info);
+#if CONFIG_LOOPFILTERING_ACROSS_TILES
+ dec_setup_across_tile_boundary_info(cm, tile_info);
+#endif // CONFIG_LOOPFILTERING_ACROSS_TILES
setup_bool_decoder(buf->data, data_end, buf->size, &cm->error,
&twd->bit_reader,
@@ -4220,10 +4313,8 @@ static const uint8_t *decode_tiles_mt(AV1Decoder *pbi, const uint8_t *data,
// Initialise the tile context from the frame context
twd->tctx = *cm->fc;
twd->xd.tile_ctx = &twd->tctx;
-#if CONFIG_PALETTE
twd->xd.plane[0].color_index_map = twd->color_index_map[0];
twd->xd.plane[1].color_index_map = twd->color_index_map[1];
-#endif // CONFIG_PALETTE
worker->had_error = 0;
if (i == num_workers - 1 || tile_col == tile_cols_end - 1) {
@@ -4341,15 +4432,17 @@ static void read_bitdepth_colorspace_sampling(AV1_COMMON *cm,
}
#if CONFIG_REFERENCE_BUFFER
-void read_sequence_header(SequenceHeader *seq_params) {
+void read_sequence_header(SequenceHeader *seq_params,
+ struct aom_read_bit_buffer *rb) {
/* Placeholder for actually reading from the bitstream */
- seq_params->frame_id_numbers_present_flag = FRAME_ID_NUMBERS_PRESENT_FLAG;
- seq_params->frame_id_length_minus7 = FRAME_ID_LENGTH_MINUS7;
- seq_params->delta_frame_id_length_minus2 = DELTA_FRAME_ID_LENGTH_MINUS2;
+ seq_params->frame_id_numbers_present_flag = aom_rb_read_bit(rb);
+ if (seq_params->frame_id_numbers_present_flag) {
+ seq_params->frame_id_length_minus7 = aom_rb_read_literal(rb, 4);
+ seq_params->delta_frame_id_length_minus2 = aom_rb_read_literal(rb, 4);
+ }
}
-#endif
+#endif // CONFIG_REFERENCE_BUFFER
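The two 4-bit fields read above are offsets; the effective lengths used throughout the header parsing below are minus7 + 7 and minus2 + 2 (so 7..22 and 2..17 bits respectively). A tiny sketch of the derived values, matching the expressions used later in read_uncompressed_header():

/* Sketch only: derived field widths implied by the sequence header above. */
static int frame_id_length_bits(const SequenceHeader *sh) {
  return sh->frame_id_length_minus7 + 7; /* 7..22 */
}
static int delta_frame_id_length_bits(const SequenceHeader *sh) {
  return sh->delta_frame_id_length_minus2 + 2; /* 2..17 */
}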
-#if CONFIG_EXT_INTER
static void read_compound_tools(AV1_COMMON *cm,
struct aom_read_bit_buffer *rb) {
(void)cm;
@@ -4373,7 +4466,6 @@ static void read_compound_tools(AV1_COMMON *cm,
}
#endif // CONFIG_WEDGE || CONFIG_COMPOUND_SEGMENT
}
-#endif // CONFIG_EXT_INTER
#if CONFIG_VAR_REFS
static void check_valid_ref_frames(AV1_COMMON *cm) {
@@ -4407,6 +4499,142 @@ static void check_valid_ref_frames(AV1_COMMON *cm) {
}
#endif // CONFIG_VAR_REFS
+#if CONFIG_GLOBAL_MOTION
+static int read_global_motion_params(WarpedMotionParams *params,
+ const WarpedMotionParams *ref_params,
+ struct aom_read_bit_buffer *rb,
+ int allow_hp) {
+ TransformationType type = aom_rb_read_bit(rb);
+ if (type != IDENTITY) {
+#if GLOBAL_TRANS_TYPES > 4
+ type += aom_rb_read_literal(rb, GLOBAL_TYPE_BITS);
+#else
+ if (aom_rb_read_bit(rb))
+ type = ROTZOOM;
+ else
+ type = aom_rb_read_bit(rb) ? TRANSLATION : AFFINE;
+#endif // GLOBAL_TRANS_TYPES > 4
+ }
+
+ int trans_bits;
+ int trans_dec_factor;
+ int trans_prec_diff;
+ *params = default_warp_params;
+ params->wmtype = type;
+ switch (type) {
+ case HOMOGRAPHY:
+ case HORTRAPEZOID:
+ case VERTRAPEZOID:
+ if (type != HORTRAPEZOID)
+ params->wmmat[6] =
+ aom_rb_read_signed_primitive_refsubexpfin(
+ rb, GM_ROW3HOMO_MAX + 1, SUBEXPFIN_K,
+ (ref_params->wmmat[6] >> GM_ROW3HOMO_PREC_DIFF)) *
+ GM_ROW3HOMO_DECODE_FACTOR;
+ if (type != VERTRAPEZOID)
+ params->wmmat[7] =
+ aom_rb_read_signed_primitive_refsubexpfin(
+ rb, GM_ROW3HOMO_MAX + 1, SUBEXPFIN_K,
+ (ref_params->wmmat[7] >> GM_ROW3HOMO_PREC_DIFF)) *
+ GM_ROW3HOMO_DECODE_FACTOR;
+ case AFFINE:
+ case ROTZOOM:
+ params->wmmat[2] = aom_rb_read_signed_primitive_refsubexpfin(
+ rb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
+ (ref_params->wmmat[2] >> GM_ALPHA_PREC_DIFF) -
+ (1 << GM_ALPHA_PREC_BITS)) *
+ GM_ALPHA_DECODE_FACTOR +
+ (1 << WARPEDMODEL_PREC_BITS);
+ if (type != VERTRAPEZOID)
+ params->wmmat[3] = aom_rb_read_signed_primitive_refsubexpfin(
+ rb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
+ (ref_params->wmmat[3] >> GM_ALPHA_PREC_DIFF)) *
+ GM_ALPHA_DECODE_FACTOR;
+ if (type >= AFFINE) {
+ if (type != HORTRAPEZOID)
+ params->wmmat[4] = aom_rb_read_signed_primitive_refsubexpfin(
+ rb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
+ (ref_params->wmmat[4] >> GM_ALPHA_PREC_DIFF)) *
+ GM_ALPHA_DECODE_FACTOR;
+ params->wmmat[5] = aom_rb_read_signed_primitive_refsubexpfin(
+ rb, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
+ (ref_params->wmmat[5] >> GM_ALPHA_PREC_DIFF) -
+ (1 << GM_ALPHA_PREC_BITS)) *
+ GM_ALPHA_DECODE_FACTOR +
+ (1 << WARPEDMODEL_PREC_BITS);
+ } else {
+ params->wmmat[4] = -params->wmmat[3];
+ params->wmmat[5] = params->wmmat[2];
+ }
+ // fallthrough intended
+ case TRANSLATION:
+ trans_bits = (type == TRANSLATION) ? GM_ABS_TRANS_ONLY_BITS - !allow_hp
+ : GM_ABS_TRANS_BITS;
+ trans_dec_factor = (type == TRANSLATION)
+ ? GM_TRANS_ONLY_DECODE_FACTOR * (1 << !allow_hp)
+ : GM_TRANS_DECODE_FACTOR;
+ trans_prec_diff = (type == TRANSLATION)
+ ? GM_TRANS_ONLY_PREC_DIFF + !allow_hp
+ : GM_TRANS_PREC_DIFF;
+ params->wmmat[0] = aom_rb_read_signed_primitive_refsubexpfin(
+ rb, (1 << trans_bits) + 1, SUBEXPFIN_K,
+ (ref_params->wmmat[0] >> trans_prec_diff)) *
+ trans_dec_factor;
+ params->wmmat[1] = aom_rb_read_signed_primitive_refsubexpfin(
+ rb, (1 << trans_bits) + 1, SUBEXPFIN_K,
+ (ref_params->wmmat[1] >> trans_prec_diff)) *
+ trans_dec_factor;
+ case IDENTITY: break;
+ default: assert(0);
+ }
+ if (params->wmtype <= AFFINE) {
+ int good_shear_params = get_shear_params(params);
+ if (!good_shear_params) return 0;
+ }
+
+ return 1;
+}
+
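To make the fixed-point parameters concrete: wmmat[2..5] form the 2x2 part of the model and wmmat[0..1] the translation, all in WARPEDMODEL_PREC_BITS precision. A rough sketch of the mapping they imply for a pixel position, ignoring the rounding and subsampling handling done by the real warp code in av1/common/warped_motion.c (an illustration, not the patch's method):

/* Sketch only: nominal affine mapping implied by the decoded wmmat[]. */
static void warp_point_sketch(const WarpedMotionParams *wm, int x, int y,
                              int *out_x, int *out_y) {
  const int prec = 1 << WARPEDMODEL_PREC_BITS;
  *out_x = (wm->wmmat[2] * x + wm->wmmat[3] * y + wm->wmmat[0]) / prec;
  *out_y = (wm->wmmat[4] * x + wm->wmmat[5] * y + wm->wmmat[1]) / prec;
}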
+static void read_global_motion(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
+ int frame;
+ for (frame = LAST_FRAME; frame <= ALTREF_FRAME; ++frame) {
+ const WarpedMotionParams *ref_params =
+ cm->error_resilient_mode ? &default_warp_params
+ : &cm->prev_frame->global_motion[frame];
+ int good_params = read_global_motion_params(
+ &cm->global_motion[frame], ref_params, rb, cm->allow_high_precision_mv);
+ if (!good_params)
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
+ "Invalid shear parameters for global motion.");
+
+ // TODO(sarahparker, debargha): The logic in the commented out code below
+ // does not work currently and causes mismatches when resize is on. Fix it
+ // before turning the optimization back on.
+ /*
+ YV12_BUFFER_CONFIG *ref_buf = get_ref_frame(cm, frame);
+ if (cm->width == ref_buf->y_crop_width &&
+ cm->height == ref_buf->y_crop_height) {
+ read_global_motion_params(&cm->global_motion[frame],
+ &cm->prev_frame->global_motion[frame], rb,
+ cm->allow_high_precision_mv);
+ } else {
+ cm->global_motion[frame] = default_warp_params;
+ }
+ */
+ /*
+ printf("Dec Ref %d [%d/%d]: %d %d %d %d\n",
+ frame, cm->current_video_frame, cm->show_frame,
+ cm->global_motion[frame].wmmat[0],
+ cm->global_motion[frame].wmmat[1],
+ cm->global_motion[frame].wmmat[2],
+ cm->global_motion[frame].wmmat[3]);
+ */
+ }
+ memcpy(cm->cur_frame->global_motion, cm->global_motion,
+ TOTAL_REFS_PER_FRAME * sizeof(WarpedMotionParams));
+}
+#endif // CONFIG_GLOBAL_MOTION
+
static size_t read_uncompressed_header(AV1Decoder *pbi,
struct aom_read_bit_buffer *rb) {
AV1_COMMON *const cm = &pbi->common;
@@ -4416,11 +4644,6 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
int i, mask, ref_index = 0;
size_t sz;
-#if CONFIG_REFERENCE_BUFFER
- /* TODO: Move outside frame loop or inside key-frame branch */
- read_sequence_header(&pbi->seq_params);
-#endif
-
cm->last_frame_type = cm->frame_type;
cm->last_intra_only = cm->intra_only;
@@ -4429,6 +4652,7 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
cm->is_reference_frame = 1;
#endif // CONFIG_EXT_REFS
+#if !CONFIG_OBU
if (aom_rb_read_literal(rb, 2) != AOM_FRAME_MARKER)
aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Invalid frame marker");
@@ -4441,11 +4665,12 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
if (cm->profile >= MAX_SUPPORTED_PROFILE)
aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
"Unsupported bitstream profile");
+#endif
#if CONFIG_EXT_TILE
cm->large_scale_tile = aom_rb_read_literal(rb, 1);
#if CONFIG_REFERENCE_BUFFER
- if (cm->large_scale_tile) pbi->seq_params.frame_id_numbers_present_flag = 0;
+ if (cm->large_scale_tile) cm->seq_params.frame_id_numbers_present_flag = 0;
#endif // CONFIG_REFERENCE_BUFFER
#endif // CONFIG_EXT_TILE
@@ -4456,11 +4681,11 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
const int existing_frame_idx = aom_rb_read_literal(rb, 3);
const int frame_to_show = cm->ref_frame_map[existing_frame_idx];
#if CONFIG_REFERENCE_BUFFER
- if (pbi->seq_params.frame_id_numbers_present_flag) {
- int frame_id_length = pbi->seq_params.frame_id_length_minus7 + 7;
+ if (cm->seq_params.frame_id_numbers_present_flag) {
+ int frame_id_length = cm->seq_params.frame_id_length_minus7 + 7;
int display_frame_id = aom_rb_read_literal(rb, frame_id_length);
/* Compare display_frame_id with ref_frame_id and check valid for
- * referencing */
+ * referencing */
if (display_frame_id != cm->ref_frame_id[existing_frame_idx] ||
cm->valid_for_referencing[existing_frame_idx] == 0)
aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
@@ -4477,7 +4702,12 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show);
unlock_buffer_pool(pool);
+#if CONFIG_LOOPFILTER_LEVEL
+ cm->lf.filter_level[0] = 0;
+ cm->lf.filter_level[1] = 0;
+#else
cm->lf.filter_level = 0;
+#endif
cm->show_frame = 1;
pbi->refresh_frame_flags = 0;
@@ -4489,13 +4719,24 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
return 0;
}
+#if !CONFIG_OBU
cm->frame_type = (FRAME_TYPE)aom_rb_read_bit(rb);
cm->show_frame = aom_rb_read_bit(rb);
+ if (cm->frame_type != KEY_FRAME)
+ cm->intra_only = cm->show_frame ? 0 : aom_rb_read_bit(rb);
+#else
+ cm->frame_type = (FRAME_TYPE)aom_rb_read_literal(rb, 2); // 2 bits
+ cm->show_frame = aom_rb_read_bit(rb);
+ cm->intra_only = cm->frame_type == INTRA_ONLY_FRAME;
+#endif
cm->error_resilient_mode = aom_rb_read_bit(rb);
#if CONFIG_REFERENCE_BUFFER
- if (pbi->seq_params.frame_id_numbers_present_flag) {
- int frame_id_length = pbi->seq_params.frame_id_length_minus7 + 7;
- int diff_len = pbi->seq_params.delta_frame_id_length_minus2 + 2;
+#if !CONFIG_OBU
+ if (frame_is_intra_only(cm)) read_sequence_header(&cm->seq_params, rb);
+#endif // !CONFIG_OBU
+ if (cm->seq_params.frame_id_numbers_present_flag) {
+ int frame_id_length = cm->seq_params.frame_id_length_minus7 + 7;
+ int diff_len = cm->seq_params.delta_frame_id_length_minus2 + 2;
int prev_frame_id = 0;
if (cm->frame_type != KEY_FRAME) {
prev_frame_id = cm->current_frame_id;
@@ -4533,13 +4774,11 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
}
}
}
-#endif
+#endif // CONFIG_REFERENCE_BUFFER
if (cm->frame_type == KEY_FRAME) {
- if (!av1_read_sync_code(rb))
- aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
- "Invalid frame sync code");
-
+#if !CONFIG_OBU
read_bitdepth_colorspace_sampling(cm, rb, pbi->allow_lowbitdepth);
+#endif
pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1;
for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
@@ -4551,6 +4790,8 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
}
setup_frame_size(cm, rb);
+ setup_sb_size(cm, rb);
+
if (pbi->need_resync) {
memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
pbi->need_resync = 0;
@@ -4558,20 +4799,30 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
#if CONFIG_ANS && ANS_MAX_SYMBOLS
cm->ans_window_size_log2 = aom_rb_read_literal(rb, 4) + 8;
#endif // CONFIG_ANS && ANS_MAX_SYMBOLS
-#if CONFIG_PALETTE || CONFIG_INTRABC
cm->allow_screen_content_tools = aom_rb_read_bit(rb);
-#endif // CONFIG_PALETTE || CONFIG_INTRABC
+#if CONFIG_AMVR
+ if (cm->allow_screen_content_tools) {
+ if (aom_rb_read_bit(rb)) {
+ cm->seq_mv_precision_level = 2;
+ } else {
+ cm->seq_mv_precision_level = aom_rb_read_bit(rb) ? 0 : 1;
+ }
+ } else {
+ cm->seq_mv_precision_level = 0;
+ }
+#endif
#if CONFIG_TEMPMV_SIGNALING
cm->use_prev_frame_mvs = 0;
#endif
} else {
- cm->intra_only = cm->show_frame ? 0 : aom_rb_read_bit(rb);
-#if CONFIG_PALETTE || CONFIG_INTRABC
if (cm->intra_only) cm->allow_screen_content_tools = aom_rb_read_bit(rb);
-#endif // CONFIG_PALETTE || CONFIG_INTRABC
#if CONFIG_TEMPMV_SIGNALING
if (cm->intra_only || cm->error_resilient_mode) cm->use_prev_frame_mvs = 0;
#endif
+#if CONFIG_NO_FRAME_CONTEXT_SIGNALING
+// The only way to reset all frame contexts to their default values is with a
+// keyframe.
+#else
if (cm->error_resilient_mode) {
cm->reset_frame_context = RESET_FRAME_CONTEXT_ALL;
} else {
@@ -4589,16 +4840,16 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
: RESET_FRAME_CONTEXT_CURRENT;
}
}
+#endif
if (cm->intra_only) {
- if (!av1_read_sync_code(rb))
- aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM,
- "Invalid frame sync code");
-
+#if !CONFIG_OBU
read_bitdepth_colorspace_sampling(cm, rb, pbi->allow_lowbitdepth);
+#endif
pbi->refresh_frame_flags = aom_rb_read_literal(rb, REF_FRAMES);
setup_frame_size(cm, rb);
+ setup_sb_size(cm, rb);
if (pbi->need_resync) {
memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
pbi->need_resync = 0;
@@ -4607,7 +4858,13 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
cm->ans_window_size_log2 = aom_rb_read_literal(rb, 4) + 8;
#endif
} else if (pbi->need_resync != 1) { /* Skip if need resync */
+#if CONFIG_OBU
+ pbi->refresh_frame_flags = (cm->frame_type == S_FRAME)
+ ? ~(1 << REF_FRAMES)
+ : aom_rb_read_literal(rb, REF_FRAMES);
+#else
pbi->refresh_frame_flags = aom_rb_read_literal(rb, REF_FRAMES);
+#endif
#if CONFIG_EXT_REFS
if (!pbi->refresh_frame_flags) {
@@ -4620,27 +4877,51 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
const int ref = aom_rb_read_literal(rb, REF_FRAMES_LOG2);
const int idx = cm->ref_frame_map[ref];
+
+ // Most of the time, streams start with a keyframe. In that case,
+ // ref_frame_map will have been filled in at that point and will not
+ // contain any -1's. However, streams are explicitly allowed to start
+ // with an intra-only frame, so long as they don't then signal a
+ // reference to a slot that hasn't been set yet. That's what we are
+ // checking here.
+ if (idx == -1)
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
+ "Inter frame requests nonexistent reference");
+
RefBuffer *const ref_frame = &cm->frame_refs[i];
ref_frame->idx = idx;
ref_frame->buf = &frame_bufs[idx].buf;
+#if CONFIG_FRAME_SIGN_BIAS
+#if CONFIG_OBU
+ // NOTE: For the scenario of (cm->frame_type != S_FRAME),
+ // ref_frame_sign_bias will be reset based on frame offsets.
+ cm->ref_frame_sign_bias[LAST_FRAME + i] = 0;
+#endif // CONFIG_OBU
+#else // !CONFIG_FRAME_SIGN_BIAS
+#if CONFIG_OBU
+ cm->ref_frame_sign_bias[LAST_FRAME + i] =
+ (cm->frame_type == S_FRAME) ? 0 : aom_rb_read_bit(rb);
+#else // !CONFIG_OBU
cm->ref_frame_sign_bias[LAST_FRAME + i] = aom_rb_read_bit(rb);
+#endif // CONFIG_OBU
+#endif // CONFIG_FRAME_SIGN_BIAS
#if CONFIG_REFERENCE_BUFFER
- if (pbi->seq_params.frame_id_numbers_present_flag) {
- int frame_id_length = pbi->seq_params.frame_id_length_minus7 + 7;
- int diff_len = pbi->seq_params.delta_frame_id_length_minus2 + 2;
+ if (cm->seq_params.frame_id_numbers_present_flag) {
+ int frame_id_length = cm->seq_params.frame_id_length_minus7 + 7;
+ int diff_len = cm->seq_params.delta_frame_id_length_minus2 + 2;
int delta_frame_id_minus1 = aom_rb_read_literal(rb, diff_len);
int ref_frame_id =
((cm->current_frame_id - (delta_frame_id_minus1 + 1) +
(1 << frame_id_length)) %
(1 << frame_id_length));
/* Compare values derived from delta_frame_id_minus1 and
- * refresh_frame_flags. Also, check valid for referencing */
+ * refresh_frame_flags. Also, check valid for referencing */
if (ref_frame_id != cm->ref_frame_id[ref] ||
cm->valid_for_referencing[ref] == 0)
aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Reference buffer frame ID mismatch");
}
-#endif
+#endif // CONFIG_REFERENCE_BUFFER
}
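The reference frame id check above recovers the expected id by subtracting delta_frame_id_minus1 + 1 from the current id modulo 2^frame_id_length. Restated as a small helper for clarity (a sketch, not part of the patch):

/* Sketch only: modular recovery of a reference's frame id, as above. */
static int expected_ref_frame_id(int current_frame_id, int delta_minus1,
                                 int frame_id_length) {
  const int wrap = 1 << frame_id_length;
  return (current_frame_id - (delta_minus1 + 1) + wrap) % wrap;
}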
#if CONFIG_VAR_REFS
@@ -4657,12 +4938,20 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
setup_frame_size_with_refs(cm, rb);
#endif
+#if CONFIG_AMVR
+ if (cm->seq_mv_precision_level == 2) {
+ cm->cur_frame_mv_precision_level = aom_rb_read_bit(rb) ? 0 : 1;
+ } else {
+ cm->cur_frame_mv_precision_level = cm->seq_mv_precision_level;
+ }
+#endif
cm->allow_high_precision_mv = aom_rb_read_bit(rb);
cm->interp_filter = read_frame_interp_filter(rb);
#if CONFIG_TEMPMV_SIGNALING
- if (!cm->error_resilient_mode) {
+ if (frame_might_use_prev_frame_mvs(cm))
cm->use_prev_frame_mvs = aom_rb_read_bit(rb);
- }
+ else
+ cm->use_prev_frame_mvs = 0;
#endif
for (i = 0; i < INTER_REFS_PER_FRAME; ++i) {
RefBuffer *const ref_buf = &cm->frame_refs[i];
@@ -4679,14 +4968,45 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
}
}
}
+
+#if CONFIG_FRAME_MARKER
+ if (cm->show_frame == 0) {
+ cm->frame_offset = cm->current_video_frame + aom_rb_read_literal(rb, 4);
+ } else {
+ cm->frame_offset = cm->current_video_frame;
+ }
+ av1_setup_frame_buf_refs(cm);
+
+#if CONFIG_FRAME_SIGN_BIAS
+#if CONFIG_OBU
+ if (cm->frame_type != S_FRAME)
+#endif // CONFIG_OBU
+ av1_setup_frame_sign_bias(cm);
+#define FRAME_SIGN_BIAS_DEBUG 0
+#if FRAME_SIGN_BIAS_DEBUG
+ {
+ printf("\n\nDECODER: Frame=%d, show_frame=%d:", cm->current_video_frame,
+ cm->show_frame);
+ MV_REFERENCE_FRAME ref_frame;
+ for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) {
+ printf(" sign_bias[%d]=%d", ref_frame,
+ cm->ref_frame_sign_bias[ref_frame]);
+ }
+ printf("\n");
+ }
+#endif // FRAME_SIGN_BIAS_DEBUG
+#undef FRAME_SIGN_BIAS_DEBUG
+#endif // CONFIG_FRAME_SIGN_BIAS
+#endif // CONFIG_FRAME_MARKER
+
#if CONFIG_TEMPMV_SIGNALING
cm->cur_frame->intra_only = cm->frame_type == KEY_FRAME || cm->intra_only;
#endif
#if CONFIG_REFERENCE_BUFFER
- if (pbi->seq_params.frame_id_numbers_present_flag) {
+ if (cm->seq_params.frame_id_numbers_present_flag) {
/* If bitmask is set, update reference frame id values and
- mark frames as valid for reference */
+ mark frames as valid for reference */
int refresh_frame_flags =
cm->frame_type == KEY_FRAME ? 0xFF : pbi->refresh_frame_flags;
for (i = 0; i < REF_FRAMES; i++) {
@@ -4696,7 +5016,7 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
}
}
}
-#endif
+#endif // CONFIG_REFERENCE_BUFFER
get_frame_new_buffer(cm)->bit_depth = cm->bit_depth;
get_frame_new_buffer(cm)->color_space = cm->color_space;
@@ -4721,10 +5041,11 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
} else {
cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_FORWARD;
}
-
+#if !CONFIG_NO_FRAME_CONTEXT_SIGNALING
// This flag will be overridden by the call to av1_setup_past_independence
// below, forcing the use of context 0 for those frame types.
cm->frame_context_idx = aom_rb_read_literal(rb, FRAME_CONTEXTS_LOG2);
+#endif
// Generate next_ref_frame_map.
lock_buffer_pool(pool);
@@ -4754,12 +5075,6 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
if (frame_is_intra_only(cm) || cm->error_resilient_mode)
av1_setup_past_independence(cm);
-#if CONFIG_EXT_PARTITION
- set_sb_size(cm, aom_rb_read_bit(rb) ? BLOCK_128X128 : BLOCK_64X64);
-#else
- set_sb_size(cm, BLOCK_64X64);
-#endif // CONFIG_EXT_PARTITION
-
setup_loopfilter(cm, rb);
setup_quantization(cm, rb);
xd->bd = (int)cm->bit_depth;
@@ -4770,13 +5085,18 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
cm->reset_frame_context == RESET_FRAME_CONTEXT_ALL) {
for (i = 0; i < FRAME_CONTEXTS; ++i) cm->frame_contexts[i] = *cm->fc;
} else if (cm->reset_frame_context == RESET_FRAME_CONTEXT_CURRENT) {
+#if CONFIG_NO_FRAME_CONTEXT_SIGNALING
+ if (cm->frame_refs[0].idx <= 0) {
+ cm->frame_contexts[cm->frame_refs[0].idx] = *cm->fc;
+ }
+#else
cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
+#endif // CONFIG_NO_FRAME_CONTEXT_SIGNALING
}
#endif // CONFIG_Q_ADAPT_PROBS
setup_segmentation(cm, rb);
-#if CONFIG_DELTA_Q
{
struct segmentation *const seg = &cm->seg;
int segment_quantizer_active = 0;
@@ -4789,6 +5109,10 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
cm->delta_q_res = 1;
#if CONFIG_EXT_DELTA_Q
cm->delta_lf_res = 1;
+ cm->delta_lf_present_flag = 0;
+#if CONFIG_LOOPFILTER_LEVEL
+ cm->delta_lf_multi = 0;
+#endif // CONFIG_LOOPFILTER_LEVEL
#endif
if (segment_quantizer_active == 0 && cm->base_qindex > 0) {
cm->delta_q_present_flag = aom_rb_read_bit(rb);
@@ -4804,10 +5128,17 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
if (cm->delta_lf_present_flag) {
xd->prev_delta_lf_from_base = 0;
cm->delta_lf_res = 1 << aom_rb_read_literal(rb, 2);
+#if CONFIG_LOOPFILTER_LEVEL
+ cm->delta_lf_multi = aom_rb_read_bit(rb);
+ for (int lf_id = 0; lf_id < FRAME_LF_COUNT; ++lf_id)
+ xd->prev_delta_lf[lf_id] = 0;
+#endif // CONFIG_LOOPFILTER_LEVEL
}
#endif // CONFIG_EXT_DELTA_Q
}
}
+#if CONFIG_AMVR
+ xd->cur_frame_mv_precision_level = cm->cur_frame_mv_precision_level;
#endif
for (i = 0; i < MAX_SEGMENTS; ++i) {
@@ -4830,20 +5161,72 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
#endif // CONFIG_LOOP_RESTORATION
cm->tx_mode = read_tx_mode(cm, rb);
cm->reference_mode = read_frame_reference_mode(cm, rb);
-#if CONFIG_EXT_INTER
+ if (cm->reference_mode != SINGLE_REFERENCE) setup_compound_reference_mode(cm);
read_compound_tools(cm, rb);
-#endif // CONFIG_EXT_INTER
#if CONFIG_EXT_TX
cm->reduced_tx_set_used = aom_rb_read_bit(rb);
#endif // CONFIG_EXT_TX
- read_tile_info(pbi, rb);
- sz = aom_rb_read_literal(rb, 16);
+#if CONFIG_ADAPT_SCAN
+ cm->use_adapt_scan = aom_rb_read_bit(rb);
+ // TODO(angiebird): call av1_init_scan_order only when use_adapt_scan
+ // switches from 1 to 0
+ if (cm->use_adapt_scan == 0) av1_init_scan_order(cm);
+#endif // CONFIG_ADAPT_SCAN
+
+#if CONFIG_EXT_REFS || CONFIG_TEMPMV_SIGNALING
+  // NOTE(zoeliu): Since cm->prev_frame can neither be a frame with
+  // show_existing_frame=1 nor a frame that is not used as a reference, it
+  // is possible that, by the time it is referred to, the frame buffer it
+  // originally pointed to has already expired and been reassigned to the
+  // newly coded current frame. Hence, we need to check whether this is the
+  // case, and if so, we have two choices:
+ // (1) Simply disable the use of previous frame mvs; or
+ // (2) Have cm->prev_frame point to one reference frame buffer,
+ // e.g. LAST_FRAME.
+ if (!dec_is_ref_frame_buf(pbi, cm->prev_frame)) {
+ // Reassign the LAST_FRAME buffer to cm->prev_frame.
+ cm->prev_frame =
+ cm->frame_refs[LAST_FRAME - LAST_FRAME].idx != INVALID_IDX
+ ? &cm->buffer_pool
+ ->frame_bufs[cm->frame_refs[LAST_FRAME - LAST_FRAME].idx]
+ : NULL;
+ }
+#endif // CONFIG_EXT_REFS || CONFIG_TEMPMV_SIGNALING
- if (sz == 0)
+#if CONFIG_TEMPMV_SIGNALING
+ if (cm->use_prev_frame_mvs && !frame_can_use_prev_frame_mvs(cm)) {
aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
- "Invalid header size");
+ "Frame wrongly requests previous frame MVs");
+ }
+#else
+ cm->use_prev_frame_mvs = !cm->error_resilient_mode && cm->prev_frame &&
+#if CONFIG_FRAME_SUPERRES
+ cm->width == cm->last_width &&
+ cm->height == cm->last_height &&
+#else
+ cm->width == cm->prev_frame->buf.y_crop_width &&
+ cm->height == cm->prev_frame->buf.y_crop_height &&
+#endif // CONFIG_FRAME_SUPERRES
+ !cm->last_intra_only && cm->last_show_frame &&
+ (cm->last_frame_type != KEY_FRAME);
+#endif // CONFIG_TEMPMV_SIGNALING
+
+#if CONFIG_GLOBAL_MOTION
+ if (!frame_is_intra_only(cm)) read_global_motion(cm, rb);
+#endif
+
+ read_tile_info(pbi, rb);
+ if (use_compressed_header(cm)) {
+ sz = aom_rb_read_literal(rb, 16);
+ if (sz == 0)
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
+ "Invalid header size");
+ } else {
+ sz = 0;
+ }
return sz;
}
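For context on how the returned sz is used (only a sketch, mirroring the pointer arithmetic later in av1_decode_tg_tiles_and_wrapup() on the non-OBU path): the frame payload is the uncompressed header, then sz bytes of compressed header when use_compressed_header() holds, then the tile data.

/* Sketch only: where tile data starts relative to the frame payload. */
static const uint8_t *tile_data_start_sketch(const uint8_t *frame_data,
                                             size_t uncomp_hdr_size,
                                             size_t first_partition_size) {
  return frame_data + uncomp_hdr_size + first_partition_size;
}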
@@ -4860,122 +5243,14 @@ static void read_supertx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
}
#endif // CONFIG_SUPERTX
-#if CONFIG_GLOBAL_MOTION
-static void read_global_motion_params(WarpedMotionParams *params,
- WarpedMotionParams *ref_params,
- aom_reader *r, int allow_hp) {
- TransformationType type = aom_read_bit(r, ACCT_STR);
- if (type != IDENTITY) type += aom_read_literal(r, GLOBAL_TYPE_BITS, ACCT_STR);
- int trans_bits;
- int trans_dec_factor;
- int trans_prec_diff;
- set_default_warp_params(params);
- params->wmtype = type;
- switch (type) {
- case HOMOGRAPHY:
- case HORTRAPEZOID:
- case VERTRAPEZOID:
- if (type != HORTRAPEZOID)
- params->wmmat[6] =
- aom_read_signed_primitive_refsubexpfin(
- r, GM_ROW3HOMO_MAX + 1, SUBEXPFIN_K,
- (ref_params->wmmat[6] >> GM_ROW3HOMO_PREC_DIFF), ACCT_STR) *
- GM_ROW3HOMO_DECODE_FACTOR;
- if (type != VERTRAPEZOID)
- params->wmmat[7] =
- aom_read_signed_primitive_refsubexpfin(
- r, GM_ROW3HOMO_MAX + 1, SUBEXPFIN_K,
- (ref_params->wmmat[7] >> GM_ROW3HOMO_PREC_DIFF), ACCT_STR) *
- GM_ROW3HOMO_DECODE_FACTOR;
- case AFFINE:
- case ROTZOOM:
- params->wmmat[2] = aom_read_signed_primitive_refsubexpfin(
- r, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
- (ref_params->wmmat[2] >> GM_ALPHA_PREC_DIFF) -
- (1 << GM_ALPHA_PREC_BITS),
- ACCT_STR) *
- GM_ALPHA_DECODE_FACTOR +
- (1 << WARPEDMODEL_PREC_BITS);
- if (type != VERTRAPEZOID)
- params->wmmat[3] =
- aom_read_signed_primitive_refsubexpfin(
- r, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
- (ref_params->wmmat[3] >> GM_ALPHA_PREC_DIFF), ACCT_STR) *
- GM_ALPHA_DECODE_FACTOR;
- if (type >= AFFINE) {
- if (type != HORTRAPEZOID)
- params->wmmat[4] =
- aom_read_signed_primitive_refsubexpfin(
- r, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
- (ref_params->wmmat[4] >> GM_ALPHA_PREC_DIFF), ACCT_STR) *
- GM_ALPHA_DECODE_FACTOR;
- params->wmmat[5] = aom_read_signed_primitive_refsubexpfin(
- r, GM_ALPHA_MAX + 1, SUBEXPFIN_K,
- (ref_params->wmmat[5] >> GM_ALPHA_PREC_DIFF) -
- (1 << GM_ALPHA_PREC_BITS),
- ACCT_STR) *
- GM_ALPHA_DECODE_FACTOR +
- (1 << WARPEDMODEL_PREC_BITS);
- } else {
- params->wmmat[4] = -params->wmmat[3];
- params->wmmat[5] = params->wmmat[2];
- }
- // fallthrough intended
- case TRANSLATION:
- trans_bits = (type == TRANSLATION) ? GM_ABS_TRANS_ONLY_BITS - !allow_hp
- : GM_ABS_TRANS_BITS;
- trans_dec_factor = (type == TRANSLATION)
- ? GM_TRANS_ONLY_DECODE_FACTOR * (1 << !allow_hp)
- : GM_TRANS_DECODE_FACTOR;
- trans_prec_diff = (type == TRANSLATION)
- ? GM_TRANS_ONLY_PREC_DIFF + !allow_hp
- : GM_TRANS_PREC_DIFF;
- params->wmmat[0] =
- aom_read_signed_primitive_refsubexpfin(
- r, (1 << trans_bits) + 1, SUBEXPFIN_K,
- (ref_params->wmmat[0] >> trans_prec_diff), ACCT_STR) *
- trans_dec_factor;
- params->wmmat[1] =
- aom_read_signed_primitive_refsubexpfin(
- r, (1 << trans_bits) + 1, SUBEXPFIN_K,
- (ref_params->wmmat[1] >> trans_prec_diff), ACCT_STR) *
- trans_dec_factor;
- case IDENTITY: break;
- default: assert(0);
- }
- if (params->wmtype <= AFFINE)
- if (!get_shear_params(params)) assert(0);
-}
-
-static void read_global_motion(AV1_COMMON *cm, aom_reader *r) {
- int frame;
- YV12_BUFFER_CONFIG *ref_buf;
- for (frame = LAST_FRAME; frame <= ALTREF_FRAME; ++frame) {
- ref_buf = get_ref_frame(cm, frame);
- if (cm->width == ref_buf->y_crop_width &&
- cm->height == ref_buf->y_crop_height) {
- read_global_motion_params(&cm->global_motion[frame],
- &cm->prev_frame->global_motion[frame], r,
- cm->allow_high_precision_mv);
- } else {
- set_default_warp_params(&cm->global_motion[frame]);
- }
- /*
- printf("Dec Ref %d [%d/%d]: %d %d %d %d\n",
- frame, cm->current_video_frame, cm->show_frame,
- cm->global_motion[frame].wmmat[0],
- cm->global_motion[frame].wmmat[1],
- cm->global_motion[frame].wmmat[2],
- cm->global_motion[frame].wmmat[3]);
- */
- }
- memcpy(cm->cur_frame->global_motion, cm->global_motion,
- TOTAL_REFS_PER_FRAME * sizeof(WarpedMotionParams));
-}
-#endif // CONFIG_GLOBAL_MOTION
-
static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
size_t partition_size) {
+#if CONFIG_RESTRICT_COMPRESSED_HDR
+ (void)pbi;
+ (void)data;
+ (void)partition_size;
+ return 0;
+#else
AV1_COMMON *const cm = &pbi->common;
#if CONFIG_SUPERTX
MACROBLOCKD *const xd = &pbi->mb;
@@ -4994,46 +5269,30 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
aom_internal_error(&cm->error, AOM_CODEC_MEM_ERROR,
"Failed to allocate bool decoder 0");
-#if CONFIG_LOOP_RESTORATION
- if (cm->rst_info[0].frame_restoration_type != RESTORE_NONE ||
- cm->rst_info[1].frame_restoration_type != RESTORE_NONE ||
- cm->rst_info[2].frame_restoration_type != RESTORE_NONE) {
- av1_alloc_restoration_buffers(cm);
- decode_restoration(cm, &r);
- }
-#endif
-
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
if (cm->tx_mode == TX_MODE_SELECT)
av1_diff_update_prob(&r, &fc->quarter_tx_size_prob, ACCT_STR);
#endif
-#if CONFIG_LV_MAP
- av1_read_txb_probs(fc, cm->tx_mode, &r);
-#endif // CONFIG_LV_MAP
+#if CONFIG_LV_MAP && !LV_MAP_PROB
+ av1_read_txb_probs(fc, cm->tx_mode, &r, &cm->counts);
+#endif // CONFIG_LV_MAP && !LV_MAP_PROB
#if !CONFIG_NEW_MULTISYMBOL
#if CONFIG_VAR_TX
- for (i = 0; i < TXFM_PARTITION_CONTEXTS; ++i)
- av1_diff_update_prob(&r, &fc->txfm_partition_prob[i], ACCT_STR);
+ if (cm->tx_mode == TX_MODE_SELECT)
+ for (i = 0; i < TXFM_PARTITION_CONTEXTS; ++i)
+ av1_diff_update_prob(&r, &fc->txfm_partition_prob[i], ACCT_STR);
#endif // CONFIG_VAR_TX
for (i = 0; i < SKIP_CONTEXTS; ++i)
av1_diff_update_prob(&r, &fc->skip_probs[i], ACCT_STR);
#endif
- if (frame_is_intra_only(cm)) {
- av1_copy(cm->fc->kf_y_cdf, av1_kf_y_mode_cdf);
-#if CONFIG_INTRABC
- if (cm->allow_screen_content_tools) {
- av1_diff_update_prob(&r, &fc->intrabc_prob, ACCT_STR);
- }
-#endif
- } else {
+ if (!frame_is_intra_only(cm)) {
#if !CONFIG_NEW_MULTISYMBOL
read_inter_mode_probs(fc, &r);
#endif
-#if CONFIG_EXT_INTER
#if CONFIG_INTERINTRA
if (cm->reference_mode != COMPOUND_REFERENCE &&
cm->allow_interintra_compound) {
@@ -5058,43 +5317,40 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
#endif // CONFIG_WEDGE
}
#endif // CONFIG_INTERINTRA
-#endif // CONFIG_EXT_INTER
-
-#if CONFIG_NCOBMC_ADAPT_WEIGHT && CONFIG_MOTION_VAR
- for (i = 0; i < ADAPT_OVERLAP_BLOCKS; ++i) {
- for (int j = 0; j < MAX_NCOBMC_MODES - 1; ++j)
- av1_diff_update_prob(&r, &fc->ncobmc_mode_prob[i][j], ACCT_STR);
- }
-#endif // CONFIG_NCOBMC_ADAPT_WEIGHT && CONFIG_MOTION_VAR
#if !CONFIG_NEW_MULTISYMBOL
for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
av1_diff_update_prob(&r, &fc->intra_inter_prob[i], ACCT_STR);
#endif
- if (cm->reference_mode != SINGLE_REFERENCE)
- setup_compound_reference_mode(cm);
+#if !CONFIG_NEW_MULTISYMBOL
read_frame_reference_mode_probs(cm, &r);
+#endif
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
for (i = 0; i < COMP_INTER_MODE_CONTEXTS; i++)
av1_diff_update_prob(&r, &fc->comp_inter_mode_prob[i], ACCT_STR);
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
#if !CONFIG_NEW_MULTISYMBOL
- for (i = 0; i < NMV_CONTEXTS; ++i)
- read_mv_probs(&fc->nmvc[i], cm->allow_high_precision_mv, &r);
+#if CONFIG_AMVR
+ if (cm->cur_frame_mv_precision_level == 0) {
+#endif
+ for (i = 0; i < NMV_CONTEXTS; ++i)
+ read_mv_probs(&fc->nmvc[i], cm->allow_high_precision_mv, &r);
+#if CONFIG_AMVR
+ }
+#endif
#endif
#if CONFIG_SUPERTX
if (!xd->lossless[0]) read_supertx_probs(fc, &r);
#endif
-#if CONFIG_GLOBAL_MOTION
- read_global_motion(cm, &r);
-#endif
}
return aom_reader_has_error(&r);
+#endif // CONFIG_RESTRICT_COMPRESSED_HDR
}
+
#ifdef NDEBUG
#define debug_check_frame_counts(cm) (void)0
#else // !NDEBUG
@@ -5105,22 +5361,10 @@ static void debug_check_frame_counts(const AV1_COMMON *const cm) {
av1_zero(zero_counts);
assert(cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_BACKWARD ||
cm->error_resilient_mode);
-#if CONFIG_ENTROPY_STATS
- assert(!memcmp(cm->counts.y_mode, zero_counts.y_mode,
- sizeof(cm->counts.y_mode)));
- assert(!memcmp(cm->counts.uv_mode, zero_counts.uv_mode,
- sizeof(cm->counts.uv_mode)));
-#endif
assert(!memcmp(cm->counts.partition, zero_counts.partition,
sizeof(cm->counts.partition)));
- assert(!memcmp(cm->counts.coef, zero_counts.coef, sizeof(cm->counts.coef)));
- assert(!memcmp(cm->counts.eob_branch, zero_counts.eob_branch,
- sizeof(cm->counts.eob_branch)));
- assert(!memcmp(cm->counts.blockz_count, zero_counts.blockz_count,
- sizeof(cm->counts.blockz_count)));
assert(!memcmp(cm->counts.switchable_interp, zero_counts.switchable_interp,
sizeof(cm->counts.switchable_interp)));
-#if CONFIG_EXT_INTER
assert(!memcmp(cm->counts.inter_compound_mode,
zero_counts.inter_compound_mode,
sizeof(cm->counts.inter_compound_mode)));
@@ -5135,7 +5379,6 @@ static void debug_check_frame_counts(const AV1_COMMON *const cm) {
assert(!memcmp(cm->counts.compound_interinter,
zero_counts.compound_interinter,
sizeof(cm->counts.compound_interinter)));
-#endif // CONFIG_EXT_INTER
#if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
assert(!memcmp(cm->counts.motion_mode, zero_counts.motion_mode,
sizeof(cm->counts.motion_mode)));
@@ -5146,10 +5389,10 @@ static void debug_check_frame_counts(const AV1_COMMON *const cm) {
#endif
assert(!memcmp(cm->counts.intra_inter, zero_counts.intra_inter,
sizeof(cm->counts.intra_inter)));
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
assert(!memcmp(cm->counts.comp_inter_mode, zero_counts.comp_inter_mode,
sizeof(cm->counts.comp_inter_mode)));
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
assert(!memcmp(cm->counts.comp_inter, zero_counts.comp_inter,
sizeof(cm->counts.comp_inter)));
#if CONFIG_EXT_COMP_REFS
@@ -5173,10 +5416,6 @@ static void debug_check_frame_counts(const AV1_COMMON *const cm) {
!memcmp(&cm->counts.mv[0], &zero_counts.mv[0], sizeof(cm->counts.mv[0])));
assert(
!memcmp(&cm->counts.mv[1], &zero_counts.mv[1], sizeof(cm->counts.mv[0])));
- assert(!memcmp(cm->counts.inter_ext_tx, zero_counts.inter_ext_tx,
- sizeof(cm->counts.inter_ext_tx)));
- assert(!memcmp(cm->counts.intra_ext_tx, zero_counts.intra_ext_tx,
- sizeof(cm->counts.intra_ext_tx)));
}
#endif // NDEBUG
@@ -5200,12 +5439,6 @@ static struct aom_read_bit_buffer *init_read_bit_buffer(
//------------------------------------------------------------------------------
-int av1_read_sync_code(struct aom_read_bit_buffer *const rb) {
- return aom_rb_read_literal(rb, 8) == AV1_SYNC_CODE_0 &&
- aom_rb_read_literal(rb, 8) == AV1_SYNC_CODE_1 &&
- aom_rb_read_literal(rb, 8) == AV1_SYNC_CODE_2;
-}
-
void av1_read_frame_size(struct aom_read_bit_buffer *rb, int *width,
int *height) {
*width = aom_rb_read_literal(rb, 16) + 1;
@@ -5239,12 +5472,34 @@ void superres_post_decode(AV1Decoder *pbi) {
}
#endif // CONFIG_FRAME_SUPERRES
-void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data,
- const uint8_t *data_end, const uint8_t **p_data_end) {
+static void dec_setup_frame_boundary_info(AV1_COMMON *const cm) {
+// Note: When LOOPFILTERING_ACROSS_TILES is enabled, we need to clear the
+// boundary information every frame, since the tile boundaries may
+// change every frame (particularly when dependent-horztiles is also
+// enabled); when it is disabled, the only information stored is the frame
+// boundaries, which only depend on the frame size.
+#if !CONFIG_LOOPFILTERING_ACROSS_TILES
+ if (cm->width != cm->last_width || cm->height != cm->last_height)
+#endif // CONFIG_LOOPFILTERING_ACROSS_TILES
+ {
+ int row, col;
+ for (row = 0; row < cm->mi_rows; ++row) {
+ MODE_INFO *mi = cm->mi + row * cm->mi_stride;
+ for (col = 0; col < cm->mi_cols; ++col) {
+ mi->mbmi.boundary_info = 0;
+ mi++;
+ }
+ }
+ av1_setup_frame_boundary_info(cm);
+ }
+}
+
+size_t av1_decode_frame_headers_and_setup(AV1Decoder *pbi, const uint8_t *data,
+ const uint8_t *data_end,
+ const uint8_t **p_data_end) {
AV1_COMMON *const cm = &pbi->common;
MACROBLOCKD *const xd = &pbi->mb;
struct aom_read_bit_buffer rb;
- int context_updated = 0;
uint8_t clear_data[MAX_AV1_HEADER_SIZE];
size_t first_partition_size;
YV12_BUFFER_CONFIG *new_fb;
@@ -5259,6 +5514,15 @@ void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data,
bitstream_queue_set_frame_read(cm->current_video_frame * 2 + cm->show_frame);
#endif
+#if CONFIG_GLOBAL_MOTION
+ int i;
+ for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
+ cm->global_motion[i] = default_warp_params;
+ cm->cur_frame->global_motion[i] = default_warp_params;
+ }
+ xd->global_motion = cm->global_motion;
+#endif // CONFIG_GLOBAL_MOTION
+
first_partition_size = read_uncompressed_header(
pbi, init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
@@ -5288,25 +5552,18 @@ void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data,
xd->cur_buf->y_crop_width, xd->cur_buf->y_crop_height);
#endif // CONFIG_HIGHBITDEPTH
#endif // CONFIG_INTRABC
-#if CONFIG_GLOBAL_MOTION
- int i;
- for (i = LAST_FRAME; i <= ALTREF_FRAME; ++i) {
- set_default_warp_params(&cm->global_motion[i]);
- set_default_warp_params(&cm->cur_frame->global_motion[i]);
- }
- xd->global_motion = cm->global_motion;
-#endif // CONFIG_GLOBAL_MOTION
- if (!first_partition_size) {
+ if (cm->show_existing_frame) {
// showing a frame directly
*p_data_end = data + aom_rb_bytes_read(&rb);
- return;
+ return 0;
}
data += aom_rb_bytes_read(&rb);
- if (!read_is_valid(data, first_partition_size, data_end))
- aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
- "Truncated packet or corrupt header length");
+ if (first_partition_size)
+ if (!read_is_valid(data, first_partition_size, data_end))
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
+ "Truncated packet or corrupt header length");
cm->setup_mi(cm);
@@ -5330,15 +5587,9 @@ void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data,
#endif // CONFIG_EXT_REFS || CONFIG_TEMPMV_SIGNALING
#if CONFIG_TEMPMV_SIGNALING
- if (cm->use_prev_frame_mvs) {
- assert(!cm->error_resilient_mode && cm->prev_frame);
-#if CONFIG_FRAME_SUPERRES
- assert(cm->width == cm->last_width && cm->height == cm->last_height);
-#else
- assert(cm->width == last_fb_ref_buf->buf->y_crop_width &&
- cm->height == last_fb_ref_buf->buf->y_crop_height);
-#endif // CONFIG_FRAME_SUPERRES
- assert(!cm->prev_frame->intra_only);
+ if (cm->use_prev_frame_mvs && !frame_can_use_prev_frame_mvs(cm)) {
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
+ "Frame wrongly requests previous frame MVs");
}
#else
cm->use_prev_frame_mvs = !cm->error_resilient_mode && cm->prev_frame &&
@@ -5353,10 +5604,24 @@ void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data,
(cm->last_frame_type != KEY_FRAME);
#endif // CONFIG_TEMPMV_SIGNALING
- av1_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
+#if CONFIG_MFMV
+ av1_setup_motion_field(cm);
+#endif // CONFIG_MFMV
+ av1_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
+#if CONFIG_NO_FRAME_CONTEXT_SIGNALING
+ if (cm->error_resilient_mode || frame_is_intra_only(cm)) {
+ // use the default frame context values
+ *cm->fc = cm->frame_contexts[FRAME_CONTEXT_DEFAULTS];
+ cm->pre_fc = &cm->frame_contexts[FRAME_CONTEXT_DEFAULTS];
+ } else {
+ *cm->fc = cm->frame_contexts[cm->frame_refs[0].idx];
+ cm->pre_fc = &cm->frame_contexts[cm->frame_refs[0].idx];
+ }
+#else
*cm->fc = cm->frame_contexts[cm->frame_context_idx];
cm->pre_fc = &cm->frame_contexts[cm->frame_context_idx];
+#endif // CONFIG_NO_FRAME_CONTEXT_SIGNALING
if (!cm->fc->initialized)
aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Uninitialized entropy context.");
@@ -5364,24 +5629,50 @@ void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data,
av1_zero(cm->counts);
xd->corrupted = 0;
- new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);
- if (new_fb->corrupted)
- aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
- "Decode failed. Frame data header is corrupted.");
+ if (first_partition_size) {
+ new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);
+ if (new_fb->corrupted)
+ aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
+ "Decode failed. Frame data header is corrupted.");
+ }
+ return first_partition_size;
+}
+void av1_decode_tg_tiles_and_wrapup(AV1Decoder *pbi, const uint8_t *data,
+ const uint8_t *data_end,
+ const uint8_t **p_data_end, int startTile,
+ int endTile, int initialize_flag) {
+ AV1_COMMON *const cm = &pbi->common;
+ MACROBLOCKD *const xd = &pbi->mb;
+ int context_updated = 0;
+
+#if CONFIG_LOOP_RESTORATION
+ if (cm->rst_info[0].frame_restoration_type != RESTORE_NONE ||
+ cm->rst_info[1].frame_restoration_type != RESTORE_NONE ||
+ cm->rst_info[2].frame_restoration_type != RESTORE_NONE) {
+ av1_alloc_restoration_buffers(cm);
+ }
+#endif
+
+#if !CONFIG_LOOPFILTER_LEVEL
if (cm->lf.filter_level && !cm->skip_loop_filter) {
- av1_loop_filter_frame_init(cm, cm->lf.filter_level);
+ av1_loop_filter_frame_init(cm, cm->lf.filter_level, cm->lf.filter_level);
}
+#endif
// If encoded in frame parallel mode, frame context is ready after decoding
// the frame header.
- if (cm->frame_parallel_decode &&
+ if (cm->frame_parallel_decode && initialize_flag &&
cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_BACKWARD) {
AVxWorker *const worker = pbi->frame_worker_owner;
FrameWorkerData *const frame_worker_data = worker->data1;
if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_FORWARD) {
context_updated = 1;
+#if CONFIG_NO_FRAME_CONTEXT_SIGNALING
+ cm->frame_contexts[cm->new_fb_idx] = *cm->fc;
+#else
cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
+#endif // CONFIG_NO_FRAME_CONTEXT_SIGNALING
}
av1_frameworker_lock_stats(worker);
pbi->cur_buf->row = -1;
@@ -5392,7 +5683,7 @@ void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data,
av1_frameworker_unlock_stats(worker);
}
- av1_setup_frame_boundary_info(cm);
+ dec_setup_frame_boundary_info(cm);
if (pbi->max_threads > 1 && !CONFIG_CB4X4 &&
#if CONFIG_EXT_TILE
@@ -5400,22 +5691,49 @@ void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data,
#endif // CONFIG_EXT_TILE
cm->tile_cols > 1) {
// Multi-threaded tile decoder
- *p_data_end = decode_tiles_mt(pbi, data + first_partition_size, data_end);
+ *p_data_end =
+ decode_tiles_mt(pbi, data + pbi->first_partition_size, data_end);
if (!xd->corrupted) {
if (!cm->skip_loop_filter) {
- // If multiple threads are used to decode tiles, then we use those
- // threads to do parallel loopfiltering.
- av1_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane, cm->lf.filter_level,
- 0, 0, pbi->tile_workers, pbi->num_tile_workers,
+// If multiple threads are used to decode tiles, then we use those
+// threads to do parallel loopfiltering.
+#if CONFIG_LOOPFILTER_LEVEL
+ av1_loop_filter_frame_mt(
+ (YV12_BUFFER_CONFIG *)xd->cur_buf, cm, pbi->mb.plane,
+ cm->lf.filter_level[0], cm->lf.filter_level[1], 0, 0,
+ pbi->tile_workers, pbi->num_tile_workers, &pbi->lf_row_sync);
+#else
+ av1_loop_filter_frame_mt((YV12_BUFFER_CONFIG *)xd->cur_buf, cm,
+ pbi->mb.plane, cm->lf.filter_level, 0, 0,
+ pbi->tile_workers, pbi->num_tile_workers,
&pbi->lf_row_sync);
+#endif // CONFIG_LOOPFILTER_LEVEL
}
} else {
aom_internal_error(&cm->error, AOM_CODEC_CORRUPT_FRAME,
"Decode failed. Frame data is corrupted.");
}
} else {
- *p_data_end = decode_tiles(pbi, data + first_partition_size, data_end);
+#if CONFIG_OBU
+ *p_data_end = decode_tiles(pbi, data, data_end, startTile, endTile);
+#else
+ *p_data_end = decode_tiles(
+ pbi, data + pbi->uncomp_hdr_size + pbi->first_partition_size, data_end,
+ startTile, endTile);
+#endif
+ }
+
+ if (endTile != cm->tile_rows * cm->tile_cols - 1) {
+ return;
+ }
+
+#if CONFIG_STRIPED_LOOP_RESTORATION
+ if (cm->rst_info[0].frame_restoration_type != RESTORE_NONE ||
+ cm->rst_info[1].frame_restoration_type != RESTORE_NONE ||
+ cm->rst_info[2].frame_restoration_type != RESTORE_NONE) {
+ av1_loop_restoration_save_boundary_lines(&pbi->cur_buf->buf, cm);
}
+#endif
#if CONFIG_CDEF
if (!cm->skip_loop_filter && !cm->all_lossless) {
@@ -5431,7 +5749,9 @@ void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data,
if (cm->rst_info[0].frame_restoration_type != RESTORE_NONE ||
cm->rst_info[1].frame_restoration_type != RESTORE_NONE ||
cm->rst_info[2].frame_restoration_type != RESTORE_NONE) {
- av1_loop_restoration_frame(new_fb, cm, cm->rst_info, 7, 0, NULL);
+ aom_extend_frame_borders((YV12_BUFFER_CONFIG *)xd->cur_buf);
+ av1_loop_restoration_frame((YV12_BUFFER_CONFIG *)xd->cur_buf, cm,
+ cm->rst_info, 7, 0, NULL);
}
#endif // CONFIG_LOOP_RESTORATION
@@ -5443,7 +5763,12 @@ void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data,
aom_malloc(cm->tile_rows * cm->tile_cols *
sizeof(&pbi->tile_data[0].tctx.partition_cdf[0][0]));
make_update_tile_list_dec(pbi, cm->tile_rows, cm->tile_cols, tile_ctxs);
+#if CONFIG_LV_MAP
av1_adapt_coef_probs(cm);
+#endif // CONFIG_LV_MAP
+#if CONFIG_SYMBOLRATE
+ av1_dump_symbol_rate(cm);
+#endif
av1_adapt_intra_frame_probs(cm);
av1_average_tile_coef_cdfs(pbi->common.fc, tile_ctxs, cdf_ptrs,
cm->tile_rows * cm->tile_cols);
@@ -5459,7 +5784,9 @@ void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data,
if (!frame_is_intra_only(cm)) {
av1_adapt_inter_frame_probs(cm);
+#if !CONFIG_NEW_MULTISYMBOL
av1_adapt_mv_probs(cm, cm->allow_high_precision_mv);
+#endif
av1_average_tile_inter_cdfs(&pbi->common, pbi->common.fc, tile_ctxs,
cdf_ptrs, cm->tile_rows * cm->tile_cols);
av1_average_tile_mv_cdfs(pbi->common.fc, tile_ctxs, cdf_ptrs,
@@ -5481,7 +5808,153 @@ void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data,
}
#endif
- // Non frame parallel update frame context here.
+// Non frame parallel update frame context here.
+#if CONFIG_NO_FRAME_CONTEXT_SIGNALING
+ if (!context_updated) cm->frame_contexts[cm->new_fb_idx] = *cm->fc;
+#else
if (!cm->error_resilient_mode && !context_updated)
cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
+#endif
+}
+
+#if CONFIG_OBU
+
+static OBU_TYPE read_obu_header(struct aom_read_bit_buffer *rb,
+ uint32_t *header_size) {
+ OBU_TYPE obu_type;
+ int obu_extension_flag;
+
+ *header_size = 1;
+
+ obu_type = (OBU_TYPE)aom_rb_read_literal(rb, 5);
+ aom_rb_read_literal(rb, 2); // reserved
+ obu_extension_flag = aom_rb_read_bit(rb);
+ if (obu_extension_flag) {
+ *header_size += 1;
+ aom_rb_read_literal(rb, 3); // temporal_id
+ aom_rb_read_literal(rb, 2);
+ aom_rb_read_literal(rb, 2);
+ aom_rb_read_literal(rb, 1); // reserved
+ }
+
+ return obu_type;
+}
+
+static uint32_t read_temporal_delimiter_obu() { return 0; }
+
+static uint32_t read_sequence_header_obu(AV1Decoder *pbi,
+ struct aom_read_bit_buffer *rb) {
+ AV1_COMMON *const cm = &pbi->common;
+ SequenceHeader *const seq_params = &cm->seq_params;
+ uint32_t saved_bit_offset = rb->bit_offset;
+
+ cm->profile = av1_read_profile(rb);
+ aom_rb_read_literal(rb, 4); // level
+
+ seq_params->frame_id_numbers_present_flag = aom_rb_read_bit(rb);
+ if (seq_params->frame_id_numbers_present_flag) {
+ seq_params->frame_id_length_minus7 = aom_rb_read_literal(rb, 4);
+ seq_params->delta_frame_id_length_minus2 = aom_rb_read_literal(rb, 4);
+ }
+
+ read_bitdepth_colorspace_sampling(cm, rb, pbi->allow_lowbitdepth);
+
+ return ((rb->bit_offset - saved_bit_offset + 7) >> 3);
+}
+
+static uint32_t read_frame_header_obu(AV1Decoder *pbi, const uint8_t *data,
+ const uint8_t *data_end,
+ const uint8_t **p_data_end) {
+ size_t header_size;
+
+ header_size =
+ av1_decode_frame_headers_and_setup(pbi, data, data_end, p_data_end);
+ return (uint32_t)(pbi->uncomp_hdr_size + header_size);
+}
+
+static uint32_t read_tile_group_header(AV1Decoder *pbi,
+ struct aom_read_bit_buffer *rb,
+ int *startTile, int *endTile) {
+ AV1_COMMON *const cm = &pbi->common;
+ uint32_t saved_bit_offset = rb->bit_offset;
+
+ *startTile = aom_rb_read_literal(rb, cm->log2_tile_rows + cm->log2_tile_cols);
+ *endTile = aom_rb_read_literal(rb, cm->log2_tile_rows + cm->log2_tile_cols);
+
+ return ((rb->bit_offset - saved_bit_offset + 7) >> 3);
+}
+
+static uint32_t read_one_tile_group_obu(AV1Decoder *pbi,
+ struct aom_read_bit_buffer *rb,
+ int is_first_tg, const uint8_t *data,
+ const uint8_t *data_end,
+ const uint8_t **p_data_end,
+ int *is_last_tg) {
+ AV1_COMMON *const cm = &pbi->common;
+ int startTile, endTile;
+ uint32_t header_size, tg_payload_size;
+
+ header_size = read_tile_group_header(pbi, rb, &startTile, &endTile);
+ data += header_size;
+ av1_decode_tg_tiles_and_wrapup(pbi, data, data_end, p_data_end, startTile,
+ endTile, is_first_tg);
+ tg_payload_size = (uint32_t)(*p_data_end - data);
+
+ // TODO(shan): For now, assume all tile groups received in order
+ *is_last_tg = endTile == cm->tile_rows * cm->tile_cols - 1;
+
+ return header_size + tg_payload_size;
+}
+
+void av1_decode_frame_from_obus(struct AV1Decoder *pbi, const uint8_t *data,
+ const uint8_t *data_end,
+ const uint8_t **p_data_end) {
+ AV1_COMMON *const cm = &pbi->common;
+ int frame_decoding_finished = 0;
+ int is_first_tg_obu_received = 1;
+ int frame_header_received = 0;
+ int frame_header_size = 0;
+
+ // decode frame as a series of OBUs
+ while (!frame_decoding_finished && !cm->error.error_code) {
+ struct aom_read_bit_buffer rb;
+ uint8_t clear_data[80];
+ uint32_t obu_size, obu_header_size, obu_payload_size = 0;
+ OBU_TYPE obu_type;
+
+ init_read_bit_buffer(pbi, &rb, data + 4, data_end, clear_data);
+
+ // every obu is preceded by 4-byte size of obu (obu header + payload size)
+ // The obu size is only needed for tile group OBUs
+ obu_size = mem_get_le32(data);
+ obu_type = read_obu_header(&rb, &obu_header_size);
+ data += (4 + obu_header_size);
+
+ switch (obu_type) {
+ case OBU_TD: obu_payload_size = read_temporal_delimiter_obu(); break;
+ case OBU_SEQUENCE_HEADER:
+ obu_payload_size = read_sequence_header_obu(pbi, &rb);
+ break;
+ case OBU_FRAME_HEADER:
+ // Only decode first frame header received
+ if (!frame_header_received) {
+ frame_header_size = obu_payload_size =
+ read_frame_header_obu(pbi, data, data_end, p_data_end);
+ frame_header_received = 1;
+ } else {
+ obu_payload_size = frame_header_size;
+ }
+ if (cm->show_existing_frame) frame_decoding_finished = 1;
+ break;
+ case OBU_TILE_GROUP:
+ obu_payload_size = read_one_tile_group_obu(
+ pbi, &rb, is_first_tg_obu_received, data, data + obu_size - 1,
+ p_data_end, &frame_decoding_finished);
+ is_first_tg_obu_received = 0;
+ break;
+ default: break;
+ }
+ data += obu_payload_size;
+ }
}
+#endif
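
The CONFIG_OBU loop added above walks a stream in which every OBU is preceded by a 4-byte little-endian size covering the one- or two-byte header plus the payload, with the OBU type in the top five bits of the first header byte and the extension flag in its lowest bit. The following is a minimal, self-contained sketch of that framing, not the aom API; the TOY_* names and the toy payload bytes are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Illustrative OBU types only; the real enum lives in the aom headers. */
enum { TOY_OBU_TD = 1, TOY_OBU_FRAME_HEADER = 3 };

/* Read a 4-byte little-endian value, mirroring mem_get_le32(). */
static uint32_t get_le32(const uint8_t *p) {
  return (uint32_t)p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16) |
         ((uint32_t)p[3] << 24);
}

int main(void) {
  /* Toy stream: [4-byte size][header][payload] repeated. */
  const uint8_t stream[] = {
    /* TD OBU: size = 1 (header only), type in the top 5 bits. */
    0x01, 0x00, 0x00, 0x00, (uint8_t)(TOY_OBU_TD << 3),
    /* "frame header" OBU: size = 3 (1 header byte + 2 payload bytes). */
    0x03, 0x00, 0x00, 0x00, (uint8_t)(TOY_OBU_FRAME_HEADER << 3), 0xAA, 0xBB,
  };
  const uint8_t *data = stream;
  const uint8_t *end = stream + sizeof(stream);

  while (data + 4 <= end) {
    const uint32_t obu_size = get_le32(data);   /* header + payload bytes */
    const uint8_t header = data[4];
    const int obu_type = header >> 3;           /* top 5 bits */
    const int extension_flag = header & 1;      /* lowest bit */
    const uint32_t header_size = 1u + (extension_flag ? 1u : 0u);
    printf("type=%d header_bytes=%u payload_bytes=%u\n", obu_type,
           header_size, obu_size - header_size);
    data += 4 + obu_size;                       /* advance to the next OBU */
  }
  return 0;
}

Compiled on its own, this prints one line per OBU, mirroring how read_obu_header() and the dispatch switch above consume the buffer.
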
diff --git a/third_party/aom/av1/decoder/decodeframe.h b/third_party/aom/av1/decoder/decodeframe.h
index a904658b0..0e7eb6a1d 100644
--- a/third_party/aom/av1/decoder/decodeframe.h
+++ b/third_party/aom/av1/decoder/decodeframe.h
@@ -21,16 +21,33 @@ struct aom_read_bit_buffer;
#if CONFIG_REFERENCE_BUFFER
/* Placeholder for now */
-void read_sequence_header(SequenceHeader *seq_params);
+void read_sequence_header(SequenceHeader *seq_params,
+ struct aom_read_bit_buffer *rb);
#endif
-int av1_read_sync_code(struct aom_read_bit_buffer *const rb);
void av1_read_frame_size(struct aom_read_bit_buffer *rb, int *width,
int *height);
BITSTREAM_PROFILE av1_read_profile(struct aom_read_bit_buffer *rb);
+// This function is now obsolete
void av1_decode_frame(struct AV1Decoder *pbi, const uint8_t *data,
const uint8_t *data_end, const uint8_t **p_data_end);
+size_t av1_decode_frame_headers_and_setup(struct AV1Decoder *pbi,
+ const uint8_t *data,
+ const uint8_t *data_end,
+ const uint8_t **p_data_end);
+
+void av1_decode_tg_tiles_and_wrapup(struct AV1Decoder *pbi, const uint8_t *data,
+ const uint8_t *data_end,
+ const uint8_t **p_data_end, int startTile,
+ int endTile, int initialize_flag);
+
+#if CONFIG_OBU
+// replaces av1_decode_frame
+void av1_decode_frame_from_obus(struct AV1Decoder *pbi, const uint8_t *data,
+ const uint8_t *data_end,
+ const uint8_t **p_data_end);
+#endif
#ifdef __cplusplus
} // extern "C"
diff --git a/third_party/aom/av1/decoder/decodemv.c b/third_party/aom/av1/decoder/decodemv.c
index 7c8544283..cac27e9a6 100644
--- a/third_party/aom/av1/decoder/decodemv.c
+++ b/third_party/aom/av1/decoder/decodemv.c
@@ -36,11 +36,9 @@
#define DEC_MISMATCH_DEBUG 0
static PREDICTION_MODE read_intra_mode(aom_reader *r, aom_cdf_prob *cdf) {
- return (PREDICTION_MODE)
- av1_intra_mode_inv[aom_read_symbol(r, cdf, INTRA_MODES, ACCT_STR)];
+ return (PREDICTION_MODE)aom_read_symbol(r, cdf, INTRA_MODES, ACCT_STR);
}
-#if CONFIG_DELTA_Q
static int read_delta_qindex(AV1_COMMON *cm, MACROBLOCKD *xd, aom_reader *r,
MB_MODE_INFO *const mbmi, int mi_col, int mi_row) {
FRAME_COUNTS *counts = xd->counts;
@@ -63,7 +61,7 @@ static int read_delta_qindex(AV1_COMMON *cm, MACROBLOCKD *xd, aom_reader *r,
}
if (!smallval) {
- rem_bits = aom_read_literal(r, 3, ACCT_STR);
+ rem_bits = aom_read_literal(r, 3, ACCT_STR) + 1;
thr = (1 << rem_bits) + 1;
abs = aom_read_literal(r, rem_bits, ACCT_STR) + thr;
}
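
The "+ 1" added to rem_bits above changes the escape path for large delta-q values: the 3-bit field now codes rem_bits - 1, and the decoded magnitude is offset by thr = (1 << rem_bits) + 1 so it starts just past what the small-value symbol can already represent. A minimal arithmetic sketch follows; it is stand-alone, not the aom reader, and the fixed inputs stand in for aom_read_literal() calls.

#include <assert.h>
#include <stdio.h>

/* Escape path of read_delta_qindex()/read_delta_lflevel() after the change
 * above: a 3-bit field codes (rem_bits - 1), so rem_bits is 1..8, and the
 * magnitude is rem_value plus the threshold (1 << rem_bits) + 1. */
static int decode_escaped_abs(int rem_bits_field, int rem_value) {
  const int rem_bits = rem_bits_field + 1;  /* 3-bit field, then "+ 1" */
  const int thr = (1 << rem_bits) + 1;      /* smallest magnitude on this path */
  assert(rem_value >= 0 && rem_value < (1 << rem_bits));
  return rem_value + thr;
}

int main(void) {
  /* rem_bits_field = 0 -> rem_bits = 1 -> magnitudes 3..4,
   * rem_bits_field = 7 -> rem_bits = 8 -> magnitudes 257..512. */
  printf("%d %d\n", decode_escaped_abs(0, 0), decode_escaped_abs(0, 1));
  printf("%d %d\n", decode_escaped_abs(7, 0), decode_escaped_abs(7, 255));
  return 0;
}
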
@@ -80,6 +78,9 @@ static int read_delta_qindex(AV1_COMMON *cm, MACROBLOCKD *xd, aom_reader *r,
}
#if CONFIG_EXT_DELTA_Q
static int read_delta_lflevel(AV1_COMMON *cm, MACROBLOCKD *xd, aom_reader *r,
+#if CONFIG_LOOPFILTER_LEVEL
+ int lf_id,
+#endif
MB_MODE_INFO *const mbmi, int mi_col,
int mi_row) {
FRAME_COUNTS *counts = xd->counts;
@@ -93,16 +94,37 @@ static int read_delta_lflevel(AV1_COMMON *cm, MACROBLOCKD *xd, aom_reader *r,
FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
(void)cm;
- if ((bsize != BLOCK_64X64 || mbmi->skip == 0) && read_delta_lf_flag) {
+ if ((bsize != cm->sb_size || mbmi->skip == 0) && read_delta_lf_flag) {
+#if CONFIG_LOOPFILTER_LEVEL
+ if (cm->delta_lf_multi) {
+ assert(lf_id >= 0 && lf_id < FRAME_LF_COUNT);
+ abs = aom_read_symbol(r, ec_ctx->delta_lf_multi_cdf[lf_id],
+ DELTA_LF_PROBS + 1, ACCT_STR);
+ } else {
+ abs = aom_read_symbol(r, ec_ctx->delta_lf_cdf, DELTA_LF_PROBS + 1,
+ ACCT_STR);
+ }
+#else
abs =
aom_read_symbol(r, ec_ctx->delta_lf_cdf, DELTA_LF_PROBS + 1, ACCT_STR);
+#endif // CONFIG_LOOPFILTER_LEVEL
smallval = (abs < DELTA_LF_SMALL);
if (counts) {
+#if CONFIG_LOOPFILTER_LEVEL
+ if (cm->delta_lf_multi) {
+ for (i = 0; i < abs; ++i) counts->delta_lf_multi[lf_id][i][1]++;
+ if (smallval) counts->delta_lf_multi[lf_id][abs][0]++;
+ } else {
+ for (i = 0; i < abs; ++i) counts->delta_lf[i][1]++;
+ if (smallval) counts->delta_lf[abs][0]++;
+ }
+#else
for (i = 0; i < abs; ++i) counts->delta_lf[i][1]++;
if (smallval) counts->delta_lf[abs][0]++;
+#endif // CONFIG_LOOPFILTER_LEVEL
}
if (!smallval) {
- rem_bits = aom_read_literal(r, 3, ACCT_STR);
+ rem_bits = aom_read_literal(r, 3, ACCT_STR) + 1;
thr = (1 << rem_bits) + 1;
abs = aom_read_literal(r, rem_bits, ACCT_STR) + thr;
}
@@ -118,57 +140,41 @@ static int read_delta_lflevel(AV1_COMMON *cm, MACROBLOCKD *xd, aom_reader *r,
return reduced_delta_lflevel;
}
#endif
-#endif
-
-static PREDICTION_MODE read_intra_mode_y(FRAME_CONTEXT *ec_ctx, MACROBLOCKD *xd,
- aom_reader *r, int size_group) {
- const PREDICTION_MODE y_mode =
- read_intra_mode(r, ec_ctx->y_mode_cdf[size_group]);
-#if CONFIG_ENTROPY_STATS
- FRAME_COUNTS *counts = xd->counts;
- if (counts) ++counts->y_mode[size_group][y_mode];
-#else
- /* TODO(negge): Can we remove this parameter? */
- (void)xd;
-#endif // CONFIG_ENTROPY_STATS
- return y_mode;
-}
static UV_PREDICTION_MODE read_intra_mode_uv(FRAME_CONTEXT *ec_ctx,
- MACROBLOCKD *xd, aom_reader *r,
+ aom_reader *r,
PREDICTION_MODE y_mode) {
const UV_PREDICTION_MODE uv_mode =
- read_intra_mode(r, ec_ctx->uv_mode_cdf[y_mode]);
-#if CONFIG_ENTROPY_STATS
- FRAME_COUNTS *counts = xd->counts;
- if (counts) ++counts->uv_mode[y_mode][uv_mode];
+#if CONFIG_CFL
+ aom_read_symbol(r, ec_ctx->uv_mode_cdf[y_mode], UV_INTRA_MODES, ACCT_STR);
#else
- /* TODO(negge): Can we remove this parameter? */
- (void)xd;
-#endif // CONFIG_ENTROPY_STATS
+ read_intra_mode(r, ec_ctx->uv_mode_cdf[y_mode]);
+#endif // CONFIG_CFL
return uv_mode;
}
#if CONFIG_CFL
static int read_cfl_alphas(FRAME_CONTEXT *const ec_ctx, aom_reader *r,
- CFL_SIGN_TYPE signs_out[CFL_PRED_PLANES]) {
- const int ind =
- aom_read_symbol(r, ec_ctx->cfl_alpha_cdf, CFL_ALPHABET_SIZE, "cfl:alpha");
- // Signs are only coded for nonzero values
- // sign == 0 implies negative alpha
- // sign == 1 implies positive alpha
- signs_out[CFL_PRED_U] = cfl_alpha_codes[ind][CFL_PRED_U]
- ? aom_read_bit(r, "cfl:sign")
- : CFL_SIGN_POS;
- signs_out[CFL_PRED_V] = cfl_alpha_codes[ind][CFL_PRED_V]
- ? aom_read_bit(r, "cfl:sign")
- : CFL_SIGN_POS;
-
- return ind;
+ int *signs_out) {
+ const int joint_sign =
+ aom_read_symbol(r, ec_ctx->cfl_sign_cdf, CFL_JOINT_SIGNS, "cfl:signs");
+ int idx = 0;
+ // Magnitudes are only coded for nonzero values
+ if (CFL_SIGN_U(joint_sign) != CFL_SIGN_ZERO) {
+ aom_cdf_prob *cdf_u = ec_ctx->cfl_alpha_cdf[CFL_CONTEXT_U(joint_sign)];
+ idx = aom_read_symbol(r, cdf_u, CFL_ALPHABET_SIZE, "cfl:alpha_u")
+ << CFL_ALPHABET_SIZE_LOG2;
+ }
+ if (CFL_SIGN_V(joint_sign) != CFL_SIGN_ZERO) {
+ aom_cdf_prob *cdf_v = ec_ctx->cfl_alpha_cdf[CFL_CONTEXT_V(joint_sign)];
+ idx += aom_read_symbol(r, cdf_v, CFL_ALPHABET_SIZE, "cfl:alpha_v");
+ }
+ *signs_out = joint_sign;
+ return idx;
}
#endif
-#if CONFIG_EXT_INTER && CONFIG_INTERINTRA
+#if CONFIG_INTERINTRA
static INTERINTRA_MODE read_interintra_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
aom_reader *r, int size_group) {
(void)cm;
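
Returning to the rewritten read_cfl_alphas() above: the two chroma-from-luma alpha magnitudes are now packed into a single index, with the U magnitude in the high bits and the V magnitude in the low bits, and a magnitude is only read from the bitstream when the corresponding sign in the joint sign is nonzero. Below is a small stand-alone sketch of that packing; TOY_CFL_ALPHABET_SIZE_LOG2 is an assumed value for illustration, not the library constant.

#include <stdio.h>

#define TOY_CFL_ALPHABET_SIZE_LOG2 4  /* assumption for the sketch */

/* Pack the per-plane alpha magnitudes the way read_cfl_alphas() does:
 * U in the high bits, V in the low bits, each present only when its sign
 * component of the joint sign is nonzero. */
static int pack_cfl_alpha_idx(int u_sign_nonzero, int alpha_u,
                              int v_sign_nonzero, int alpha_v) {
  int idx = 0;
  if (u_sign_nonzero) idx = alpha_u << TOY_CFL_ALPHABET_SIZE_LOG2;
  if (v_sign_nonzero) idx += alpha_v;
  return idx;
}

int main(void) {
  /* Both planes signalled: U magnitude 3, V magnitude 5 -> 0x35. */
  printf("0x%02x\n", pack_cfl_alpha_idx(1, 3, 1, 5));
  /* V sign is zero: only the U magnitude is read from the bitstream. */
  printf("0x%02x\n", pack_cfl_alpha_idx(1, 3, 0, 0));
  return 0;
}
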
@@ -179,7 +185,7 @@ static INTERINTRA_MODE read_interintra_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
if (counts) ++counts->interintra_mode[size_group][ii_mode];
return ii_mode;
}
-#endif // CONFIG_EXT_INTER && CONFIG_INTERINTRA
+#endif // CONFIG_INTERINTRA
static PREDICTION_MODE read_inter_mode(FRAME_CONTEXT *ec_ctx, MACROBLOCKD *xd,
aom_reader *r, int16_t ctx) {
@@ -244,16 +250,11 @@ static void read_drl_idx(FRAME_CONTEXT *ec_ctx, MACROBLOCKD *xd,
uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
mbmi->ref_mv_idx = 0;
-#if CONFIG_EXT_INTER
+ if (mbmi->mode == NEWMV || mbmi->mode == NEW_NEWMV
#if CONFIG_COMPOUND_SINGLEREF
- if (mbmi->mode == NEWMV || mbmi->mode == NEW_NEWMV ||
- mbmi->mode == SR_NEW_NEWMV) {
-#else // !CONFIG_COMPOUND_SINGLEREF
- if (mbmi->mode == NEWMV || mbmi->mode == NEW_NEWMV) {
+ || mbmi->mode == SR_NEW_NEWMV
#endif // CONFIG_COMPOUND_SINGLEREF
-#else // !CONFIG_EXT_INTER
- if (mbmi->mode == NEWMV) {
-#endif // CONFIG_EXT_INTER
+ ) {
int idx;
for (idx = 0; idx < 2; ++idx) {
if (xd->ref_mv_count[ref_frame_type] > idx + 1) {
@@ -295,21 +296,11 @@ static void read_drl_idx(FRAME_CONTEXT *ec_ctx, MACROBLOCKD *xd,
static MOTION_MODE read_motion_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
MODE_INFO *mi, aom_reader *r) {
MB_MODE_INFO *mbmi = &mi->mbmi;
-#if CONFIG_NEW_MULTISYMBOL
+#if !CONFIG_MOTION_VAR || !CONFIG_WARPED_MOTION || CONFIG_NEW_MULTISYMBOL || \
+ CONFIG_NCOBMC_ADAPT_WEIGHT
(void)cm;
#endif
-#if CONFIG_NCOBMC_ADAPT_WEIGHT
- const MOTION_MODE last_motion_mode_allowed =
- motion_mode_allowed_wrapper(0,
-#if CONFIG_GLOBAL_MOTION
- 0, xd->global_motion,
-#endif // CONFIG_GLOBAL_MOTION
-#if CONFIG_WARPED_MOTION
- xd,
-#endif
- mi);
-#else
const MOTION_MODE last_motion_mode_allowed = motion_mode_allowed(
#if CONFIG_GLOBAL_MOTION
0, xd->global_motion,
@@ -318,12 +309,24 @@ static MOTION_MODE read_motion_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
xd,
#endif
mi);
-#endif // CONFIG_NCOBMC_ADAPT_WEIGHT
int motion_mode;
FRAME_COUNTS *counts = xd->counts;
if (last_motion_mode_allowed == SIMPLE_TRANSLATION) return SIMPLE_TRANSLATION;
#if CONFIG_MOTION_VAR && CONFIG_WARPED_MOTION
+#if CONFIG_NCOBMC_ADAPT_WEIGHT
+ if (last_motion_mode_allowed == NCOBMC_ADAPT_WEIGHT) {
+ motion_mode = aom_read_symbol(r, xd->tile_ctx->ncobmc_cdf[mbmi->sb_type],
+ OBMC_FAMILY_MODES, ACCT_STR);
+ if (counts) ++counts->ncobmc[mbmi->sb_type][motion_mode];
+ return (MOTION_MODE)(SIMPLE_TRANSLATION + motion_mode);
+ } else if (last_motion_mode_allowed == OBMC_CAUSAL) {
+ motion_mode =
+ aom_read_symbol(r, xd->tile_ctx->obmc_cdf[mbmi->sb_type], 2, ACCT_STR);
+ if (counts) ++counts->obmc[mbmi->sb_type][motion_mode];
+ return (MOTION_MODE)(SIMPLE_TRANSLATION + motion_mode);
+ } else {
+#else
if (last_motion_mode_allowed == OBMC_CAUSAL) {
#if CONFIG_NEW_MULTISYMBOL
motion_mode =
@@ -334,6 +337,7 @@ static MOTION_MODE read_motion_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
if (counts) ++counts->obmc[mbmi->sb_type][motion_mode];
return (MOTION_MODE)(SIMPLE_TRANSLATION + motion_mode);
} else {
+#endif // CONFIG_NCOBMC_ADAPT_WEIGHT
#endif // CONFIG_MOTION_VAR && CONFIG_WARPED_MOTION
motion_mode =
aom_read_symbol(r, xd->tile_ctx->motion_mode_cdf[mbmi->sb_type],
@@ -347,18 +351,12 @@ static MOTION_MODE read_motion_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
#if CONFIG_NCOBMC_ADAPT_WEIGHT
static void read_ncobmc_mode(MACROBLOCKD *xd, MODE_INFO *mi,
-#ifndef TRAINING_WEIGHTS
- NCOBMC_MODE ncobmc_mode[2],
-#else
- NCOBMC_MODE ncobmc_mode[][4],
-#endif
- aom_reader *r) {
+ NCOBMC_MODE ncobmc_mode[2], aom_reader *r) {
MB_MODE_INFO *mbmi = &mi->mbmi;
FRAME_COUNTS *counts = xd->counts;
ADAPT_OVERLAP_BLOCK ao_block = adapt_overlap_block_lookup[mbmi->sb_type];
if (mbmi->motion_mode != NCOBMC_ADAPT_WEIGHT) return;
-#ifndef TRAINING_WEIGHTS
ncobmc_mode[0] = aom_read_symbol(r, xd->tile_ctx->ncobmc_mode_cdf[ao_block],
MAX_NCOBMC_MODES, ACCT_STR);
if (counts) ++counts->ncobmc_mode[ao_block][ncobmc_mode[0]];
@@ -368,27 +366,10 @@ static void read_ncobmc_mode(MACROBLOCKD *xd, MODE_INFO *mi,
MAX_NCOBMC_MODES, ACCT_STR);
if (counts) ++counts->ncobmc_mode[ao_block][ncobmc_mode[1]];
}
-#else
- int i;
- for (i = 0; i < 4; ++i) {
- ncobmc_mode[0][i] = aom_read_symbol(
- r, xd->tile_ctx->ncobmc_mode_cdf[ao_block], MAX_NCOBMC_MODES, ACCT_STR);
- if (counts) ++counts->ncobmc_mode[ao_block][ncobmc_mode[0][i]];
- }
- if (mi_size_wide[mbmi->sb_type] != mi_size_high[mbmi->sb_type]) {
- for (i = 0; i < 4; ++i) {
- ncobmc_mode[1][i] =
- aom_read_symbol(r, xd->tile_ctx->ncobmc_mode_cdf[ao_block],
- MAX_NCOBMC_MODES, ACCT_STR);
- if (counts) ++counts->ncobmc_mode[ao_block][ncobmc_mode[1][i]];
- }
- }
-#endif
}
-#endif
+#endif // CONFIG_NCOBMC_ADAPT_WEIGHT
#endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
-#if CONFIG_EXT_INTER
static PREDICTION_MODE read_inter_compound_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
aom_reader *r, int16_t ctx) {
(void)cm;
@@ -418,7 +399,6 @@ static PREDICTION_MODE read_inter_singleref_comp_mode(MACROBLOCKD *xd,
return SR_NEAREST_NEARMV + mode;
}
#endif // CONFIG_COMPOUND_SINGLEREF
-#endif // CONFIG_EXT_INTER
static int read_segment_id(aom_reader *r, struct segmentation_probs *segp) {
return aom_read_symbol(r, segp->tree_cdf, MAX_SEGMENTS, ACCT_STR);
@@ -445,6 +425,7 @@ static void read_tx_size_vartx(AV1_COMMON *cm, MACROBLOCKD *xd,
[MAX_MIB_SIZE] =
(TX_SIZE(*)[MAX_MIB_SIZE]) & mbmi->inter_tx_size[tx_row][tx_col];
if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
+ assert(tx_size > TX_4X4);
if (depth == MAX_VARTX_DEPTH) {
int idx, idy;
@@ -454,7 +435,6 @@ static void read_tx_size_vartx(AV1_COMMON *cm, MACROBLOCKD *xd,
inter_tx_size[idy][idx] = tx_size;
mbmi->tx_size = tx_size;
mbmi->min_tx_size = AOMMIN(mbmi->min_tx_size, get_min_tx_size(tx_size));
- if (counts) ++counts->txfm_partition[ctx][0];
txfm_partition_update(xd->above_txfm_context + blk_col,
xd->left_txfm_context + blk_row, tx_size, tx_size);
return;
@@ -473,7 +453,7 @@ static void read_tx_size_vartx(AV1_COMMON *cm, MACROBLOCKD *xd,
if (counts) ++counts->txfm_partition[ctx][1];
- if (tx_size == TX_8X8) {
+ if (sub_txs == TX_4X4) {
int idx, idy;
inter_tx_size[0][0] = sub_txs;
for (idy = 0; idy < tx_size_high_unit[tx_size] / 2; ++idy)
@@ -509,7 +489,7 @@ static void read_tx_size_vartx(AV1_COMMON *cm, MACROBLOCKD *xd,
#endif
static TX_SIZE read_selected_tx_size(AV1_COMMON *cm, MACROBLOCKD *xd,
- int tx_size_cat, aom_reader *r) {
+ int32_t tx_size_cat, aom_reader *r) {
FRAME_COUNTS *counts = xd->counts;
const int ctx = get_tx_size_context(xd);
FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
@@ -530,11 +510,8 @@ static TX_SIZE read_tx_size(AV1_COMMON *cm, MACROBLOCKD *xd, int is_inter,
const TX_MODE tx_mode = cm->tx_mode;
const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
if (xd->lossless[xd->mi[0]->mbmi.segment_id]) return TX_4X4;
-#if CONFIG_CB4X4 && (CONFIG_VAR_TX || CONFIG_EXT_TX) && CONFIG_RECT_TX
- if (bsize > BLOCK_4X4) {
-#else
- if (bsize >= BLOCK_8X8) {
-#endif // CONFIG_CB4X4 && CONFIG_VAR_TX
+
+ if (block_signals_txsize(bsize)) {
if ((!is_inter || allow_select_inter) && tx_mode == TX_MODE_SELECT) {
const int32_t tx_size_cat = is_inter ? inter_tx_size_cat_lookup[bsize]
: intra_tx_size_cat_lookup[bsize];
@@ -548,10 +525,14 @@ static TX_SIZE read_tx_size(AV1_COMMON *cm, MACROBLOCKD *xd, int is_inter,
int quarter_tx;
if (quarter_txsize_lookup[bsize] != max_txsize_lookup[bsize]) {
+#if CONFIG_NEW_MULTISYMBOL
+ quarter_tx =
+ aom_read_symbol(r, cm->fc->quarter_tx_size_cdf, 2, ACCT_STR);
+#else
quarter_tx = aom_read(r, cm->fc->quarter_tx_size_prob, ACCT_STR);
FRAME_COUNTS *counts = xd->counts;
-
if (counts) ++counts->quarter_tx_size[quarter_tx];
+#endif
} else {
quarter_tx = 1;
}
@@ -707,39 +688,55 @@ static int read_skip(AV1_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
}
}
-#if CONFIG_PALETTE
#if CONFIG_PALETTE_DELTA_ENCODING
-static int uint16_compare(const void *a, const void *b) {
- const uint16_t va = *(const uint16_t *)a;
- const uint16_t vb = *(const uint16_t *)b;
- return va - vb;
+// Merge the sorted list of cached colors (cached_colors[0...n_cached_colors-1])
+// with the sorted list of transmitted colors (colors[n_cached_colors...n-1])
+// into a single sorted list (colors[...]).
+static void merge_colors(uint16_t *colors, uint16_t *cached_colors,
+ int n_colors, int n_cached_colors) {
+ if (n_cached_colors == 0) return;
+ int cache_idx = 0, trans_idx = n_cached_colors;
+ for (int i = 0; i < n_colors; ++i) {
+ if (cache_idx < n_cached_colors &&
+ (trans_idx >= n_colors ||
+ cached_colors[cache_idx] <= colors[trans_idx])) {
+ colors[i] = cached_colors[cache_idx++];
+ } else {
+ assert(trans_idx < n_colors);
+ colors[i] = colors[trans_idx++];
+ }
+ }
}
static void read_palette_colors_y(MACROBLOCKD *const xd, int bit_depth,
PALETTE_MODE_INFO *const pmi, aom_reader *r) {
uint16_t color_cache[2 * PALETTE_MAX_SIZE];
- const MODE_INFO *const above_mi = xd->above_mi;
- const MODE_INFO *const left_mi = xd->left_mi;
- const int n_cache = av1_get_palette_cache(above_mi, left_mi, 0, color_cache);
+ uint16_t cached_colors[PALETTE_MAX_SIZE];
+ const int n_cache = av1_get_palette_cache(xd, 0, color_cache);
const int n = pmi->palette_size[0];
int idx = 0;
for (int i = 0; i < n_cache && idx < n; ++i)
- if (aom_read_bit(r, ACCT_STR)) pmi->palette_colors[idx++] = color_cache[i];
+ if (aom_read_bit(r, ACCT_STR)) cached_colors[idx++] = color_cache[i];
if (idx < n) {
+ const int n_cached_colors = idx;
pmi->palette_colors[idx++] = aom_read_literal(r, bit_depth, ACCT_STR);
if (idx < n) {
const int min_bits = bit_depth - 3;
int bits = min_bits + aom_read_literal(r, 2, ACCT_STR);
int range = (1 << bit_depth) - pmi->palette_colors[idx - 1] - 1;
for (; idx < n; ++idx) {
+ assert(range >= 0);
const int delta = aom_read_literal(r, bits, ACCT_STR) + 1;
- pmi->palette_colors[idx] = pmi->palette_colors[idx - 1] + delta;
- range -= delta;
+ pmi->palette_colors[idx] = clamp(pmi->palette_colors[idx - 1] + delta,
+ 0, (1 << bit_depth) - 1);
+ range -= (pmi->palette_colors[idx] - pmi->palette_colors[idx - 1]);
bits = AOMMIN(bits, av1_ceil_log2(range));
}
}
+ merge_colors(pmi->palette_colors, cached_colors, n, n_cached_colors);
+ } else {
+ memcpy(pmi->palette_colors, cached_colors, n * sizeof(cached_colors[0]));
}
- qsort(pmi->palette_colors, n, sizeof(pmi->palette_colors[0]), uint16_compare);
}
static void read_palette_colors_uv(MACROBLOCKD *const xd, int bit_depth,
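
The merge_colors() helper introduced above replaces the old qsort() call: the cached colors are already sorted, the transmitted colors are decoded in increasing order into the tail of the array, and the two runs are merged in place from the front. A self-contained sketch with made-up values:

#include <stdint.h>
#include <stdio.h>

/* Merge sorted cached[0..n_cached-1] with sorted colors[n_cached..n_colors-1]
 * into colors[0..n_colors-1], in place, smallest first (mirrors merge_colors()). */
static void merge_palette_colors(uint16_t *colors, const uint16_t *cached,
                                 int n_colors, int n_cached) {
  int cache_idx = 0, trans_idx = n_cached;
  if (n_cached == 0) return;
  for (int i = 0; i < n_colors; ++i) {
    if (cache_idx < n_cached &&
        (trans_idx >= n_colors || cached[cache_idx] <= colors[trans_idx])) {
      colors[i] = cached[cache_idx++];
    } else {
      colors[i] = colors[trans_idx++];
    }
  }
}

int main(void) {
  /* Two colors came from the neighbour cache; three were transmitted
   * (already sorted) and occupy the tail of the array. */
  const uint16_t cached[] = { 10, 200 };
  uint16_t colors[5] = { 0, 0, 30, 60, 90 };
  merge_palette_colors(colors, cached, 5, 2);
  for (int i = 0; i < 5; ++i) printf("%u ", colors[i]);
  printf("\n");  /* expected: 10 30 60 90 200 */
  return 0;
}
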
@@ -748,28 +745,34 @@ static void read_palette_colors_uv(MACROBLOCKD *const xd, int bit_depth,
const int n = pmi->palette_size[1];
// U channel colors.
uint16_t color_cache[2 * PALETTE_MAX_SIZE];
- const MODE_INFO *const above_mi = xd->above_mi;
- const MODE_INFO *const left_mi = xd->left_mi;
- const int n_cache = av1_get_palette_cache(above_mi, left_mi, 1, color_cache);
- int idx = PALETTE_MAX_SIZE;
- for (int i = 0; i < n_cache && idx < PALETTE_MAX_SIZE + n; ++i)
- if (aom_read_bit(r, ACCT_STR)) pmi->palette_colors[idx++] = color_cache[i];
- if (idx < PALETTE_MAX_SIZE + n) {
+ uint16_t cached_colors[PALETTE_MAX_SIZE];
+ const int n_cache = av1_get_palette_cache(xd, 1, color_cache);
+ int idx = 0;
+ for (int i = 0; i < n_cache && idx < n; ++i)
+ if (aom_read_bit(r, ACCT_STR)) cached_colors[idx++] = color_cache[i];
+ if (idx < n) {
+ const int n_cached_colors = idx;
+ idx += PALETTE_MAX_SIZE;
pmi->palette_colors[idx++] = aom_read_literal(r, bit_depth, ACCT_STR);
if (idx < PALETTE_MAX_SIZE + n) {
const int min_bits = bit_depth - 3;
int bits = min_bits + aom_read_literal(r, 2, ACCT_STR);
int range = (1 << bit_depth) - pmi->palette_colors[idx - 1];
for (; idx < PALETTE_MAX_SIZE + n; ++idx) {
+ assert(range >= 0);
const int delta = aom_read_literal(r, bits, ACCT_STR);
- pmi->palette_colors[idx] = pmi->palette_colors[idx - 1] + delta;
- range -= delta;
+ pmi->palette_colors[idx] = clamp(pmi->palette_colors[idx - 1] + delta,
+ 0, (1 << bit_depth) - 1);
+ range -= (pmi->palette_colors[idx] - pmi->palette_colors[idx - 1]);
bits = AOMMIN(bits, av1_ceil_log2(range));
}
}
+ merge_colors(pmi->palette_colors + PALETTE_MAX_SIZE, cached_colors, n,
+ n_cached_colors);
+ } else {
+ memcpy(pmi->palette_colors + PALETTE_MAX_SIZE, cached_colors,
+ n * sizeof(cached_colors[0]));
}
- qsort(pmi->palette_colors + PALETTE_MAX_SIZE, n,
- sizeof(pmi->palette_colors[0]), uint16_compare);
// V channel colors.
if (aom_read_bit(r, ACCT_STR)) { // Delta encoding.
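
The other change in these palette hunks is the delta decoding itself: each new color is the previous color plus a transmitted delta, the sum is now clamped to the valid range, and the bit width used for the next delta shrinks with the remaining headroom (bits = AOMMIN(bits, av1_ceil_log2(range))). A stand-alone arithmetic sketch, with fixed values standing in for the aom_read_literal() calls and a simplified ceil-log2:

#include <stdio.h>

#define TOY_BIT_DEPTH 8

/* Smallest k with (1 << k) >= n, for n >= 1 (simplified stand-in for
 * av1_ceil_log2()). */
static int toy_ceil_log2(int n) {
  int k = 0;
  while ((1 << k) < n) ++k;
  return k;
}

int main(void) {
  /* Y-plane style decoding: the base color is read raw, later colors are
   * coded as (delta - 1) in "bits" bits; the reads are replaced by fixed
   * toy values here. */
  const int n = 4;
  const int raw_deltas[] = { 0 /* unused */, 20, 19, 9 };
  int bits = (TOY_BIT_DEPTH - 3) + 1;  /* min_bits plus a 2-bit adjustment (1 here) */
  int colors[8];
  colors[0] = 200;                     /* base color read directly */
  int range = (1 << TOY_BIT_DEPTH) - colors[0] - 1;
  for (int idx = 1; idx < n; ++idx) {
    const int delta = raw_deltas[idx] + 1;          /* Y deltas coded minus one */
    int c = colors[idx - 1] + delta;
    if (c > (1 << TOY_BIT_DEPTH) - 1) c = (1 << TOY_BIT_DEPTH) - 1;  /* clamp */
    colors[idx] = c;
    range -= colors[idx] - colors[idx - 1];         /* remaining headroom */
    const int need = toy_ceil_log2(range);
    if (need < bits) bits = need;                   /* later deltas use fewer bits */
    printf("color[%d]=%d next_bits=%d\n", idx, colors[idx], bits);
  }
  return 0;
}
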
@@ -804,6 +807,10 @@ static void read_palette_mode_info(AV1_COMMON *const cm, MACROBLOCKD *const xd,
const BLOCK_SIZE bsize = mbmi->sb_type;
PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
+ assert(bsize >= BLOCK_8X8 && bsize <= BLOCK_LARGEST);
+ const int block_palette_idx = bsize - BLOCK_8X8;
+ int modev;
+
if (mbmi->mode == DC_PRED) {
int palette_y_mode_ctx = 0;
if (above_mi) {
@@ -814,12 +821,21 @@ static void read_palette_mode_info(AV1_COMMON *const cm, MACROBLOCKD *const xd,
palette_y_mode_ctx +=
(left_mi->mbmi.palette_mode_info.palette_size[0] > 0);
}
- if (aom_read(r, av1_default_palette_y_mode_prob[bsize - BLOCK_8X8]
- [palette_y_mode_ctx],
- ACCT_STR)) {
+#if CONFIG_NEW_MULTISYMBOL
+ modev = aom_read_symbol(
+ r,
+ xd->tile_ctx->palette_y_mode_cdf[block_palette_idx][palette_y_mode_ctx],
+ 2, ACCT_STR);
+#else
+ modev = aom_read(
+ r,
+ av1_default_palette_y_mode_prob[block_palette_idx][palette_y_mode_ctx],
+ ACCT_STR);
+#endif
+ if (modev) {
pmi->palette_size[0] =
aom_read_symbol(r,
- xd->tile_ctx->palette_y_size_cdf[bsize - BLOCK_8X8],
+ xd->tile_ctx->palette_y_size_cdf[block_palette_idx],
PALETTE_SIZES, ACCT_STR) +
2;
#if CONFIG_PALETTE_DELTA_ENCODING
@@ -830,14 +846,19 @@ static void read_palette_mode_info(AV1_COMMON *const cm, MACROBLOCKD *const xd,
#endif // CONFIG_PALETTE_DELTA_ENCODING
}
}
-
if (mbmi->uv_mode == UV_DC_PRED) {
const int palette_uv_mode_ctx = (pmi->palette_size[0] > 0);
- if (aom_read(r, av1_default_palette_uv_mode_prob[palette_uv_mode_ctx],
- ACCT_STR)) {
+#if CONFIG_NEW_MULTISYMBOL
+ modev = aom_read_symbol(
+ r, xd->tile_ctx->palette_uv_mode_cdf[palette_uv_mode_ctx], 2, ACCT_STR);
+#else
+ modev = aom_read(r, av1_default_palette_uv_mode_prob[palette_uv_mode_ctx],
+ ACCT_STR);
+#endif
+ if (modev) {
pmi->palette_size[1] =
aom_read_symbol(r,
- xd->tile_ctx->palette_uv_size_cdf[bsize - BLOCK_8X8],
+ xd->tile_ctx->palette_uv_size_cdf[block_palette_idx],
PALETTE_SIZES, ACCT_STR) +
2;
#if CONFIG_PALETTE_DELTA_ENCODING
@@ -853,7 +874,6 @@ static void read_palette_mode_info(AV1_COMMON *const cm, MACROBLOCKD *const xd,
}
}
}
-#endif // CONFIG_PALETTE
#if CONFIG_FILTER_INTRA
static void read_filter_intra_mode_info(AV1_COMMON *const cm,
@@ -865,11 +885,7 @@ static void read_filter_intra_mode_info(AV1_COMMON *const cm,
FILTER_INTRA_MODE_INFO *filter_intra_mode_info =
&mbmi->filter_intra_mode_info;
- if (mbmi->mode == DC_PRED
-#if CONFIG_PALETTE
- && mbmi->palette_mode_info.palette_size[0] == 0
-#endif // CONFIG_PALETTE
- ) {
+ if (mbmi->mode == DC_PRED && mbmi->palette_mode_info.palette_size[0] == 0) {
filter_intra_mode_info->use_filter_intra_mode[0] =
aom_read(r, cm->fc->filter_intra_probs[0], ACCT_STR);
if (filter_intra_mode_info->use_filter_intra_mode[0]) {
@@ -892,11 +908,8 @@ static void read_filter_intra_mode_info(AV1_COMMON *const cm,
(void)mi_col;
#endif // CONFIG_CB4X4
- if (mbmi->uv_mode == UV_DC_PRED
-#if CONFIG_PALETTE
- && mbmi->palette_mode_info.palette_size[1] == 0
-#endif // CONFIG_PALETTE
- ) {
+ if (mbmi->uv_mode == UV_DC_PRED &&
+ mbmi->palette_mode_info.palette_size[1] == 0) {
filter_intra_mode_info->use_filter_intra_mode[1] =
aom_read(r, cm->fc->filter_intra_probs[1], ACCT_STR);
if (filter_intra_mode_info->use_filter_intra_mode[1]) {
@@ -926,6 +939,9 @@ static void read_intra_angle_info(AV1_COMMON *const cm, MACROBLOCKD *const xd,
mbmi->angle_delta[0] = 0;
mbmi->angle_delta[1] = 0;
+#if CONFIG_INTRA_INTERP
+ mbmi->intra_filter = INTRA_FILTER_LINEAR;
+#endif // CONFIG_INTRA_INTERP
if (!av1_use_angle_delta(bsize)) return;
@@ -939,8 +955,6 @@ static void read_intra_angle_info(AV1_COMMON *const cm, MACROBLOCKD *const xd,
mbmi->intra_filter = aom_read_symbol(r, ec_ctx->intra_filter_cdf[ctx],
INTRA_FILTERS, ACCT_STR);
if (counts) ++counts->intra_filter[ctx][mbmi->intra_filter];
- } else {
- mbmi->intra_filter = INTRA_FILTER_LINEAR;
}
#endif // CONFIG_INTRA_INTERP
}
@@ -980,6 +994,9 @@ void av1_read_tx_type(const AV1_COMMON *const cm, MACROBLOCKD *xd,
(void)block;
TX_TYPE *tx_type = &mbmi->txk_type[(blk_row << 4) + blk_col];
#endif
+#if CONFIG_LGT_FROM_PRED
+ mbmi->use_lgt = 0;
+#endif
if (!FIXED_TX_TYPE) {
#if CONFIG_EXT_TX
@@ -993,29 +1010,91 @@ void av1_read_tx_type(const AV1_COMMON *const cm, MACROBLOCKD *xd,
!supertx_enabled &&
#endif // CONFIG_SUPERTX
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
+ const TxSetType tx_set_type = get_ext_tx_set_type(
+ tx_size, mbmi->sb_type, inter_block, cm->reduced_tx_set_used);
const int eset = get_ext_tx_set(tx_size, mbmi->sb_type, inter_block,
cm->reduced_tx_set_used);
// eset == 0 should correspond to a set with only DCT_DCT and
// there is no need to read the tx_type
assert(eset != 0);
- FRAME_COUNTS *counts = xd->counts;
+#if !CONFIG_LGT_FROM_PRED
if (inter_block) {
- *tx_type = av1_ext_tx_inter_inv[eset][aom_read_symbol(
+ *tx_type = av1_ext_tx_inv[tx_set_type][aom_read_symbol(
r, ec_ctx->inter_ext_tx_cdf[eset][square_tx_size],
- ext_tx_cnt_inter[eset], ACCT_STR)];
- if (counts) ++counts->inter_ext_tx[eset][square_tx_size][*tx_type];
+ av1_num_ext_tx_set[tx_set_type], ACCT_STR)];
} else if (ALLOW_INTRA_EXT_TX) {
- *tx_type = av1_ext_tx_intra_inv[eset][aom_read_symbol(
+ *tx_type = av1_ext_tx_inv[tx_set_type][aom_read_symbol(
r, ec_ctx->intra_ext_tx_cdf[eset][square_tx_size][mbmi->mode],
- ext_tx_cnt_intra[eset], ACCT_STR)];
- if (counts)
- ++counts->intra_ext_tx[eset][square_tx_size][mbmi->mode][*tx_type];
+ av1_num_ext_tx_set[tx_set_type], ACCT_STR)];
}
+#else
+ // only signal tx_type when lgt is not allowed or not selected
+ if (inter_block) {
+ if (LGT_FROM_PRED_INTER) {
+ if (is_lgt_allowed(mbmi->mode, tx_size) && !cm->reduced_tx_set_used) {
+ mbmi->use_lgt =
+ aom_read(r, ec_ctx->inter_lgt_prob[square_tx_size], ACCT_STR);
+#if CONFIG_ENTROPY_STATS
+ if (counts) ++counts->inter_lgt[square_tx_size][mbmi->use_lgt];
+#endif // CONFIG_ENTROPY_STATS
+ }
+ if (!mbmi->use_lgt) {
+ *tx_type = av1_ext_tx_inv[tx_set_type][aom_read_symbol(
+ r, ec_ctx->inter_ext_tx_cdf[eset][square_tx_size],
+ av1_num_ext_tx_set[tx_set_type], ACCT_STR)];
+#if CONFIG_ENTROPY_STATS
+ if (counts) ++counts->inter_ext_tx[eset][square_tx_size][*tx_type];
+#endif // CONFIG_ENTROPY_STATS
+ } else {
+ *tx_type = DCT_DCT; // assign a dummy tx_type
+ }
+ } else {
+ *tx_type = av1_ext_tx_inv[tx_set_type][aom_read_symbol(
+ r, ec_ctx->inter_ext_tx_cdf[eset][square_tx_size],
+ av1_num_ext_tx_set[tx_set_type], ACCT_STR)];
+#if CONFIG_ENTROPY_STATS
+ if (counts) ++counts->inter_ext_tx[eset][square_tx_size][*tx_type];
+#endif // CONFIG_ENTROPY_STATS
+ }
+ } else if (ALLOW_INTRA_EXT_TX) {
+ if (LGT_FROM_PRED_INTRA) {
+ if (is_lgt_allowed(mbmi->mode, tx_size) && !cm->reduced_tx_set_used) {
+ mbmi->use_lgt =
+ aom_read(r, ec_ctx->intra_lgt_prob[square_tx_size][mbmi->mode],
+ ACCT_STR);
+#if CONFIG_ENTROPY_STATS
+ if (counts)
+ ++counts->intra_lgt[square_tx_size][mbmi->mode][mbmi->use_lgt];
+#endif // CONFIG_ENTROPY_STATS
+ }
+ if (!mbmi->use_lgt) {
+ *tx_type = av1_ext_tx_inv[tx_set_type][aom_read_symbol(
+ r, ec_ctx->intra_ext_tx_cdf[eset][square_tx_size][mbmi->mode],
+ av1_num_ext_tx_set[tx_set_type], ACCT_STR)];
+#if CONFIG_ENTROPY_STATS
+ if (counts)
+ ++counts
+ ->intra_ext_tx[eset][square_tx_size][mbmi->mode][*tx_type];
+#endif // CONFIG_ENTROPY_STATS
+ } else {
+ *tx_type = DCT_DCT; // assign a dummy tx_type
+ }
+ } else {
+ *tx_type = av1_ext_tx_inv[tx_set_type][aom_read_symbol(
+ r, ec_ctx->intra_ext_tx_cdf[eset][square_tx_size][mbmi->mode],
+ av1_num_ext_tx_set[tx_set_type], ACCT_STR)];
+#if CONFIG_ENTROPY_STATS
+ if (counts)
+ ++counts->intra_ext_tx[eset][square_tx_size][mbmi->mode][*tx_type];
+#endif // CONFIG_ENTROPY_STATS
+ }
+ }
+#endif // CONFIG_LGT_FROM_PRED
} else {
*tx_type = DCT_DCT;
}
-#else
+#else // CONFIG_EXT_TX
if (tx_size < TX_32X32 &&
((!cm->seg.enabled && cm->base_qindex > 0) ||
@@ -1025,18 +1104,23 @@ void av1_read_tx_type(const AV1_COMMON *const cm, MACROBLOCKD *xd,
!supertx_enabled &&
#endif // CONFIG_SUPERTX
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
+#if CONFIG_ENTROPY_STATS
FRAME_COUNTS *counts = xd->counts;
-
+#endif // CONFIG_ENTROPY_STATS
if (inter_block) {
*tx_type = av1_ext_tx_inv[aom_read_symbol(
r, ec_ctx->inter_ext_tx_cdf[tx_size], TX_TYPES, ACCT_STR)];
+#if CONFIG_ENTROPY_STATS
if (counts) ++counts->inter_ext_tx[tx_size][*tx_type];
+#endif // CONFIG_ENTROPY_STATS
} else {
const TX_TYPE tx_type_nom = intra_mode_to_tx_type_context[mbmi->mode];
*tx_type = av1_ext_tx_inv[aom_read_symbol(
r, ec_ctx->intra_ext_tx_cdf[tx_size][tx_type_nom], TX_TYPES,
ACCT_STR)];
+#if CONFIG_ENTROPY_STATS
if (counts) ++counts->intra_ext_tx[tx_size][tx_type_nom][*tx_type];
+#endif // CONFIG_ENTROPY_STATS
}
} else {
*tx_type = DCT_DCT;
@@ -1091,7 +1175,6 @@ static void read_intra_frame_mode_info(AV1_COMMON *const cm,
mbmi->segment_id = read_intra_segment_id(cm, xd, mi_offset, x_mis, y_mis, r);
mbmi->skip = read_skip(cm, xd, mbmi->segment_id, r);
-#if CONFIG_DELTA_Q
if (cm->delta_q_present_flag) {
xd->current_qindex =
xd->prev_qindex +
@@ -1101,40 +1184,52 @@ static void read_intra_frame_mode_info(AV1_COMMON *const cm,
xd->prev_qindex = xd->current_qindex;
#if CONFIG_EXT_DELTA_Q
if (cm->delta_lf_present_flag) {
- mbmi->current_delta_lf_from_base = xd->current_delta_lf_from_base =
+#if CONFIG_LOOPFILTER_LEVEL
+ if (cm->delta_lf_multi) {
+ for (int lf_id = 0; lf_id < FRAME_LF_COUNT; ++lf_id) {
+ mbmi->curr_delta_lf[lf_id] = xd->curr_delta_lf[lf_id] =
+ xd->prev_delta_lf[lf_id] +
+ read_delta_lflevel(cm, xd, r, lf_id, mbmi, mi_col, mi_row) *
+ cm->delta_lf_res;
+ xd->prev_delta_lf[lf_id] = xd->curr_delta_lf[lf_id];
+ }
+ } else {
+ mbmi->current_delta_lf_from_base = xd->current_delta_lf_from_base =
+ xd->prev_delta_lf_from_base +
+ read_delta_lflevel(cm, xd, r, -1, mbmi, mi_col, mi_row) *
+ cm->delta_lf_res;
+ xd->prev_delta_lf_from_base = xd->current_delta_lf_from_base;
+ }
+#else
+ const int current_delta_lf_from_base =
xd->prev_delta_lf_from_base +
read_delta_lflevel(cm, xd, r, mbmi, mi_col, mi_row) *
cm->delta_lf_res;
+ mbmi->current_delta_lf_from_base = xd->current_delta_lf_from_base =
+ clamp(current_delta_lf_from_base, 0, MAX_LOOP_FILTER);
xd->prev_delta_lf_from_base = xd->current_delta_lf_from_base;
+#endif // CONFIG_LOOPFILTER_LEVEL
}
#endif
}
-#endif
mbmi->ref_frame[0] = INTRA_FRAME;
mbmi->ref_frame[1] = NONE_FRAME;
#if CONFIG_INTRABC
- if (bsize >= BLOCK_8X8 && cm->allow_screen_content_tools) {
- mbmi->use_intrabc = aom_read(r, ec_ctx->intrabc_prob, ACCT_STR);
+ if (av1_allow_intrabc(bsize, cm)) {
+ mbmi->use_intrabc = aom_read_symbol(r, ec_ctx->intrabc_cdf, 2, ACCT_STR);
if (mbmi->use_intrabc) {
mbmi->tx_size = read_tx_size(cm, xd, 1, !mbmi->skip, r);
mbmi->mode = mbmi->uv_mode = UV_DC_PRED;
-#if CONFIG_DUAL_FILTER
- for (int idx = 0; idx < 4; ++idx) mbmi->interp_filter[idx] = BILINEAR;
-#else
- mbmi->interp_filter = BILINEAR;
-#endif
+ mbmi->interp_filters = av1_broadcast_interp_filter(BILINEAR);
int16_t inter_mode_ctx[MODE_CTX_REF_FRAMES];
- int_mv ref_mvs[MAX_MV_REF_CANDIDATES] = {};
+ int_mv ref_mvs[MAX_MV_REF_CANDIDATES];
av1_find_mv_refs(cm, xd, mi, INTRA_FRAME, &xd->ref_mv_count[INTRA_FRAME],
- xd->ref_mv_stack[INTRA_FRAME],
-#if CONFIG_EXT_INTER
- NULL,
-#endif // CONFIG_EXT_INTER
- ref_mvs, mi_row, mi_col, NULL, NULL, inter_mode_ctx);
+ xd->ref_mv_stack[INTRA_FRAME], NULL, ref_mvs, mi_row,
+ mi_col, NULL, NULL, inter_mode_ctx);
int_mv nearestmv, nearmv;
av1_find_best_ref_mvs(0, ref_mvs, &nearestmv, &nearmv);
@@ -1201,15 +1296,18 @@ static void read_intra_frame_mode_info(AV1_COMMON *const cm,
#if CONFIG_CB4X4
if (is_chroma_reference(mi_row, mi_col, bsize, xd->plane[1].subsampling_x,
xd->plane[1].subsampling_y)) {
- mbmi->uv_mode = read_intra_mode_uv(ec_ctx, xd, r, mbmi->mode);
-#else
- mbmi->uv_mode = read_intra_mode_uv(ec_ctx, xd, r, mbmi->mode);
-#endif
+#if CONFIG_CFL
+ xd->cfl->is_chroma_reference = 1;
+#endif // CONFIG_CFL
+#endif // CONFIG_CB4X4
+ mbmi->uv_mode = read_intra_mode_uv(ec_ctx, r, mbmi->mode);
#if CONFIG_CFL
- // TODO(ltrudeau) support PALETTE
- if (mbmi->uv_mode == UV_DC_PRED) {
- mbmi->cfl_alpha_idx = read_cfl_alphas(ec_ctx, r, mbmi->cfl_alpha_signs);
+ if (mbmi->uv_mode == UV_CFL_PRED) {
+ mbmi->cfl_alpha_idx = read_cfl_alphas(ec_ctx, r, &mbmi->cfl_alpha_signs);
+ xd->cfl->store_y = 1;
+ } else {
+ xd->cfl->store_y = 0;
}
#endif // CONFIG_CFL
@@ -1217,18 +1315,20 @@ static void read_intra_frame_mode_info(AV1_COMMON *const cm,
} else {
// Avoid decoding angle_info if there is no chroma prediction
mbmi->uv_mode = UV_DC_PRED;
+#if CONFIG_CFL
+ xd->cfl->is_chroma_reference = 0;
+ xd->cfl->store_y = 1;
+#endif
}
#endif
#if CONFIG_EXT_INTRA
read_intra_angle_info(cm, xd, r);
#endif // CONFIG_EXT_INTRA
-#if CONFIG_PALETTE
mbmi->palette_mode_info.palette_size[0] = 0;
mbmi->palette_mode_info.palette_size[1] = 0;
- if (bsize >= BLOCK_8X8 && cm->allow_screen_content_tools)
+ if (av1_allow_palette(cm->allow_screen_content_tools, bsize))
read_palette_mode_info(cm, xd, r);
-#endif // CONFIG_PALETTE
#if CONFIG_FILTER_INTRA
mbmi->filter_intra_mode_info.use_filter_intra_mode[0] = 0;
mbmi->filter_intra_mode_info.use_filter_intra_mode[1] = 0;
@@ -1246,9 +1346,9 @@ static void read_intra_frame_mode_info(AV1_COMMON *const cm,
}
static int read_mv_component(aom_reader *r, nmv_component *mvcomp,
-#if CONFIG_INTRABC
+#if CONFIG_INTRABC || CONFIG_AMVR
int use_subpel,
-#endif // CONFIG_INTRABC
+#endif // CONFIG_INTRABC || CONFIG_AMVR
int usehp) {
int mag, d, fr, hp;
#if CONFIG_NEW_MULTISYMBOL
@@ -1271,15 +1371,19 @@ static int read_mv_component(aom_reader *r, nmv_component *mvcomp,
} else {
int i;
const int n = mv_class + CLASS0_BITS - 1; // number of bits
-
d = 0;
+#if CONFIG_NEW_MULTISYMBOL
+ for (i = 0; i < n; ++i)
+ d |= aom_read_symbol(r, mvcomp->bits_cdf[(i + 1) / 2], 2, ACCT_STR) << i;
+#else
for (i = 0; i < n; ++i) d |= aom_read(r, mvcomp->bits[i], ACCT_STR) << i;
+#endif
mag = CLASS0_SIZE << (mv_class + 2);
}
-#if CONFIG_INTRABC
+#if CONFIG_INTRABC || CONFIG_AMVR
if (use_subpel) {
-#endif // CONFIG_INTRABC
+#endif // CONFIG_INTRABC || CONFIG_AMVR
// Fractional part
fr = aom_read_symbol(r, class0 ? mvcomp->class0_fp_cdf[d] : mvcomp->fp_cdf,
MV_FP_SIZE, ACCT_STR);
@@ -1294,12 +1398,12 @@ static int read_mv_component(aom_reader *r, nmv_component *mvcomp,
hp = usehp ? aom_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp, ACCT_STR)
: 1;
#endif
-#if CONFIG_INTRABC
+#if CONFIG_INTRABC || CONFIG_AMVR
} else {
fr = 3;
hp = 1;
}
-#endif // CONFIG_INTRABC
+#endif // CONFIG_INTRABC || CONFIG_AMVR
// Result
mag += ((d << 3) | (fr << 1) | hp) + 1;
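
The read_mv_component() changes above extend the integer-pel-only path (fr = 3, hp = 1) from CONFIG_INTRABC to CONFIG_AMVR, while the magnitude is still composed from the class base plus ((d << 3) | (fr << 1) | hp) + 1 in 1/8-pel units. A small arithmetic sketch of that composition; TOY_CLASS0_SIZE = 2 is assumed here rather than taken from the aom headers.

#include <stdio.h>

#define TOY_CLASS0_SIZE 2  /* assumed value of CLASS0_SIZE */

/* Compose an MV component magnitude from the decoded fields, mirroring the
 * "mag += ((d << 3) | (fr << 1) | hp) + 1" line above. When use_subpel is 0
 * (the CONFIG_INTRABC || CONFIG_AMVR path), fr and hp are forced to 3 and 1
 * so the result stays a whole-pel offset in 1/8-pel units. */
static int compose_mv_component(int mv_class, int d, int fr, int hp,
                                int use_subpel) {
  int mag = (mv_class == 0) ? 0 : (TOY_CLASS0_SIZE << (mv_class + 2));
  if (!use_subpel) {
    fr = 3;
    hp = 1;
  }
  return mag + (((d << 3) | (fr << 1) | hp) + 1);
}

int main(void) {
  /* Class 0, integer offset 1, no fractional part, subpel signalled: 9/8 pel. */
  printf("%d\n", compose_mv_component(0, 1, 0, 0, 1)); /* prints 9 */
  /* Integer-pel-only path: the result is always a multiple of 8 (here 3 pels). */
  printf("%d\n", compose_mv_component(1, 0, 0, 0, 0)); /* prints 24 */
  return 0;
}
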
@@ -1316,16 +1420,16 @@ static INLINE void read_mv(aom_reader *r, MV *mv, const MV *ref,
if (mv_joint_vertical(joint_type))
diff.row = read_mv_component(r, &ctx->comps[0],
-#if CONFIG_INTRABC
+#if CONFIG_INTRABC || CONFIG_AMVR
precision > MV_SUBPEL_NONE,
-#endif // CONFIG_INTRABC
+#endif // CONFIG_INTRABC || CONFIG_AMVR
precision > MV_SUBPEL_LOW_PRECISION);
if (mv_joint_horizontal(joint_type))
diff.col = read_mv_component(r, &ctx->comps[1],
-#if CONFIG_INTRABC
+#if CONFIG_INTRABC || CONFIG_AMVR
precision > MV_SUBPEL_NONE,
-#endif // CONFIG_INTRABC
+#endif // CONFIG_INTRABC || CONFIG_AMVR
precision > MV_SUBPEL_LOW_PRECISION);
av1_inc_mv(&diff, counts, precision);
@@ -1337,9 +1441,7 @@ static INLINE void read_mv(aom_reader *r, MV *mv, const MV *ref,
static REFERENCE_MODE read_block_reference_mode(AV1_COMMON *cm,
const MACROBLOCKD *xd,
aom_reader *r) {
-#if !SUB8X8_COMP_REF
- if (xd->mi[0]->mbmi.sb_type == BLOCK_4X4) return SINGLE_REFERENCE;
-#endif
+ if (!is_comp_ref_allowed(xd->mi[0]->mbmi.sb_type)) return SINGLE_REFERENCE;
if (cm->reference_mode == REFERENCE_MODE_SELECT) {
const int ctx = av1_get_reference_mode_context(cm, xd);
#if CONFIG_NEW_MULTISYMBOL
@@ -1360,29 +1462,41 @@ static REFERENCE_MODE read_block_reference_mode(AV1_COMMON *cm,
#if CONFIG_NEW_MULTISYMBOL
#define READ_REF_BIT(pname) \
aom_read_symbol(r, av1_get_pred_cdf_##pname(cm, xd), 2, ACCT_STR)
+#define READ_REF_BIT2(pname) \
+ aom_read_symbol(r, av1_get_pred_cdf_##pname(xd), 2, ACCT_STR)
#else
#define READ_REF_BIT(pname) \
aom_read(r, av1_get_pred_prob_##pname(cm, xd), ACCT_STR)
+#define READ_REF_BIT2(pname) \
+ aom_read(r, av1_get_pred_prob_##pname(cm, xd), ACCT_STR)
#endif
#if CONFIG_EXT_COMP_REFS
-static REFERENCE_MODE read_comp_reference_type(AV1_COMMON *cm,
- const MACROBLOCKD *xd,
- aom_reader *r) {
+static COMP_REFERENCE_TYPE read_comp_reference_type(AV1_COMMON *cm,
+ const MACROBLOCKD *xd,
+ aom_reader *r) {
const int ctx = av1_get_comp_reference_type_context(xd);
#if USE_UNI_COMP_REFS
COMP_REFERENCE_TYPE comp_ref_type;
#if CONFIG_VAR_REFS
- if ((L_OR_L2(cm) || L3_OR_G(cm)) && BWD_OR_ALT(cm))
- if (L_AND_L2(cm) || L_AND_L3(cm) || L_AND_G(cm) || BWD_AND_ALT(cm))
+ if ((L_OR_L2(cm) || L3_OR_G(cm)) && BWD_OR_ALT(cm)) {
+ if (L_AND_L2(cm) || L_AND_L3(cm) || L_AND_G(cm) || BWD_AND_ALT(cm)) {
#endif // CONFIG_VAR_REFS
- comp_ref_type = (COMP_REFERENCE_TYPE)aom_read(
- r, cm->fc->comp_ref_type_prob[ctx], ACCT_STR);
+#if CONFIG_NEW_MULTISYMBOL
+ (void)cm;
+ comp_ref_type = (COMP_REFERENCE_TYPE)aom_read_symbol(
+ r, xd->tile_ctx->comp_ref_type_cdf[ctx], 2, ACCT_STR);
+#else
+ comp_ref_type = (COMP_REFERENCE_TYPE)aom_read(
+ r, cm->fc->comp_ref_type_prob[ctx], ACCT_STR);
+#endif
#if CONFIG_VAR_REFS
- else
+ } else {
comp_ref_type = BIDIR_COMP_REFERENCE;
- else
+ }
+ } else {
comp_ref_type = UNIDIR_COMP_REFERENCE;
+ }
#endif // CONFIG_VAR_REFS
#else // !USE_UNI_COMP_REFS
// TODO(zoeliu): Temporarily turn off uni-directional comp refs
@@ -1398,9 +1512,6 @@ static REFERENCE_MODE read_comp_reference_type(AV1_COMMON *cm,
static void read_ref_frames(AV1_COMMON *const cm, MACROBLOCKD *const xd,
aom_reader *r, int segment_id,
MV_REFERENCE_FRAME ref_frame[2]) {
-#if CONFIG_EXT_COMP_REFS
- FRAME_CONTEXT *const fc = cm->fc;
-#endif
FRAME_COUNTS *counts = xd->counts;
if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
@@ -1426,7 +1537,7 @@ static void read_ref_frames(AV1_COMMON *const cm, MACROBLOCKD *const xd,
#if CONFIG_VAR_REFS
if ((L_AND_L2(cm) || L_AND_L3(cm) || L_AND_G(cm)) && BWD_AND_ALT(cm))
#endif // CONFIG_VAR_REFS
- bit = aom_read(r, fc->uni_comp_ref_prob[ctx][0], ACCT_STR);
+ bit = READ_REF_BIT2(uni_comp_ref_p);
#if CONFIG_VAR_REFS
else
bit = BWD_AND_ALT(cm);
@@ -1442,7 +1553,7 @@ static void read_ref_frames(AV1_COMMON *const cm, MACROBLOCKD *const xd,
#if CONFIG_VAR_REFS
if (L_AND_L2(cm) && (L_AND_L3(cm) || L_AND_G(cm)))
#endif // CONFIG_VAR_REFS
- bit1 = aom_read(r, fc->uni_comp_ref_prob[ctx1][1], ACCT_STR);
+ bit1 = READ_REF_BIT2(uni_comp_ref_p1);
#if CONFIG_VAR_REFS
else
bit1 = L_AND_L3(cm) || L_AND_G(cm);
@@ -1455,7 +1566,7 @@ static void read_ref_frames(AV1_COMMON *const cm, MACROBLOCKD *const xd,
#if CONFIG_VAR_REFS
if (L_AND_L3(cm) && L_AND_G(cm))
#endif // CONFIG_VAR_REFS
- bit2 = aom_read(r, fc->uni_comp_ref_prob[ctx2][2], ACCT_STR);
+ bit2 = READ_REF_BIT2(uni_comp_ref_p2);
#if CONFIG_VAR_REFS
else
bit2 = L_AND_G(cm);
@@ -1482,15 +1593,15 @@ static void read_ref_frames(AV1_COMMON *const cm, MACROBLOCKD *const xd,
#endif // CONFIG_EXT_COMP_REFS
// Normative in decoder (for low delay)
-#if CONFIG_ONE_SIDED_COMPOUND || CONFIG_EXT_COMP_REFS
+#if CONFIG_ONE_SIDED_COMPOUND || CONFIG_FRAME_SIGN_BIAS
const int idx = 1;
-#else // !(CONFIG_ONE_SIDED_COMPOUND || CONFIG_EXT_COMP_REFS)
+#else // !(CONFIG_ONE_SIDED_COMPOUND || CONFIG_FRAME_SIGN_BIAS)
#if CONFIG_EXT_REFS
const int idx = cm->ref_frame_sign_bias[cm->comp_bwd_ref[0]];
#else // !CONFIG_EXT_REFS
const int idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
#endif // CONFIG_EXT_REFS
-#endif // CONFIG_ONE_SIDED_COMPOUND || CONFIG_EXT_COMP_REFS
+#endif // CONFIG_ONE_SIDED_COMPOUND || CONFIG_FRAME_SIGN_BIAS
const int ctx = av1_get_pred_context_comp_ref_p(cm, xd);
#if CONFIG_VAR_REFS
@@ -1541,12 +1652,8 @@ static void read_ref_frames(AV1_COMMON *const cm, MACROBLOCKD *const xd,
const int ctx_bwd = av1_get_pred_context_comp_bwdref_p(cm, xd);
#if CONFIG_VAR_REFS
int bit_bwd;
-// Test need to explicitly code (BWD/ALT2) vs (ALT) branch node in tree
-#if CONFIG_ALTREF2
+ // Test need to explicitly code (BWD/ALT2) vs (ALT) branch node in tree
const int bit_bwd_uncertain = BWD_OR_ALT2(cm) && ALTREF_IS_VALID(cm);
-#else // !CONFIG_ALTREF2
- const int bit_bwd_uncertain = BWD_AND_ALT(cm);
-#endif // CONFIG_ALTREF2
if (bit_bwd_uncertain)
bit_bwd = READ_REF_BIT(comp_bwdref_p);
else
@@ -1555,7 +1662,6 @@ static void read_ref_frames(AV1_COMMON *const cm, MACROBLOCKD *const xd,
const int bit_bwd = READ_REF_BIT(comp_bwdref_p);
#endif // CONFIG_VAR_REFS
if (counts) ++counts->comp_bwdref[ctx_bwd][0][bit_bwd];
-#if CONFIG_ALTREF2
if (!bit_bwd) {
const int ctx1_bwd = av1_get_pred_context_comp_bwdref_p1(cm, xd);
#if CONFIG_VAR_REFS
@@ -1572,9 +1678,6 @@ static void read_ref_frames(AV1_COMMON *const cm, MACROBLOCKD *const xd,
} else {
ref_frame[idx] = cm->comp_bwd_ref[2];
}
-#else // !CONFIG_ALTREF2
- ref_frame[idx] = cm->comp_bwd_ref[bit_bwd];
-#endif // CONFIG_ALTREF2
#else // !CONFIG_EXT_REFS
ref_frame[!idx] = cm->comp_var_ref[bit];
ref_frame[idx] = cm->comp_fixed_ref;
@@ -1584,12 +1687,13 @@ static void read_ref_frames(AV1_COMMON *const cm, MACROBLOCKD *const xd,
const int ctx0 = av1_get_pred_context_single_ref_p1(xd);
#if CONFIG_VAR_REFS
int bit0;
- // Test need to explicitly code (L,L2,L3,G) vs (BWD,ALT) branch node in
- // tree
- if ((L_OR_L2(cm) || L3_OR_G(cm)) && BWD_OR_ALT(cm))
+ // Test need to explicitly code (L,L2,L3,G) vs (BWD,ALT2,ALT) branch node
+ // in tree
+ if ((L_OR_L2(cm) || L3_OR_G(cm)) &&
+ (BWD_OR_ALT2(cm) || ALTREF_IS_VALID(cm)))
bit0 = READ_REF_BIT(single_ref_p1);
else
- bit0 = BWD_OR_ALT(cm);
+ bit0 = (BWD_OR_ALT2(cm) || ALTREF_IS_VALID(cm));
#else // !CONFIG_VAR_REFS
const int bit0 = READ_REF_BIT(single_ref_p1);
#endif // CONFIG_VAR_REFS
@@ -1599,12 +1703,8 @@ static void read_ref_frames(AV1_COMMON *const cm, MACROBLOCKD *const xd,
const int ctx1 = av1_get_pred_context_single_ref_p2(xd);
#if CONFIG_VAR_REFS
int bit1;
-// Test need to explicitly code (BWD/ALT2) vs (ALT) branch node in tree
-#if CONFIG_ALTREF2
+ // Test need to explicitly code (BWD/ALT2) vs (ALT) branch node in tree
const int bit1_uncertain = BWD_OR_ALT2(cm) && ALTREF_IS_VALID(cm);
-#else // !CONFIG_ALTREF2
- const int bit1_uncertain = BWD_AND_ALT(cm);
-#endif // CONFIG_ALTREF2
if (bit1_uncertain)
bit1 = READ_REF_BIT(single_ref_p2);
else
@@ -1613,7 +1713,6 @@ static void read_ref_frames(AV1_COMMON *const cm, MACROBLOCKD *const xd,
const int bit1 = READ_REF_BIT(single_ref_p2);
#endif // CONFIG_VAR_REFS
if (counts) ++counts->single_ref[ctx1][1][bit1];
-#if CONFIG_ALTREF2
if (!bit1) {
const int ctx5 = av1_get_pred_context_single_ref_p6(xd);
#if CONFIG_VAR_REFS
@@ -1630,9 +1729,6 @@ static void read_ref_frames(AV1_COMMON *const cm, MACROBLOCKD *const xd,
} else {
ref_frame[0] = ALTREF_FRAME;
}
-#else // !CONFIG_ALTREF2
- ref_frame[0] = bit1 ? ALTREF_FRAME : BWDREF_FRAME;
-#endif // CONFIG_ALTREF2
} else {
const int ctx2 = av1_get_pred_context_single_ref_p3(xd);
#if CONFIG_VAR_REFS
@@ -1710,46 +1806,33 @@ static INLINE void read_mb_interp_filter(AV1_COMMON *const cm,
return;
}
-#if CONFIG_DUAL_FILTER
if (cm->interp_filter != SWITCHABLE) {
- int dir;
-
- for (dir = 0; dir < 4; ++dir) mbmi->interp_filter[dir] = cm->interp_filter;
+ mbmi->interp_filters = av1_broadcast_interp_filter(cm->interp_filter);
} else {
- int dir;
-
- for (dir = 0; dir < 2; ++dir) {
- const int ctx = av1_get_pred_context_switchable_interp(xd, dir);
- mbmi->interp_filter[dir] = EIGHTTAP_REGULAR;
-
+#if CONFIG_DUAL_FILTER
+ InterpFilter ref0_filter[2] = { EIGHTTAP_REGULAR, EIGHTTAP_REGULAR };
+ for (int dir = 0; dir < 2; ++dir) {
if (has_subpel_mv_component(xd->mi[0], xd, dir) ||
(mbmi->ref_frame[1] > INTRA_FRAME &&
has_subpel_mv_component(xd->mi[0], xd, dir + 2))) {
- mbmi->interp_filter[dir] =
- (InterpFilter)av1_switchable_interp_inv[aom_read_symbol(
- r, ec_ctx->switchable_interp_cdf[ctx], SWITCHABLE_FILTERS,
- ACCT_STR)];
- if (counts) ++counts->switchable_interp[ctx][mbmi->interp_filter[dir]];
+ const int ctx = av1_get_pred_context_switchable_interp(xd, dir);
+ ref0_filter[dir] =
+ (InterpFilter)aom_read_symbol(r, ec_ctx->switchable_interp_cdf[ctx],
+ SWITCHABLE_FILTERS, ACCT_STR);
+ if (counts) ++counts->switchable_interp[ctx][ref0_filter[dir]];
}
}
- // The index system works as:
- // (0, 1) -> (vertical, horizontal) filter types for the first ref frame.
- // (2, 3) -> (vertical, horizontal) filter types for the second ref frame.
- mbmi->interp_filter[2] = mbmi->interp_filter[0];
- mbmi->interp_filter[3] = mbmi->interp_filter[1];
- }
+ // The index system works as: (0, 1) -> (vertical, horizontal) filter types
+ mbmi->interp_filters =
+ av1_make_interp_filters(ref0_filter[0], ref0_filter[1]);
#else // CONFIG_DUAL_FILTER
- if (cm->interp_filter != SWITCHABLE) {
- mbmi->interp_filter = cm->interp_filter;
- } else {
const int ctx = av1_get_pred_context_switchable_interp(xd);
- mbmi->interp_filter =
- (InterpFilter)av1_switchable_interp_inv[aom_read_symbol(
- r, ec_ctx->switchable_interp_cdf[ctx], SWITCHABLE_FILTERS,
- ACCT_STR)];
- if (counts) ++counts->switchable_interp[ctx][mbmi->interp_filter];
- }
+ InterpFilter filter = (InterpFilter)aom_read_symbol(
+ r, ec_ctx->switchable_interp_cdf[ctx], SWITCHABLE_FILTERS, ACCT_STR);
+ mbmi->interp_filters = av1_broadcast_interp_filter(filter);
+ if (counts) ++counts->switchable_interp[ctx][filter];
#endif // CONFIG_DUAL_FILTER
+ }
}
static void read_intra_block_mode_info(AV1_COMMON *const cm, const int mi_row,
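
The interpolation-filter hunks above replace the per-direction interp_filter[4] array with a single packed interp_filters value built by av1_make_interp_filters() or av1_broadcast_interp_filter(). The sketch below shows the packing idea only; the 16-bit-per-field layout and the TOY_* filter enum are assumptions for illustration, not the library's actual definitions.

#include <stdio.h>

typedef enum {
  TOY_EIGHTTAP_REGULAR = 0,
  TOY_EIGHTTAP_SMOOTH = 1,
  TOY_MULTITAP_SHARP = 2,
  TOY_BILINEAR = 3
} ToyInterpFilter;

/* Pack the (vertical, horizontal) filter choices into one integer. */
static unsigned toy_make_interp_filters(ToyInterpFilter y, ToyInterpFilter x) {
  return ((unsigned)x << 16) | (unsigned)y;
}

/* Use the same filter for both directions, like av1_broadcast_interp_filter(). */
static unsigned toy_broadcast_interp_filter(ToyInterpFilter f) {
  return toy_make_interp_filters(f, f);
}

static ToyInterpFilter toy_extract(unsigned filters, int dir /* 0 = y, 1 = x */) {
  return (ToyInterpFilter)((filters >> (dir ? 16 : 0)) & 0xffff);
}

int main(void) {
  unsigned filters =
      toy_make_interp_filters(TOY_EIGHTTAP_SMOOTH, TOY_MULTITAP_SHARP);
  printf("y=%d x=%d\n", toy_extract(filters, 0), toy_extract(filters, 1));
  filters = toy_broadcast_interp_filter(TOY_BILINEAR);
  printf("y=%d x=%d\n", toy_extract(filters, 0), toy_extract(filters, 1));
  return 0;
}
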
@@ -1766,62 +1849,74 @@ static void read_intra_block_mode_info(AV1_COMMON *const cm, const int mi_row,
#if CONFIG_CB4X4
(void)i;
- mbmi->mode = read_intra_mode_y(ec_ctx, xd, r, size_group_lookup[bsize]);
+ mbmi->mode = read_intra_mode(r, ec_ctx->y_mode_cdf[size_group_lookup[bsize]]);
#else
switch (bsize) {
case BLOCK_4X4:
for (i = 0; i < 4; ++i)
- mi->bmi[i].as_mode = read_intra_mode_y(ec_ctx, xd, r, 0);
+ mi->bmi[i].as_mode = read_intra_mode(r, ec_ctx->y_mode_cdf[0]);
mbmi->mode = mi->bmi[3].as_mode;
break;
case BLOCK_4X8:
mi->bmi[0].as_mode = mi->bmi[2].as_mode =
- read_intra_mode_y(ec_ctx, xd, r, 0);
+ read_intra_mode(r, ec_ctx->y_mode_cdf[0]);
mi->bmi[1].as_mode = mi->bmi[3].as_mode = mbmi->mode =
- read_intra_mode_y(ec_ctx, xd, r, 0);
+ read_intra_mode(r, ec_ctx->y_mode_cdf[0]);
break;
case BLOCK_8X4:
mi->bmi[0].as_mode = mi->bmi[1].as_mode =
- read_intra_mode_y(ec_ctx, xd, r, 0);
+ read_intra_mode(r, ec_ctx->y_mode_cdf[0]);
mi->bmi[2].as_mode = mi->bmi[3].as_mode = mbmi->mode =
- read_intra_mode_y(ec_ctx, xd, r, 0);
+ read_intra_mode(r, ec_ctx->y_mode_cdf[0]);
break;
default:
- mbmi->mode = read_intra_mode_y(ec_ctx, xd, r, size_group_lookup[bsize]);
+ mbmi->mode =
+ read_intra_mode(r, ec_ctx->y_mode_cdf[size_group_lookup[bsize]]);
}
#endif
#if CONFIG_CB4X4
if (is_chroma_reference(mi_row, mi_col, bsize, xd->plane[1].subsampling_x,
xd->plane[1].subsampling_y)) {
- mbmi->uv_mode = read_intra_mode_uv(ec_ctx, xd, r, mbmi->mode);
+ mbmi->uv_mode = read_intra_mode_uv(ec_ctx, r, mbmi->mode);
#else
- mbmi->uv_mode = read_intra_mode_uv(ec_ctx, xd, r, mbmi->mode);
+ mbmi->uv_mode = read_intra_mode_uv(ec_ctx, r, mbmi->mode);
(void)mi_row;
(void)mi_col;
#endif
#if CONFIG_CFL
- // TODO(ltrudeau) support PALETTE
- if (mbmi->uv_mode == UV_DC_PRED) {
+ if (mbmi->uv_mode == UV_CFL_PRED) {
mbmi->cfl_alpha_idx =
- read_cfl_alphas(xd->tile_ctx, r, mbmi->cfl_alpha_signs);
+ read_cfl_alphas(xd->tile_ctx, r, &mbmi->cfl_alpha_signs);
+ xd->cfl->store_y = 1;
+ } else {
+ xd->cfl->store_y = 0;
}
#endif // CONFIG_CFL
#if CONFIG_CB4X4
+ } else {
+ // Avoid decoding angle_info if there is no chroma prediction
+ mbmi->uv_mode = UV_DC_PRED;
+#if CONFIG_CFL
+ xd->cfl->is_chroma_reference = 0;
+ xd->cfl->store_y = 1;
+#endif
}
#endif
+ // Explicitly ignore cm here to avoid a compile warning if none of
+ // ext-intra, palette and filter-intra are enabled.
+ (void)cm;
+
#if CONFIG_EXT_INTRA
read_intra_angle_info(cm, xd, r);
#endif // CONFIG_EXT_INTRA
-#if CONFIG_PALETTE
mbmi->palette_mode_info.palette_size[0] = 0;
mbmi->palette_mode_info.palette_size[1] = 0;
- if (bsize >= BLOCK_8X8 && cm->allow_screen_content_tools)
+ if (av1_allow_palette(cm->allow_screen_content_tools, bsize))
read_palette_mode_info(cm, xd, r);
-#endif // CONFIG_PALETTE
#if CONFIG_FILTER_INTRA
mbmi->filter_intra_mode_info.use_filter_intra_mode[0] = 0;
mbmi->filter_intra_mode_info.use_filter_intra_mode[1] = 0;
@@ -1859,7 +1954,11 @@ static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd,
(void)mi_row;
(void)mi_col;
(void)bsize;
-
+#if CONFIG_AMVR
+ if (cm->cur_frame_mv_precision_level) {
+ allow_hp = MV_SUBPEL_NONE;
+ }
+#endif
switch (mode) {
case NEWMV: {
FRAME_COUNTS *counts = xd->counts;
@@ -1898,12 +1997,22 @@ static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd,
#if CONFIG_GLOBAL_MOTION
mv[0].as_int = gm_get_motion_vector(&cm->global_motion[ref_frame[0]],
cm->allow_high_precision_mv, bsize,
- mi_col, mi_row, block)
+ mi_col, mi_row, block
+#if CONFIG_AMVR
+ ,
+ cm->cur_frame_mv_precision_level
+#endif
+ )
.as_int;
if (is_compound)
mv[1].as_int = gm_get_motion_vector(&cm->global_motion[ref_frame[1]],
cm->allow_high_precision_mv, bsize,
- mi_col, mi_row, block)
+ mi_col, mi_row, block
+#if CONFIG_AMVR
+ ,
+ cm->cur_frame_mv_precision_level
+#endif
+ )
.as_int;
#else
mv[0].as_int = 0;
@@ -1914,7 +2023,6 @@ static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd,
if (is_compound) pred_mv[1].as_int = mv[1].as_int;
break;
}
-#if CONFIG_EXT_INTER
#if CONFIG_COMPOUND_SINGLEREF
case SR_NEAREST_NEARMV: {
assert(!is_compound);
@@ -2083,11 +2191,21 @@ static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd,
#if CONFIG_GLOBAL_MOTION
mv[0].as_int = gm_get_motion_vector(&cm->global_motion[ref_frame[0]],
cm->allow_high_precision_mv, bsize,
- mi_col, mi_row, block)
+ mi_col, mi_row, block
+#if CONFIG_AMVR
+ ,
+ cm->cur_frame_mv_precision_level
+#endif
+ )
.as_int;
mv[1].as_int = gm_get_motion_vector(&cm->global_motion[ref_frame[1]],
cm->allow_high_precision_mv, bsize,
- mi_col, mi_row, block)
+ mi_col, mi_row, block
+#if CONFIG_AMVR
+ ,
+ cm->cur_frame_mv_precision_level
+#endif
+ )
.as_int;
#else
mv[0].as_int = 0;
@@ -2095,7 +2213,6 @@ static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd,
#endif // CONFIG_GLOBAL_MOTION
break;
}
-#endif // CONFIG_EXT_INTER
default: { return 0; }
}
return ret;
@@ -2120,7 +2237,7 @@ static int read_is_inter_block(AV1_COMMON *const cm, MACROBLOCKD *const xd,
}
}
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
static int read_is_inter_singleref_comp_mode(AV1_COMMON *const cm,
MACROBLOCKD *const xd,
int segment_id, aom_reader *r) {
@@ -2134,7 +2251,7 @@ static int read_is_inter_singleref_comp_mode(AV1_COMMON *const cm,
if (counts) ++counts->comp_inter_mode[ctx][is_singleref_comp_mode];
return is_singleref_comp_mode;
}
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
static void fpm_sync(void *const data, int mi_row) {
AV1Decoder *const pbi = (AV1Decoder *)data;
@@ -2143,8 +2260,8 @@ static void fpm_sync(void *const data, int mi_row) {
}
#if DEC_MISMATCH_DEBUG
-static void dec_dump_logs(AV1_COMMON *cm, MODE_INFO *const mi,
- MACROBLOCKD *const xd, int mi_row, int mi_col,
+static void dec_dump_logs(AV1_COMMON *cm, MODE_INFO *const mi, int mi_row,
+ int mi_col,
int16_t inter_mode_ctx[MODE_CTX_REF_FRAMES],
int16_t mode_ctx) {
int_mv mv[2] = { { 0 } };
@@ -2153,22 +2270,6 @@ static void dec_dump_logs(AV1_COMMON *cm, MODE_INFO *const mi,
for (ref = 0; ref < 1 + has_second_ref(mbmi); ++ref)
mv[ref].as_mv = mbmi->mv[ref].as_mv;
- int interp_ctx[2] = { -1 };
- int interp_filter[2] = { cm->interp_filter };
- if (cm->interp_filter == SWITCHABLE) {
- int dir;
- for (dir = 0; dir < 2; ++dir) {
- if (has_subpel_mv_component(xd->mi[0], xd, dir) ||
- (mbmi->ref_frame[1] > INTRA_FRAME &&
- has_subpel_mv_component(xd->mi[0], xd, dir + 2))) {
- interp_ctx[dir] = av1_get_pred_context_switchable_interp(xd, dir);
- interp_filter[dir] = mbmi->interp_filter[dir];
- } else {
- interp_filter[dir] = EIGHTTAP_REGULAR;
- }
- }
- }
-
const int16_t newmv_ctx = mode_ctx & NEWMV_CTX_MASK;
int16_t zeromv_ctx = -1;
int16_t refmv_ctx = -1;
@@ -2185,20 +2286,18 @@ static void dec_dump_logs(AV1_COMMON *cm, MODE_INFO *const mi,
int8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
#define FRAME_TO_CHECK 1
- if (cm->current_video_frame == FRAME_TO_CHECK /*&& cm->show_frame == 0*/) {
+ if (cm->current_video_frame == FRAME_TO_CHECK && cm->show_frame == 1) {
printf(
"=== DECODER ===: "
"Frame=%d, (mi_row,mi_col)=(%d,%d), mode=%d, bsize=%d, "
"show_frame=%d, mv[0]=(%d,%d), mv[1]=(%d,%d), ref[0]=%d, "
"ref[1]=%d, motion_mode=%d, inter_mode_ctx=%d, mode_ctx=%d, "
- "interp_ctx=(%d,%d), interp_filter=(%d,%d), newmv_ctx=%d, "
- "zeromv_ctx=%d, refmv_ctx=%d\n",
+ "newmv_ctx=%d, zeromv_ctx=%d, refmv_ctx=%d\n",
cm->current_video_frame, mi_row, mi_col, mbmi->mode, mbmi->sb_type,
cm->show_frame, mv[0].as_mv.row, mv[0].as_mv.col, mv[1].as_mv.row,
mv[1].as_mv.col, mbmi->ref_frame[0], mbmi->ref_frame[1],
- mbmi->motion_mode, inter_mode_ctx[ref_frame_type], mode_ctx,
- interp_ctx[0], interp_ctx[1], interp_filter[0], interp_filter[1],
- newmv_ctx, zeromv_ctx, refmv_ctx);
+ mbmi->motion_mode, inter_mode_ctx[ref_frame_type], mode_ctx, newmv_ctx,
+ zeromv_ctx, refmv_ctx);
}
}
#endif // DEC_MISMATCH_DEBUG
@@ -2206,8 +2305,7 @@ static void dec_dump_logs(AV1_COMMON *cm, MODE_INFO *const mi,
static void read_inter_block_mode_info(AV1Decoder *const pbi,
MACROBLOCKD *const xd,
MODE_INFO *const mi,
-#if (CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION || CONFIG_EXT_INTER) && \
- CONFIG_SUPERTX
+#if CONFIG_SUPERTX
int mi_row, int mi_col, aom_reader *r,
int supertx_enabled) {
#else
@@ -2221,13 +2319,11 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
int_mv nearestmv[2], nearmv[2];
int_mv ref_mvs[MODE_CTX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
int ref, is_compound;
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
int is_singleref_comp_mode = 0;
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
int16_t inter_mode_ctx[MODE_CTX_REF_FRAMES];
-#if CONFIG_EXT_INTER
int16_t compound_inter_mode_ctx[MODE_CTX_REF_FRAMES];
-#endif // CONFIG_EXT_INTER
int16_t mode_ctx = 0;
#if CONFIG_WARPED_MOTION
int pts[SAMPLES_ARRAY_SIZE], pts_inref[SAMPLES_ARRAY_SIZE];
@@ -2239,10 +2335,9 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
assert(NELEMENTS(mode_2_counter) == MB_MODE_COUNT);
-#if CONFIG_PALETTE
+ mbmi->uv_mode = UV_DC_PRED;
mbmi->palette_mode_info.palette_size[0] = 0;
mbmi->palette_mode_info.palette_size[1] = 0;
-#endif // CONFIG_PALETTE
memset(ref_mvs, 0, sizeof(ref_mvs));
@@ -2258,30 +2353,25 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
#endif // !USE_UNI_COMP_REFS
#endif // CONFIG_EXT_COMP_REFS
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
if (!is_compound)
is_singleref_comp_mode =
read_is_inter_singleref_comp_mode(cm, xd, mbmi->segment_id, r);
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
for (ref = 0; ref < 1 + is_compound; ++ref) {
MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
- av1_find_mv_refs(
- cm, xd, mi, frame, &xd->ref_mv_count[frame], xd->ref_mv_stack[frame],
-#if CONFIG_EXT_INTER
- compound_inter_mode_ctx,
-#endif // CONFIG_EXT_INTER
- ref_mvs[frame], mi_row, mi_col, fpm_sync, (void *)pbi, inter_mode_ctx);
+ av1_find_mv_refs(cm, xd, mi, frame, &xd->ref_mv_count[frame],
+ xd->ref_mv_stack[frame], compound_inter_mode_ctx,
+ ref_mvs[frame], mi_row, mi_col, fpm_sync, (void *)pbi,
+ inter_mode_ctx);
}
if (is_compound) {
MV_REFERENCE_FRAME ref_frame = av1_ref_frame_type(mbmi->ref_frame);
av1_find_mv_refs(cm, xd, mi, ref_frame, &xd->ref_mv_count[ref_frame],
- xd->ref_mv_stack[ref_frame],
-#if CONFIG_EXT_INTER
- compound_inter_mode_ctx,
-#endif // CONFIG_EXT_INTER
+ xd->ref_mv_stack[ref_frame], compound_inter_mode_ctx,
ref_mvs[ref_frame], mi_row, mi_col, fpm_sync, (void *)pbi,
inter_mode_ctx);
@@ -2292,21 +2382,39 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
#if CONFIG_GLOBAL_MOTION
zeromv[0].as_int = gm_get_motion_vector(&cm->global_motion[rf[0]],
cm->allow_high_precision_mv,
- bsize, mi_col, mi_row, 0)
+ bsize, mi_col, mi_row, 0
+#if CONFIG_AMVR
+ ,
+ cm->cur_frame_mv_precision_level
+#endif
+ )
.as_int;
- zeromv[1].as_int = (rf[1] != NONE_FRAME)
- ? gm_get_motion_vector(&cm->global_motion[rf[1]],
- cm->allow_high_precision_mv,
- bsize, mi_col, mi_row, 0)
- .as_int
- : 0;
+ zeromv[1].as_int =
+ (rf[1] != NONE_FRAME)
+ ? gm_get_motion_vector(&cm->global_motion[rf[1]],
+ cm->allow_high_precision_mv, bsize, mi_col,
+ mi_row, 0
+#if CONFIG_AMVR
+ ,
+ cm->cur_frame_mv_precision_level
+#endif
+ )
+ .as_int
+ : 0;
#else
zeromv[0].as_int = zeromv[1].as_int = 0;
#endif
for (ref = 0; ref < 2; ++ref) {
if (rf[ref] == NONE_FRAME) continue;
+#if CONFIG_AMVR
+ lower_mv_precision(&ref_mvs[rf[ref]][0].as_mv, allow_hp,
+ cm->cur_frame_mv_precision_level);
+ lower_mv_precision(&ref_mvs[rf[ref]][1].as_mv, allow_hp,
+ cm->cur_frame_mv_precision_level);
+#else
lower_mv_precision(&ref_mvs[rf[ref]][0].as_mv, allow_hp);
lower_mv_precision(&ref_mvs[rf[ref]][1].as_mv, allow_hp);
+#endif
if (ref_mvs[rf[ref]][0].as_int != zeromv[ref].as_int ||
ref_mvs[rf[ref]][1].as_int != zeromv[ref].as_int)
inter_mode_ctx[ref_frame] &= ~(1 << ALL_ZERO_FLAG_OFFSET);
@@ -2314,7 +2422,6 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
}
}
-#if CONFIG_EXT_INTER
#if CONFIG_COMPOUND_SINGLEREF
if (is_compound || is_singleref_comp_mode)
#else // !CONFIG_COMPOUND_SINGLEREF
@@ -2322,12 +2429,16 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
#endif // CONFIG_COMPOUND_SINGLEREF
mode_ctx = compound_inter_mode_ctx[mbmi->ref_frame[0]];
else
-#endif // CONFIG_EXT_INTER
mode_ctx =
av1_mode_context_analyzer(inter_mode_ctx, mbmi->ref_frame, bsize, -1);
mbmi->ref_mv_idx = 0;
+#if CONFIG_SEGMENT_ZEROMV
+ if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP) ||
+ segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_ZEROMV)) {
+#else
if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
+#endif
mbmi->mode = ZEROMV;
if (bsize < BLOCK_8X8 && !unify_bsize) {
aom_internal_error(xd->error_info, AOM_CODEC_UNSUP_BITSTREAM,
@@ -2336,7 +2447,6 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
}
} else {
if (bsize >= BLOCK_8X8 || unify_bsize) {
-#if CONFIG_EXT_INTER
if (is_compound)
mbmi->mode = read_inter_compound_mode(cm, xd, r, mode_ctx);
#if CONFIG_COMPOUND_SINGLEREF
@@ -2344,60 +2454,53 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
mbmi->mode = read_inter_singleref_comp_mode(xd, r, mode_ctx);
#endif // CONFIG_COMPOUND_SINGLEREF
else
-#endif // CONFIG_EXT_INTER
mbmi->mode = read_inter_mode(ec_ctx, xd, r, mode_ctx);
-#if CONFIG_EXT_INTER
if (mbmi->mode == NEWMV || mbmi->mode == NEW_NEWMV ||
#if CONFIG_COMPOUND_SINGLEREF
mbmi->mode == SR_NEW_NEWMV ||
#endif // CONFIG_COMPOUND_SINGLEREF
have_nearmv_in_inter_mode(mbmi->mode))
-#else // !CONFIG_EXT_INTER
- if (mbmi->mode == NEARMV || mbmi->mode == NEWMV)
-#endif // CONFIG_EXT_INTER
read_drl_idx(ec_ctx, xd, mbmi, r);
}
}
-#if CONFIG_EXT_INTER
- if ((bsize < BLOCK_8X8 && unify_bsize) ||
+ if ((bsize < BLOCK_8X8 && !unify_bsize) ||
(mbmi->mode != ZEROMV && mbmi->mode != ZERO_ZEROMV)) {
-#else
- if ((bsize < BLOCK_8X8 && !unify_bsize) || mbmi->mode != ZEROMV) {
-#endif // CONFIG_EXT_INTER
for (ref = 0; ref < 1 + is_compound; ++ref) {
+#if CONFIG_AMVR
+ av1_find_best_ref_mvs(allow_hp, ref_mvs[mbmi->ref_frame[ref]],
+ &nearestmv[ref], &nearmv[ref],
+ cm->cur_frame_mv_precision_level);
+#else
av1_find_best_ref_mvs(allow_hp, ref_mvs[mbmi->ref_frame[ref]],
&nearestmv[ref], &nearmv[ref]);
+#endif
}
}
-#if CONFIG_EXT_INTER
#if CONFIG_COMPOUND_SINGLEREF
if ((is_compound || is_singleref_comp_mode) &&
- (bsize >= BLOCK_8X8 || unify_bsize) && mbmi->mode != ZERO_ZEROMV) {
+ (bsize >= BLOCK_8X8 || unify_bsize) && mbmi->mode != ZERO_ZEROMV)
#else // !CONFIG_COMPOUND_SINGLEREF
if (is_compound && (bsize >= BLOCK_8X8 || unify_bsize) &&
- mbmi->mode != ZERO_ZEROMV) {
+ mbmi->mode != ZERO_ZEROMV)
#endif // CONFIG_COMPOUND_SINGLEREF
-#else // !CONFIG_EXT_INTER
- if (is_compound && (bsize >= BLOCK_8X8 || unify_bsize) &&
- mbmi->mode != NEWMV && mbmi->mode != ZEROMV) {
-#endif // CONFIG_EXT_INTER
+ {
uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
-#if CONFIG_EXT_INTER
if (xd->ref_mv_count[ref_frame_type] > 0) {
-#else
- if (xd->ref_mv_count[ref_frame_type] == 1 && mbmi->mode == NEARESTMV) {
-#endif // CONFIG_EXT_INTER
-#if CONFIG_EXT_INTER
if (mbmi->mode == NEAREST_NEARESTMV) {
-#endif // CONFIG_EXT_INTER
nearestmv[0] = xd->ref_mv_stack[ref_frame_type][0].this_mv;
nearestmv[1] = xd->ref_mv_stack[ref_frame_type][0].comp_mv;
+#if CONFIG_AMVR
+ lower_mv_precision(&nearestmv[0].as_mv, allow_hp,
+ cm->cur_frame_mv_precision_level);
+ lower_mv_precision(&nearestmv[1].as_mv, allow_hp,
+ cm->cur_frame_mv_precision_level);
+#else
lower_mv_precision(&nearestmv[0].as_mv, allow_hp);
lower_mv_precision(&nearestmv[1].as_mv, allow_hp);
-#if CONFIG_EXT_INTER
+#endif
} else if (mbmi->mode == NEAREST_NEWMV
#if CONFIG_COMPOUND_SINGLEREF
|| mbmi->mode == SR_NEAREST_NEARMV
@@ -2405,15 +2508,24 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
#endif // CONFIG_COMPOUND_SINGLEREF
) {
nearestmv[0] = xd->ref_mv_stack[ref_frame_type][0].this_mv;
+
+#if CONFIG_AMVR
+ lower_mv_precision(&nearestmv[0].as_mv, allow_hp,
+ cm->cur_frame_mv_precision_level);
+#else
lower_mv_precision(&nearestmv[0].as_mv, allow_hp);
+#endif
} else if (mbmi->mode == NEW_NEARESTMV) {
nearestmv[1] = xd->ref_mv_stack[ref_frame_type][0].comp_mv;
+#if CONFIG_AMVR
+ lower_mv_precision(&nearestmv[1].as_mv, allow_hp,
+ cm->cur_frame_mv_precision_level);
+#else
lower_mv_precision(&nearestmv[1].as_mv, allow_hp);
+#endif
}
-#endif // CONFIG_EXT_INTER
}
-#if CONFIG_EXT_INTER
if (xd->ref_mv_count[ref_frame_type] > 1) {
int ref_mv_idx = 1 + mbmi->ref_mv_idx;
#if CONFIG_COMPOUND_SINGLEREF
@@ -2421,12 +2533,22 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
#endif // CONFIG_COMPOUND_SINGLEREF
if (compound_ref0_mode(mbmi->mode) == NEARMV) {
nearmv[0] = xd->ref_mv_stack[ref_frame_type][ref_mv_idx].this_mv;
- lower_mv_precision(&nearmv[0].as_mv, allow_hp);
+#if CONFIG_AMVR
+ lower_mv_precision(&nearmv[0].as_mv, allow_hp,
+ cm->cur_frame_mv_precision_level);
+#else
+ lower_mv_precision(&nearmv[0].as_mv, allow_hp);
+#endif
}
if (compound_ref1_mode(mbmi->mode) == NEARMV) {
nearmv[1] = xd->ref_mv_stack[ref_frame_type][ref_mv_idx].comp_mv;
- lower_mv_precision(&nearmv[1].as_mv, allow_hp);
+#if CONFIG_AMVR
+ lower_mv_precision(&nearmv[1].as_mv, allow_hp,
+ cm->cur_frame_mv_precision_level);
+#else
+ lower_mv_precision(&nearmv[1].as_mv, allow_hp);
+#endif
}
#if CONFIG_COMPOUND_SINGLEREF
} else {
@@ -2439,15 +2561,6 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
}
#endif // CONFIG_COMPOUND_SINGLEREF
}
-#else // !CONFIG_EXT_INTER
- if (xd->ref_mv_count[ref_frame_type] > 1) {
- int ref_mv_idx = 1 + mbmi->ref_mv_idx;
- nearestmv[0] = xd->ref_mv_stack[ref_frame_type][0].this_mv;
- nearestmv[1] = xd->ref_mv_stack[ref_frame_type][0].comp_mv;
- nearmv[0] = xd->ref_mv_stack[ref_frame_type][ref_mv_idx].this_mv;
- nearmv[1] = xd->ref_mv_stack[ref_frame_type][ref_mv_idx].comp_mv;
- }
-#endif // CONFIG_EXT_INTER
} else if (mbmi->ref_mv_idx > 0 && mbmi->mode == NEARMV) {
int_mv cur_mv =
xd->ref_mv_stack[mbmi->ref_frame[0]][1 + mbmi->ref_mv_idx].this_mv;
@@ -2464,72 +2577,58 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
int idx, idy;
PREDICTION_MODE b_mode;
int_mv nearest_sub8x8[2], near_sub8x8[2];
-#if CONFIG_EXT_INTER
int_mv ref_mv[2][2];
-#endif // CONFIG_EXT_INTER
for (idy = 0; idy < 2; idy += num_4x4_h) {
for (idx = 0; idx < 2; idx += num_4x4_w) {
int_mv block[2];
const int j = idy * 2 + idx;
int_mv ref_mv_s8[2];
-#if CONFIG_EXT_INTER
if (!is_compound)
-#endif // CONFIG_EXT_INTER
mode_ctx = av1_mode_context_analyzer(inter_mode_ctx, mbmi->ref_frame,
bsize, j);
-#if CONFIG_EXT_INTER
if (is_compound)
b_mode = read_inter_compound_mode(cm, xd, r, mode_ctx);
else
-#endif // CONFIG_EXT_INTER
b_mode = read_inter_mode(ec_ctx, xd, r, mode_ctx);
-#if CONFIG_EXT_INTER
if (b_mode != ZEROMV && b_mode != ZERO_ZEROMV) {
-#else
- if (b_mode != ZEROMV) {
-#endif // CONFIG_EXT_INTER
CANDIDATE_MV ref_mv_stack[2][MAX_REF_MV_STACK_SIZE];
uint8_t ref_mv_count[2];
- for (ref = 0; ref < 1 + is_compound; ++ref)
-#if CONFIG_EXT_INTER
- {
+ for (ref = 0; ref < 1 + is_compound; ++ref) {
int_mv mv_ref_list[MAX_MV_REF_CANDIDATES];
av1_update_mv_context(cm, xd, mi, mbmi->ref_frame[ref], mv_ref_list,
j, mi_row, mi_col, NULL);
-#endif // CONFIG_EXT_INTER
av1_append_sub8x8_mvs_for_idx(cm, xd, j, ref, mi_row, mi_col,
ref_mv_stack[ref], &ref_mv_count[ref],
-#if CONFIG_EXT_INTER
- mv_ref_list,
-#endif // CONFIG_EXT_INTER
- &nearest_sub8x8[ref],
+ mv_ref_list, &nearest_sub8x8[ref],
&near_sub8x8[ref]);
-#if CONFIG_EXT_INTER
if (have_newmv_in_inter_mode(b_mode)) {
mv_ref_list[0].as_int = nearest_sub8x8[ref].as_int;
mv_ref_list[1].as_int = near_sub8x8[ref].as_int;
+#if CONFIG_AMVR
+ av1_find_best_ref_mvs(allow_hp, mv_ref_list, &ref_mv[0][ref],
+ &ref_mv[1][ref],
+ cm->cur_frame_mv_precision_level);
+#else
av1_find_best_ref_mvs(allow_hp, mv_ref_list, &ref_mv[0][ref],
&ref_mv[1][ref]);
+#endif
}
}
-#endif // CONFIG_EXT_INTER
}
for (ref = 0; ref < 1 + is_compound && b_mode != ZEROMV; ++ref) {
ref_mv_s8[ref] = nearest_sub8x8[ref];
+#if CONFIG_AMVR
+ lower_mv_precision(&ref_mv_s8[ref].as_mv, allow_hp,
+ cm->cur_frame_mv_precision_level);
+#else
lower_mv_precision(&ref_mv_s8[ref].as_mv, allow_hp);
+#endif
}
-#if CONFIG_EXT_INTER
(void)ref_mv_s8;
-#endif
- if (!assign_mv(cm, xd, b_mode, mbmi->ref_frame, j, block,
-#if CONFIG_EXT_INTER
- ref_mv[0],
-#else // !CONFIG_EXT_INTER
- ref_mv_s8,
-#endif // CONFIG_EXT_INTER
+ if (!assign_mv(cm, xd, b_mode, mbmi->ref_frame, j, block, ref_mv[0],
nearest_sub8x8, near_sub8x8, mi_row, mi_col, is_compound,
allow_hp, r)) {
aom_merge_corrupted_flag(&xd->corrupted, 1);
@@ -2556,7 +2655,6 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
ref_mv[0] = nearestmv[0];
ref_mv[1] = nearestmv[1];
-#if CONFIG_EXT_INTER
if (is_compound) {
int ref_mv_idx = mbmi->ref_mv_idx;
// Special case: NEAR_NEWMV and NEW_NEARMV modes use
@@ -2604,7 +2702,6 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
}
#endif // CONFIG_COMPOUND_SINGLEREF
} else {
-#endif // CONFIG_EXT_INTER
if (mbmi->mode == NEWMV) {
for (ref = 0; ref < 1 + is_compound; ++ref) {
uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
@@ -2620,9 +2717,7 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
nearestmv[ref] = ref_mv[ref];
}
}
-#if CONFIG_EXT_INTER
}
-#endif // CONFIG_EXT_INTER
int mv_corrupted_flag =
!assign_mv(cm, xd, mbmi->mode, mbmi->ref_frame, 0, mbmi->mv, ref_mv,
@@ -2630,7 +2725,7 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
aom_merge_corrupted_flag(&xd->corrupted, mv_corrupted_flag);
}
-#if CONFIG_EXT_INTER && CONFIG_INTERINTRA
+#if CONFIG_INTERINTRA
mbmi->use_wedge_interintra = 0;
if (cm->reference_mode != COMPOUND_REFERENCE &&
#if CONFIG_SUPERTX
@@ -2681,7 +2776,7 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
}
}
}
-#endif // CONFIG_EXT_INTER && CONFIG_INTERINTRA
+#endif // CONFIG_INTERINTRA
#if CONFIG_WARPED_MOTION
for (ref = 0; ref < 1 + has_second_ref(mbmi); ++ref) {
@@ -2710,18 +2805,16 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
#if CONFIG_SUPERTX
if (!supertx_enabled) {
#endif // CONFIG_SUPERTX
-#if CONFIG_EXT_INTER
if (mbmi->ref_frame[1] != INTRA_FRAME)
-#endif // CONFIG_EXT_INTER
mbmi->motion_mode = read_motion_mode(cm, xd, mi, r);
#if CONFIG_NCOBMC_ADAPT_WEIGHT
read_ncobmc_mode(xd, mi, mbmi->ncobmc_mode, r);
#endif
-#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#if CONFIG_COMPOUND_SINGLEREF
if (is_singleref_comp_mode) assert(mbmi->motion_mode == SIMPLE_TRANSLATION);
-#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+#endif // CONFIG_COMPOUND_SINGLEREF
#if CONFIG_WARPED_MOTION
if (mbmi->motion_mode == WARPED_CAUSAL) {
mbmi->wm_params[0].wmtype = DEFAULT_WMTYPE;
@@ -2744,7 +2837,6 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
#endif // CONFIG_SUPERTX
#endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
-#if CONFIG_EXT_INTER
mbmi->interinter_compound_type = COMPOUND_AVERAGE;
if (
#if CONFIG_COMPOUND_SINGLEREF
@@ -2760,10 +2852,17 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
if (is_any_masked_compound_used(bsize)) {
#if CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
if (cm->allow_masked_compound) {
- mbmi->interinter_compound_type = aom_read_symbol(
- r, ec_ctx->compound_type_cdf[bsize], COMPOUND_TYPES, ACCT_STR);
+#if CONFIG_WEDGE && CONFIG_COMPOUND_SEGMENT
+ if (!is_interinter_compound_used(COMPOUND_WEDGE, bsize))
+ mbmi->interinter_compound_type =
+ aom_read_bit(r, ACCT_STR) ? COMPOUND_AVERAGE : COMPOUND_SEG;
+ else
+#endif // CONFIG_WEDGE && CONFIG_COMPOUND_SEGMENT
+ mbmi->interinter_compound_type = aom_read_symbol(
+ r, ec_ctx->compound_type_cdf[bsize], COMPOUND_TYPES, ACCT_STR);
#if CONFIG_WEDGE
if (mbmi->interinter_compound_type == COMPOUND_WEDGE) {
+ assert(is_interinter_compound_used(COMPOUND_WEDGE, bsize));
mbmi->wedge_index =
aom_read_literal(r, get_wedge_bits_lookup(bsize), ACCT_STR);
mbmi->wedge_sign = aom_read_bit(r, ACCT_STR);
@@ -2782,15 +2881,13 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
if (xd->counts)
xd->counts->compound_interinter[bsize][mbmi->interinter_compound_type]++;
}
-#endif // CONFIG_EXT_INTER
#if CONFIG_DUAL_FILTER || CONFIG_WARPED_MOTION || CONFIG_GLOBAL_MOTION
read_mb_interp_filter(cm, xd, mbmi, r);
#endif // CONFIG_DUAL_FILTER || CONFIG_WARPED_MOTION
#if DEC_MISMATCH_DEBUG
- // NOTE(zoeliu): For debug
- dec_dump_logs(cm, mi, xd, mi_row, mi_col, inter_mode_ctx, mode_ctx);
+ dec_dump_logs(cm, mi, mi_row, mi_col, inter_mode_ctx, mode_ctx);
#endif // DEC_MISMATCH_DEBUG
}
@@ -2816,7 +2913,6 @@ static void read_inter_frame_mode_info(AV1Decoder *const pbi,
#endif // CONFIG_SUPERTX
mbmi->skip = read_skip(cm, xd, mbmi->segment_id, r);
-#if CONFIG_DELTA_Q
if (cm->delta_q_present_flag) {
xd->current_qindex =
xd->prev_qindex +
@@ -2826,15 +2922,34 @@ static void read_inter_frame_mode_info(AV1Decoder *const pbi,
xd->prev_qindex = xd->current_qindex;
#if CONFIG_EXT_DELTA_Q
if (cm->delta_lf_present_flag) {
- mbmi->current_delta_lf_from_base = xd->current_delta_lf_from_base =
+#if CONFIG_LOOPFILTER_LEVEL
+ if (cm->delta_lf_multi) {
+ for (int lf_id = 0; lf_id < FRAME_LF_COUNT; ++lf_id) {
+ mbmi->curr_delta_lf[lf_id] = xd->curr_delta_lf[lf_id] =
+ xd->prev_delta_lf[lf_id] +
+ read_delta_lflevel(cm, xd, r, lf_id, mbmi, mi_col, mi_row) *
+ cm->delta_lf_res;
+ xd->prev_delta_lf[lf_id] = xd->curr_delta_lf[lf_id];
+ }
+ } else {
+ mbmi->current_delta_lf_from_base = xd->current_delta_lf_from_base =
+ xd->prev_delta_lf_from_base +
+ read_delta_lflevel(cm, xd, r, -1, mbmi, mi_col, mi_row) *
+ cm->delta_lf_res;
+ xd->prev_delta_lf_from_base = xd->current_delta_lf_from_base;
+ }
+#else
+ const int current_delta_lf_from_base =
xd->prev_delta_lf_from_base +
read_delta_lflevel(cm, xd, r, mbmi, mi_col, mi_row) *
cm->delta_lf_res;
+ mbmi->current_delta_lf_from_base = xd->current_delta_lf_from_base =
+ clamp(current_delta_lf_from_base, 0, MAX_LOOP_FILTER);
xd->prev_delta_lf_from_base = xd->current_delta_lf_from_base;
+#endif // CONFIG_LOOPFILTER_LEVEL
}
#endif
}
-#endif
#if CONFIG_SUPERTX
if (!supertx_enabled) {
@@ -2853,27 +2968,34 @@ static void read_inter_frame_mode_info(AV1Decoder *const pbi,
#else
bsize >= BLOCK_8X8 &&
#endif
- !mbmi->skip && inter_block) {
+ !mbmi->skip && inter_block && !xd->lossless[mbmi->segment_id]) {
const TX_SIZE max_tx_size = max_txsize_rect_lookup[bsize];
const int bh = tx_size_high_unit[max_tx_size];
const int bw = tx_size_wide_unit[max_tx_size];
const int width = block_size_wide[bsize] >> tx_size_wide_log2[0];
const int height = block_size_high[bsize] >> tx_size_wide_log2[0];
int idx, idy;
+ int init_depth =
+ (height != width) ? RECT_VARTX_DEPTH_INIT : SQR_VARTX_DEPTH_INIT;
mbmi->min_tx_size = TX_SIZES_ALL;
for (idy = 0; idy < height; idy += bh)
for (idx = 0; idx < width; idx += bw)
- read_tx_size_vartx(cm, xd, mbmi, xd->counts, max_tx_size,
- height != width, idy, idx, r);
+ read_tx_size_vartx(cm, xd, mbmi, xd->counts, max_tx_size, init_depth,
+ idy, idx, r);
#if CONFIG_RECT_TX_EXT
if (is_quarter_tx_allowed(xd, mbmi, inter_block) &&
mbmi->tx_size == max_tx_size) {
int quarter_tx;
if (quarter_txsize_lookup[bsize] != max_tx_size) {
+#if CONFIG_NEW_MULTISYMBOL
+ quarter_tx =
+ aom_read_symbol(r, cm->fc->quarter_tx_size_cdf, 2, ACCT_STR);
+#else
quarter_tx = aom_read(r, cm->fc->quarter_tx_size_prob, ACCT_STR);
if (xd->counts) ++xd->counts->quarter_tx_size[quarter_tx];
+#endif
} else {
quarter_tx = 1;
}
@@ -2920,9 +3042,7 @@ static void read_inter_frame_mode_info(AV1Decoder *const pbi,
if (inter_block)
read_inter_block_mode_info(pbi, xd,
-#if (CONFIG_MOTION_VAR || CONFIG_EXT_INTER || CONFIG_WARPED_MOTION) && \
- CONFIG_SUPERTX
-
+#if CONFIG_SUPERTX
mi, mi_row, mi_col, r, supertx_enabled);
#else
mi, mi_row, mi_col, r);
@@ -2939,6 +3059,34 @@ static void read_inter_frame_mode_info(AV1Decoder *const pbi,
#endif // !CONFIG_TXK_SEL
}
+static void av1_intra_copy_frame_mvs(AV1_COMMON *const cm, int mi_row,
+ int mi_col, int x_mis, int y_mis) {
+#if CONFIG_TMV
+ const int frame_mvs_stride = ROUND_POWER_OF_TWO(cm->mi_cols, 1);
+ MV_REF *frame_mvs = cm->cur_frame->mvs +
+ ((mi_row & 0xfffe) >> 1) * frame_mvs_stride +
+ ((mi_col & 0xfffe) >> 1);
+ x_mis = ROUND_POWER_OF_TWO(x_mis, 1);
+ y_mis = ROUND_POWER_OF_TWO(y_mis, 1);
+#else
+ const int frame_mvs_stride = cm->mi_cols;
+ MV_REF *frame_mvs = cm->cur_frame->mvs +
+ (mi_row & 0xfffe) * frame_mvs_stride + (mi_col & 0xfffe);
+ x_mis = AOMMAX(x_mis, 2);
+ y_mis = AOMMAX(y_mis, 2);
+#endif // CONFIG_TMV
+ int w, h;
+
+ for (h = 0; h < y_mis; h++) {
+ MV_REF *const frame_mv = frame_mvs + h * frame_mvs_stride;
+ for (w = 0; w < x_mis; w++) {
+ MV_REF *const mv = frame_mv + w;
+ mv->ref_frame[0] = NONE_FRAME;
+ mv->ref_frame[1] = NONE_FRAME;
+ }
+ }
+}
+
void av1_read_mode_info(AV1Decoder *const pbi, MACROBLOCKD *xd,
#if CONFIG_SUPERTX
int supertx_enabled,
@@ -2947,40 +3095,19 @@ void av1_read_mode_info(AV1Decoder *const pbi, MACROBLOCKD *xd,
int y_mis) {
AV1_COMMON *const cm = &pbi->common;
MODE_INFO *const mi = xd->mi[0];
- MV_REF *frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
- int w, h;
-
#if CONFIG_INTRABC
mi->mbmi.use_intrabc = 0;
#endif // CONFIG_INTRABC
if (frame_is_intra_only(cm)) {
read_intra_frame_mode_info(cm, xd, mi_row, mi_col, r);
- for (h = 0; h < y_mis; ++h) {
- MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
- for (w = 0; w < x_mis; ++w) {
- MV_REF *const mv = frame_mv + w;
- mv->ref_frame[0] = NONE_FRAME;
- mv->ref_frame[1] = NONE_FRAME;
- }
- }
+ av1_intra_copy_frame_mvs(cm, mi_row, mi_col, x_mis, y_mis);
} else {
read_inter_frame_mode_info(pbi, xd,
#if CONFIG_SUPERTX
supertx_enabled,
#endif // CONFIG_SUPERTX
mi_row, mi_col, r);
- for (h = 0; h < y_mis; ++h) {
- MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
- for (w = 0; w < x_mis; ++w) {
- MV_REF *const mv = frame_mv + w;
- mv->ref_frame[0] = mi->mbmi.ref_frame[0];
- mv->ref_frame[1] = mi->mbmi.ref_frame[1];
- mv->mv[0].as_int = mi->mbmi.mv[0].as_int;
- mv->mv[1].as_int = mi->mbmi.mv[1].as_int;
- mv->pred_mv[0].as_int = mi->mbmi.pred_mv[0].as_int;
- mv->pred_mv[1].as_int = mi->mbmi.pred_mv[1].as_int;
- }
- }
+ av1_copy_frame_mvs(cm, mi, mi_row, mi_col, x_mis, y_mis);
}
}
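
The AMVR hunks above thread cm->cur_frame_mv_precision_level through gm_get_motion_vector, lower_mv_precision and av1_find_best_ref_mvs so that every candidate MV is coerced to the frame's precision. A minimal standalone sketch of the rounding ladder this selects, assuming simplified types and a truncating full-pel snap (not the library's lower_mv_precision implementation):

#include <stdint.h>
#include <stdio.h>

/* MV components are stored in 1/8-pel units. */
static int16_t round_mv_component_sketch(int16_t v, int force_integer_mv,
                                         int allow_hp) {
  if (force_integer_mv) return (int16_t)((v / 8) * 8); /* snap to a whole pel */
  if (!allow_hp && (v & 1))                            /* drop the 1/8-pel bit */
    return (int16_t)(v > 0 ? v - 1 : v + 1);
  return v;
}

int main(void) {
  printf("%d %d %d\n",
         round_mv_component_sketch(13, 1, 1),  /* forced integer MV  -> 8  */
         round_mv_component_sketch(13, 0, 0),  /* no high precision  -> 12 */
         round_mv_component_sketch(13, 0, 1)); /* high precision kept -> 13 */
  return 0;
}
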
diff --git a/third_party/aom/av1/decoder/decoder.c b/third_party/aom/av1/decoder/decoder.c
index 3998c20ee..cd82d5b53 100644
--- a/third_party/aom/av1/decoder/decoder.c
+++ b/third_party/aom/av1/decoder/decoder.c
@@ -33,7 +33,9 @@
#include "av1/decoder/decodeframe.h"
#include "av1/decoder/decoder.h"
-
+#if CONFIG_NCOBMC_ADAPT_WEIGHT
+#include "av1/common/ncobmc_kernels.h"
+#endif // CONFIG_NCOBMC_ADAPT_WEIGHT
#if !CONFIG_PVQ
#include "av1/decoder/detokenize.h"
#endif
@@ -46,23 +48,8 @@ static void initialize_dec(void) {
aom_dsp_rtcd();
aom_scale_rtcd();
av1_init_intra_predictors();
-#if CONFIG_EXT_INTER
av1_init_wedge_masks();
-#endif // CONFIG_EXT_INTER
init_done = 1;
- av1_indices_from_tree(av1_switchable_interp_ind, av1_switchable_interp_inv,
- av1_switchable_interp_tree);
-#if CONFIG_EXT_TX
- int s;
- for (s = 1; s < EXT_TX_SETS_INTRA; ++s)
- av1_indices_from_tree(av1_ext_tx_intra_ind[s], av1_ext_tx_intra_inv[s],
- av1_ext_tx_intra_tree[s]);
- for (s = 1; s < EXT_TX_SETS_INTER; ++s)
- av1_indices_from_tree(av1_ext_tx_inter_ind[s], av1_ext_tx_inter_inv[s],
- av1_ext_tx_inter_tree[s]);
-#else
- av1_indices_from_tree(av1_ext_tx_ind, av1_ext_tx_inv, av1_ext_tx_tree);
-#endif
}
}
@@ -133,6 +120,10 @@ AV1Decoder *av1_decoder_create(BufferPool *const pool) {
av1_loop_filter_init(cm);
+#if CONFIG_NCOBMC_ADAPT_WEIGHT
+ get_default_ncobmc_kernels(cm);
+#endif // CONFIG_NCOBMC_ADAPT_WEIGHT
+
#if CONFIG_AOM_QM
aom_qm_init(cm);
#endif
@@ -184,107 +175,36 @@ static int equal_dimensions(const YV12_BUFFER_CONFIG *a,
a->uv_height == b->uv_height && a->uv_width == b->uv_width;
}
-aom_codec_err_t av1_copy_reference_dec(AV1Decoder *pbi,
- AOM_REFFRAME ref_frame_flag,
+aom_codec_err_t av1_copy_reference_dec(AV1Decoder *pbi, int idx,
YV12_BUFFER_CONFIG *sd) {
AV1_COMMON *cm = &pbi->common;
- /* TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
- * encoder is using the frame buffers for. This is just a stub to keep the
- * aomenc --test-decode functionality working, and will be replaced in a
- * later commit that adds AV1-specific controls for this functionality.
- */
- if (ref_frame_flag == AOM_LAST_FLAG) {
- const YV12_BUFFER_CONFIG *const cfg = get_ref_frame(cm, 0);
- if (cfg == NULL) {
- aom_internal_error(&cm->error, AOM_CODEC_ERROR,
- "No 'last' reference frame");
- return AOM_CODEC_ERROR;
- }
- if (!equal_dimensions(cfg, sd))
- aom_internal_error(&cm->error, AOM_CODEC_ERROR,
- "Incorrect buffer dimensions");
- else
- aom_yv12_copy_frame(cfg, sd);
- } else {
- aom_internal_error(&cm->error, AOM_CODEC_ERROR, "Invalid reference frame");
+ const YV12_BUFFER_CONFIG *const cfg = get_ref_frame(cm, idx);
+ if (cfg == NULL) {
+ aom_internal_error(&cm->error, AOM_CODEC_ERROR, "No reference frame");
+ return AOM_CODEC_ERROR;
}
+ if (!equal_dimensions(cfg, sd))
+ aom_internal_error(&cm->error, AOM_CODEC_ERROR,
+ "Incorrect buffer dimensions");
+ else
+ aom_yv12_copy_frame(cfg, sd);
return cm->error.error_code;
}
-aom_codec_err_t av1_set_reference_dec(AV1_COMMON *cm,
- AOM_REFFRAME ref_frame_flag,
+aom_codec_err_t av1_set_reference_dec(AV1_COMMON *cm, int idx,
YV12_BUFFER_CONFIG *sd) {
- int idx;
YV12_BUFFER_CONFIG *ref_buf = NULL;
- // TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
- // encoder is using the frame buffers for. This is just a stub to keep the
- // aomenc --test-decode functionality working, and will be replaced in a
- // later commit that adds AV1-specific controls for this functionality.
-
- // (Yunqing) The set_reference control depends on the following setting in
- // encoder.
- // cpi->lst_fb_idx = 0;
- // #if CONFIG_EXT_REFS
- // cpi->lst2_fb_idx = 1;
- // cpi->lst3_fb_idx = 2;
- // cpi->gld_fb_idx = 3;
- // cpi->bwd_fb_idx = 4;
- // #if CONFIG_ALTREF2
- // cpi->alt2_fb_idx = 5;
- // cpi->alt_fb_idx = 6;
- // #else // !CONFIG_ALTREF2
- // cpi->alt_fb_idx = 5;
- // #endif // CONFIG_ALTREF2
- // #else // CONFIG_EXT_REFS
- // cpi->gld_fb_idx = 1;
- // cpi->alt_fb_idx = 2;
- // #endif // CONFIG_EXT_REFS
-
- // TODO(zoeliu): To revisit following code and reconsider what assumption we
- // may take on the reference frame buffer virtual indexes
- if (ref_frame_flag == AOM_LAST_FLAG) {
- idx = cm->ref_frame_map[0];
-#if CONFIG_EXT_REFS
- } else if (ref_frame_flag == AOM_LAST2_FLAG) {
- idx = cm->ref_frame_map[1];
- } else if (ref_frame_flag == AOM_LAST3_FLAG) {
- idx = cm->ref_frame_map[2];
- } else if (ref_frame_flag == AOM_GOLD_FLAG) {
- idx = cm->ref_frame_map[3];
- } else if (ref_frame_flag == AOM_BWD_FLAG) {
- idx = cm->ref_frame_map[4];
-#if CONFIG_ALTREF2
- } else if (ref_frame_flag == AOM_ALT2_FLAG) {
- idx = cm->ref_frame_map[5];
- } else if (ref_frame_flag == AOM_ALT_FLAG) {
- idx = cm->ref_frame_map[6];
-#else // !CONFIG_ALTREF2
- } else if (ref_frame_flag == AOM_ALT_FLAG) {
- idx = cm->ref_frame_map[5];
-#endif // CONFIG_ALTREF2
-#else // !CONFIG_EXT_REFS
- } else if (ref_frame_flag == AOM_GOLD_FLAG) {
- idx = cm->ref_frame_map[1];
- } else if (ref_frame_flag == AOM_ALT_FLAG) {
- idx = cm->ref_frame_map[2];
-#endif // CONFIG_EXT_REFS
- } else {
- aom_internal_error(&cm->error, AOM_CODEC_ERROR, "Invalid reference frame");
- return cm->error.error_code;
- }
+ // Get the destination reference buffer.
+ ref_buf = get_ref_frame(cm, idx);
- if (idx < 0 || idx >= FRAME_BUFFERS) {
- aom_internal_error(&cm->error, AOM_CODEC_ERROR,
- "Invalid reference frame map");
- return cm->error.error_code;
+ if (ref_buf == NULL) {
+ aom_internal_error(&cm->error, AOM_CODEC_ERROR, "No reference frame");
+ return AOM_CODEC_ERROR;
}
- // Get the destination reference buffer.
- ref_buf = &cm->buffer_pool->frame_bufs[idx].buf;
-
if (!equal_dimensions(ref_buf, sd)) {
aom_internal_error(&cm->error, AOM_CODEC_ERROR,
"Incorrect buffer dimensions");
@@ -444,7 +364,16 @@ int av1_receive_compressed_data(AV1Decoder *pbi, size_t size,
}
cm->error.setjmp = 1;
- av1_decode_frame(pbi, source, source + size, psource);
+
+#if !CONFIG_OBU
+ av1_decode_frame_headers_and_setup(pbi, source, source + size, psource);
+ if (!cm->show_existing_frame) {
+ av1_decode_tg_tiles_and_wrapup(pbi, source, source + size, psource, 0,
+ cm->tile_rows * cm->tile_cols - 1, 1);
+ }
+#else
+ av1_decode_frame_from_obus(pbi, source, source + size, psource);
+#endif
swap_frame_buffers(pbi);
@@ -492,6 +421,8 @@ int av1_receive_compressed_data(AV1Decoder *pbi, size_t size,
} else {
cm->last_width = cm->width;
cm->last_height = cm->height;
+ cm->last_tile_cols = cm->tile_cols;
+ cm->last_tile_rows = cm->tile_rows;
if (cm->show_frame) {
cm->current_video_frame++;
}
diff --git a/third_party/aom/av1/decoder/decoder.h b/third_party/aom/av1/decoder/decoder.h
index 5e6afc2dc..20129b669 100644
--- a/third_party/aom/av1/decoder/decoder.h
+++ b/third_party/aom/av1/decoder/decoder.h
@@ -54,9 +54,10 @@ typedef struct TileData {
CFL_CTX cfl;
#endif
DECLARE_ALIGNED(16, FRAME_CONTEXT, tctx);
-#if CONFIG_PALETTE
DECLARE_ALIGNED(16, uint8_t, color_index_map[2][MAX_SB_SQUARE]);
-#endif // CONFIG_PALETTE
+#if CONFIG_MRC_TX
+ DECLARE_ALIGNED(16, uint8_t, mrc_mask[MAX_SB_SQUARE]);
+#endif // CONFIG_MRC_TX
} TileData;
typedef struct TileWorkerData {
@@ -74,9 +75,10 @@ typedef struct TileWorkerData {
CFL_CTX cfl;
#endif
FRAME_CONTEXT tctx;
-#if CONFIG_PALETTE
DECLARE_ALIGNED(16, uint8_t, color_index_map[2][MAX_SB_SQUARE]);
-#endif // CONFIG_PALETTE
+#if CONFIG_MRC_TX
+ DECLARE_ALIGNED(16, uint8_t, mrc_mask[MAX_SB_SQUARE]);
+#endif // CONFIG_MRC_TX
struct aom_internal_error_info error_info;
} TileWorkerData;
@@ -138,9 +140,6 @@ typedef struct AV1Decoder {
int tg_size; // Number of tiles in the current tilegroup
int tg_start; // First tile in the current tilegroup
int tg_size_bit_offset;
-#if CONFIG_REFERENCE_BUFFER
- SequenceHeader seq_params;
-#endif
#if CONFIG_INSPECTION
aom_inspect_cb inspect_cb;
void *inspect_ctx;
@@ -154,12 +153,10 @@ int av1_get_raw_frame(struct AV1Decoder *pbi, YV12_BUFFER_CONFIG *sd);
int av1_get_frame_to_show(struct AV1Decoder *pbi, YV12_BUFFER_CONFIG *frame);
-aom_codec_err_t av1_copy_reference_dec(struct AV1Decoder *pbi,
- AOM_REFFRAME ref_frame_flag,
+aom_codec_err_t av1_copy_reference_dec(struct AV1Decoder *pbi, int idx,
YV12_BUFFER_CONFIG *sd);
-aom_codec_err_t av1_set_reference_dec(AV1_COMMON *cm,
- AOM_REFFRAME ref_frame_flag,
+aom_codec_err_t av1_set_reference_dec(AV1_COMMON *cm, int idx,
YV12_BUFFER_CONFIG *sd);
static INLINE uint8_t read_marker(aom_decrypt_cb decrypt_cb,
@@ -213,7 +210,6 @@ static INLINE int dec_is_ref_frame_buf(AV1Decoder *const pbi,
}
#endif // CONFIG_EXT_REFS
-#if CONFIG_EXT_INTRA || CONFIG_FILTER_INTRA || CONFIG_PALETTE
#define ACCT_STR __func__
static INLINE int av1_read_uniform(aom_reader *r, int n) {
const int l = get_unsigned_bits(n);
@@ -225,7 +221,6 @@ static INLINE int av1_read_uniform(aom_reader *r, int n) {
else
return (v << 1) - m + aom_read_literal(r, 1, ACCT_STR);
}
-#endif // CONFIG_EXT_INTRA || CONFIG_FILTER_INTRA || CONFIG_PALETTE
#ifdef __cplusplus
} // extern "C"
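
av1_read_uniform above implements the usual quasi-uniform code: values in [0, n) are sent with either l-1 or l bits, where l is the bit width of n. A self-contained sketch of the same arithmetic, with read_bit() and the bit-width helper standing in for the aom bitreader and get_unsigned_bits (assumptions for illustration only):

#include <stdio.h>

static const int bits[] = { 1, 1, 0 }; /* example bitstream */
static int pos;
static int read_bit(void) { return bits[pos++]; }

static int bit_width(unsigned n) { /* floor(log2(n)) + 1, as get_unsigned_bits */
  int l = 0;
  while (n) { ++l; n >>= 1; }
  return l;
}

static int read_uniform_sketch(int n) {
  const int l = bit_width((unsigned)n);
  const int m = (1 << l) - n; /* number of short (l-1 bit) codewords */
  int v = 0;
  for (int i = 0; i < l - 1; ++i) v = (v << 1) | read_bit();
  if (v < m) return v;              /* short codeword */
  return (v << 1) - m + read_bit(); /* long codeword: one extra bit */
}

int main(void) {
  /* n = 5: l = 3, m = 3; prefix "11" gives v = 3 >= m, extra bit "0",
   * so the decoded value is (3 << 1) - 3 + 0 = 3. */
  printf("%d\n", read_uniform_sketch(5));
  return 0;
}
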
diff --git a/third_party/aom/av1/decoder/decodetxb.c b/third_party/aom/av1/decoder/decodetxb.c
index 6e38427b3..13f944b35 100644
--- a/third_party/aom/av1/decoder/decodetxb.c
+++ b/third_party/aom/av1/decoder/decodetxb.c
@@ -15,16 +15,20 @@
#include "av1/decoder/decodemv.h"
#include "av1/decoder/decodetxb.h"
#include "av1/decoder/dsubexp.h"
+#include "av1/decoder/symbolrate.h"
#define ACCT_STR __func__
-static int read_golomb(MACROBLOCKD *xd, aom_reader *r) {
+static int read_golomb(MACROBLOCKD *xd, aom_reader *r, FRAME_COUNTS *counts) {
+#if !CONFIG_SYMBOLRATE
+ (void)counts;
+#endif
int x = 1;
int length = 0;
int i = 0;
while (!i) {
- i = aom_read_bit(r, ACCT_STR);
+ i = av1_read_record_bit(counts, r, ACCT_STR);
++length;
if (length >= 32) {
aom_internal_error(xd->error_info, AOM_CODEC_CORRUPT_FRAME,
@@ -35,21 +39,247 @@ static int read_golomb(MACROBLOCKD *xd, aom_reader *r) {
for (i = 0; i < length - 1; ++i) {
x <<= 1;
- x += aom_read_bit(r, ACCT_STR);
+ x += av1_read_record_bit(counts, r, ACCT_STR);
}
return x - 1;
}
+static INLINE int read_nz_map(aom_reader *r, tran_low_t *tcoeffs, int plane,
+ const int16_t *scan, TX_SIZE tx_size,
+ TX_TYPE tx_type, FRAME_CONTEXT *fc,
+ FRAME_COUNTS *counts) {
+ TX_SIZE txs_ctx = get_txsize_context(tx_size);
+ const int bwl = b_width_log2_lookup[txsize_to_bsize[tx_size]] + 2;
+ const int height = tx_size_high[tx_size];
+#if CONFIG_CTX1D
+ const int width = tx_size_wide[tx_size];
+ const int eob_offset = width + height;
+ const TX_CLASS tx_class = get_tx_class(tx_type);
+ const int seg_eob =
+ (tx_class == TX_CLASS_2D) ? tx_size_2d[tx_size] : eob_offset;
+#else
+ const int seg_eob = tx_size_2d[tx_size];
+#endif
+ const PLANE_TYPE plane_type = get_plane_type(plane);
+ unsigned int(*nz_map_count)[SIG_COEF_CONTEXTS][2] =
+ (counts) ? &counts->nz_map[txs_ctx][plane_type] : NULL;
+#if !LV_MAP_PROB
+ aom_prob *nz_map = fc->nz_map[txs_ctx][plane_type];
+ aom_prob *eob_flag = fc->eob_flag[txs_ctx][plane_type];
+#endif
+ int c;
+ for (c = 0; c < seg_eob; ++c) {
+ int is_nz;
+ int coeff_ctx = get_nz_map_ctx(tcoeffs, c, scan, bwl, height, tx_type);
+ int eob_ctx = get_eob_ctx(tcoeffs, scan[c], txs_ctx, tx_type);
+
+ if (c < seg_eob - 1) {
+#if LV_MAP_PROB
+ is_nz = av1_read_record_bin(
+ counts, r, fc->nz_map_cdf[txs_ctx][plane_type][coeff_ctx], 2,
+ ACCT_STR);
+#else
+ is_nz = aom_read(r, nz_map[coeff_ctx], ACCT_STR);
+#endif
+ } else {
+ is_nz = 1;
+ }
+
+ // set non-zero coefficient map.
+ tcoeffs[scan[c]] = is_nz;
+
+ if (c == seg_eob - 1) {
+ ++c;
+ break;
+ }
+
+ if (counts) ++(*nz_map_count)[coeff_ctx][is_nz];
+
+ if (is_nz) {
+#if LV_MAP_PROB
+ int is_eob = av1_read_record_bin(
+ counts, r, fc->eob_flag_cdf[txs_ctx][plane_type][eob_ctx], 2,
+ ACCT_STR);
+#else
+ int is_eob = aom_read(r, eob_flag[eob_ctx], ACCT_STR);
+#endif
+ if (counts) ++counts->eob_flag[txs_ctx][plane_type][eob_ctx][is_eob];
+ if (is_eob) break;
+ }
+ }
+ return AOMMIN(seg_eob, c + 1);
+}
+
+#if CONFIG_CTX1D
+static INLINE int read_nz_map_vert(aom_reader *r, tran_low_t *tcoeffs,
+ int plane, const int16_t *scan,
+ const int16_t *iscan, TX_SIZE tx_size,
+ TX_TYPE tx_type, FRAME_CONTEXT *fc,
+ FRAME_COUNTS *counts) {
+ const TX_SIZE txs_ctx = get_txsize_context(tx_size);
+ const PLANE_TYPE plane_type = get_plane_type(plane);
+ const TX_CLASS tx_class = get_tx_class(tx_type);
+ const int bwl = b_width_log2_lookup[txsize_to_bsize[tx_size]] + 2;
+ const int width = tx_size_wide[tx_size];
+ const int height = tx_size_high[tx_size];
+ int16_t eob_ls[MAX_HVTX_SIZE];
+ int eob = 0;
+#if !LV_MAP_PROB
+ aom_prob *nz_map = fc->nz_map[txs_ctx][plane_type];
+#endif
+ for (int col = 0; col < width; ++col) {
+ int el_ctx = get_empty_line_ctx(col, eob_ls);
+#if LV_MAP_PROB
+ int empty_line = av1_read_record_bin(
+ counts, r, fc->empty_line_cdf[txs_ctx][plane_type][tx_class][el_ctx], 2,
+ ACCT_STR);
+#else
+ int empty_line = aom_read(
+ r, fc->empty_line[txs_ctx][plane_type][tx_class][el_ctx], ACCT_STR);
+#endif
+ if (counts)
+ ++counts->empty_line[txs_ctx][plane_type][tx_class][el_ctx][empty_line];
+ if (!empty_line) {
+ int row;
+ for (row = 0; row < height; ++row) {
+ if (row + 1 != height) {
+ int coeff_idx = row * width + col;
+ int scan_idx = iscan[coeff_idx];
+ int coeff_ctx =
+ get_nz_map_ctx(tcoeffs, scan_idx, scan, bwl, height, tx_type);
+#if LV_MAP_PROB
+ int is_nz = av1_read_record_bin(
+ counts, r, fc->nz_map_cdf[txs_ctx][plane_type][coeff_ctx], 2,
+ ACCT_STR);
+#else
+ int is_nz = aom_read(r, nz_map[coeff_ctx], ACCT_STR);
+#endif
+ if (counts) ++counts->nz_map[txs_ctx][plane_type][coeff_ctx][is_nz];
+ tcoeffs[coeff_idx] = is_nz;
+ if (is_nz) {
+ eob = AOMMAX(eob, iscan[coeff_idx] + 1);
+ if (row + 1 != height) {
+ int eob_ctx = get_hv_eob_ctx(col, row, eob_ls);
+#if LV_MAP_PROB
+ int is_eob = av1_read_record_bin(
+ counts, r,
+ fc->hv_eob_cdf[txs_ctx][plane_type][tx_class][eob_ctx], 2,
+ ACCT_STR);
+#else
+ int is_eob = aom_read(
+ r, fc->hv_eob[txs_ctx][plane_type][tx_class][eob_ctx],
+ ACCT_STR);
+#endif
+ if (counts)
+ ++counts
+ ->hv_eob[txs_ctx][plane_type][tx_class][eob_ctx][is_eob];
+ if (is_eob) break;
+ }
+ }
+ } else {
+ int coeff_idx = row * width + col;
+ tcoeffs[coeff_idx] = 1;
+ eob = AOMMAX(eob, iscan[coeff_idx] + 1);
+ }
+ }
+ eob_ls[col] = AOMMIN(height, row + 1);
+ } else {
+ eob_ls[col] = 0;
+ }
+ }
+ return eob;
+}
+
+static INLINE int read_nz_map_horiz(aom_reader *r, tran_low_t *tcoeffs,
+ int plane, const int16_t *scan,
+ const int16_t *iscan, TX_SIZE tx_size,
+ TX_TYPE tx_type, FRAME_CONTEXT *fc,
+ FRAME_COUNTS *counts) {
+ const TX_SIZE txs_ctx = get_txsize_context(tx_size);
+ const PLANE_TYPE plane_type = get_plane_type(plane);
+ const TX_CLASS tx_class = get_tx_class(tx_type);
+ const int bwl = b_width_log2_lookup[txsize_to_bsize[tx_size]] + 2;
+ const int width = tx_size_wide[tx_size];
+ const int height = tx_size_high[tx_size];
+ int16_t eob_ls[MAX_HVTX_SIZE];
+ int eob = 0;
+#if !LV_MAP_PROB
+ aom_prob *nz_map = fc->nz_map[txs_ctx][plane_type];
+#endif
+ for (int row = 0; row < height; ++row) {
+ int el_ctx = get_empty_line_ctx(row, eob_ls);
+#if LV_MAP_PROB
+ int empty_line = av1_read_record_bin(
+ counts, r, fc->empty_line_cdf[txs_ctx][plane_type][tx_class][el_ctx], 2,
+ ACCT_STR);
+#else
+ int empty_line = aom_read(
+ r, fc->empty_line[txs_ctx][plane_type][tx_class][el_ctx], ACCT_STR);
+#endif
+ if (counts)
+ ++counts->empty_line[txs_ctx][plane_type][tx_class][el_ctx][empty_line];
+ if (!empty_line) {
+ int col;
+ for (col = 0; col < width; ++col) {
+ if (col + 1 != width) {
+ int coeff_idx = row * width + col;
+ int scan_idx = iscan[coeff_idx];
+ int coeff_ctx =
+ get_nz_map_ctx(tcoeffs, scan_idx, scan, bwl, height, tx_type);
+#if LV_MAP_PROB
+ int is_nz = av1_read_record_bin(
+ counts, r, fc->nz_map_cdf[txs_ctx][plane_type][coeff_ctx], 2,
+ ACCT_STR);
+#else
+ int is_nz = aom_read(r, nz_map[coeff_ctx], ACCT_STR);
+#endif
+ if (counts) ++counts->nz_map[txs_ctx][plane_type][coeff_ctx][is_nz];
+ tcoeffs[coeff_idx] = is_nz;
+ if (is_nz) {
+ eob = AOMMAX(eob, iscan[coeff_idx] + 1);
+ int eob_ctx = get_hv_eob_ctx(row, col, eob_ls);
+#if LV_MAP_PROB
+ int is_eob = av1_read_record_bin(
+ counts, r,
+ fc->hv_eob_cdf[txs_ctx][plane_type][tx_class][eob_ctx], 2,
+ ACCT_STR);
+#else
+ int is_eob =
+ aom_read(r, fc->hv_eob[txs_ctx][plane_type][tx_class][eob_ctx],
+ ACCT_STR);
+#endif
+ if (counts)
+ ++counts->hv_eob[txs_ctx][plane_type][tx_class][eob_ctx][is_eob];
+ if (is_eob) break;
+ }
+ } else {
+ int coeff_idx = row * width + col;
+ tcoeffs[coeff_idx] = 1;
+ eob = AOMMAX(eob, iscan[coeff_idx] + 1);
+ }
+ }
+ eob_ls[row] = AOMMIN(width, col + 1);
+ } else {
+ eob_ls[row] = 0;
+ }
+ }
+ return eob;
+}
+#endif
+
uint8_t av1_read_coeffs_txb(const AV1_COMMON *const cm, MACROBLOCKD *xd,
aom_reader *r, int blk_row, int blk_col, int block,
int plane, tran_low_t *tcoeffs, TXB_CTX *txb_ctx,
TX_SIZE tx_size, int16_t *max_scan_line, int *eob) {
+ FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
FRAME_COUNTS *counts = xd->counts;
TX_SIZE txs_ctx = get_txsize_context(tx_size);
PLANE_TYPE plane_type = get_plane_type(plane);
- aom_prob *nz_map = cm->fc->nz_map[txs_ctx][plane_type];
- aom_prob *eob_flag = cm->fc->eob_flag[txs_ctx][plane_type];
+#if !LV_MAP_PROB
+ aom_prob *nz_map = ec_ctx->nz_map[txs_ctx][plane_type];
+ aom_prob *eob_flag = ec_ctx->eob_flag[txs_ctx][plane_type];
+#endif
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
const int seg_eob = tx_size_2d[tx_size];
int c = 0;
@@ -59,14 +289,16 @@ uint8_t av1_read_coeffs_txb(const AV1_COMMON *const cm, MACROBLOCKD *xd,
const int bwl = b_width_log2_lookup[txsize_to_bsize[tx_size]] + 2;
const int height = tx_size_high[tx_size];
int cul_level = 0;
- unsigned int(*nz_map_count)[SIG_COEF_CONTEXTS][2];
-
- nz_map_count = (counts) ? &counts->nz_map[txs_ctx][plane_type] : NULL;
-
memset(tcoeffs, 0, sizeof(*tcoeffs) * seg_eob);
+#if LV_MAP_PROB
+ int all_zero = av1_read_record_bin(
+ counts, r, ec_ctx->txb_skip_cdf[txs_ctx][txb_ctx->txb_skip_ctx], 2,
+ ACCT_STR);
+#else
int all_zero =
- aom_read(r, cm->fc->txb_skip[txs_ctx][txb_ctx->txb_skip_ctx], ACCT_STR);
+ aom_read(r, ec_ctx->txb_skip[txs_ctx][txb_ctx->txb_skip_ctx], ACCT_STR);
+#endif
if (xd->counts)
++xd->counts->txb_skip[txs_ctx][txb_ctx->txb_skip_ctx][all_zero];
@@ -89,42 +321,46 @@ uint8_t av1_read_coeffs_txb(const AV1_COMMON *const cm, MACROBLOCKD *xd,
av1_get_tx_type(plane_type, xd, blk_row, blk_col, block, tx_size);
const SCAN_ORDER *const scan_order = get_scan(cm, tx_size, tx_type, mbmi);
const int16_t *scan = scan_order->scan;
- const int16_t *iscan = scan_order->iscan;
-
- for (c = 0; c < seg_eob; ++c) {
- int is_nz;
- int coeff_ctx = get_nz_map_ctx(tcoeffs, scan[c], bwl, height, iscan);
- int eob_ctx = get_eob_ctx(tcoeffs, scan[c], txs_ctx);
-
- if (c < seg_eob - 1)
- is_nz = aom_read(r, nz_map[coeff_ctx], ACCT_STR);
- else
- is_nz = 1;
-
- // set non-zero coefficient map.
- tcoeffs[scan[c]] = is_nz;
-
- if (c == seg_eob - 1) {
- ++c;
- break;
- }
-
- if (counts) ++(*nz_map_count)[coeff_ctx][is_nz];
- if (is_nz) {
- int is_eob = aom_read(r, eob_flag[eob_ctx], ACCT_STR);
- if (counts) ++counts->eob_flag[txs_ctx][plane_type][eob_ctx][is_eob];
- if (is_eob) break;
+#if CONFIG_CTX1D
+ const int16_t *iscan = scan_order->iscan;
+ TX_CLASS tx_class = get_tx_class(tx_type);
+ if (tx_class == TX_CLASS_2D) {
+ *eob =
+ read_nz_map(r, tcoeffs, plane, scan, tx_size, tx_type, ec_ctx, counts);
+ } else {
+#if LV_MAP_PROB
+ const int eob_mode = av1_read_record_bin(
+ counts, r, ec_ctx->eob_mode_cdf[txs_ctx][plane_type][tx_class], 2,
+ ACCT_STR);
+#else
+ const int eob_mode =
+ aom_read(r, ec_ctx->eob_mode[txs_ctx][plane_type][tx_class], ACCT_STR);
+#endif
+ if (counts) ++counts->eob_mode[txs_ctx][plane_type][tx_class][eob_mode];
+ if (eob_mode == 0) {
+ *eob = read_nz_map(r, tcoeffs, plane, scan, tx_size, tx_type, ec_ctx,
+ counts);
+ } else {
+ assert(tx_class == TX_CLASS_VERT || tx_class == TX_CLASS_HORIZ);
+ if (tx_class == TX_CLASS_VERT)
+ *eob = read_nz_map_vert(r, tcoeffs, plane, scan, iscan, tx_size,
+ tx_type, ec_ctx, counts);
+ else
+ *eob = read_nz_map_horiz(r, tcoeffs, plane, scan, iscan, tx_size,
+ tx_type, ec_ctx, counts);
}
}
-
- *eob = AOMMIN(seg_eob, c + 1);
+#else
+ *eob = read_nz_map(r, tcoeffs, plane, scan, tx_size, tx_type, ec_ctx, counts);
+#endif
*max_scan_line = *eob;
int i;
for (i = 0; i < NUM_BASE_LEVELS; ++i) {
- aom_prob *coeff_base = cm->fc->coeff_base[txs_ctx][plane_type][i];
-
+#if !LV_MAP_PROB
+ aom_prob *coeff_base = ec_ctx->coeff_base[txs_ctx][plane_type][i];
+#endif
update_eob = 0;
for (c = *eob - 1; c >= 0; --c) {
tran_low_t *v = &tcoeffs[scan[c]];
@@ -135,7 +371,14 @@ uint8_t av1_read_coeffs_txb(const AV1_COMMON *const cm, MACROBLOCKD *xd,
ctx = get_base_ctx(tcoeffs, scan[c], bwl, height, i + 1);
- if (aom_read(r, coeff_base[ctx], ACCT_STR)) {
+#if LV_MAP_PROB
+ if (av1_read_record_bin(
+ counts, r, ec_ctx->coeff_base_cdf[txs_ctx][plane_type][i][ctx], 2,
+ ACCT_STR))
+#else
+ if (aom_read(r, coeff_base[ctx], ACCT_STR))
+#endif
+ {
*v = i + 1;
cul_level += i + 1;
@@ -143,11 +386,17 @@ uint8_t av1_read_coeffs_txb(const AV1_COMMON *const cm, MACROBLOCKD *xd,
if (c == 0) {
int dc_sign_ctx = txb_ctx->dc_sign_ctx;
+#if LV_MAP_PROB
+ sign = av1_read_record_bin(
+ counts, r, ec_ctx->dc_sign_cdf[plane_type][dc_sign_ctx], 2,
+ ACCT_STR);
+#else
sign =
- aom_read(r, cm->fc->dc_sign[plane_type][dc_sign_ctx], ACCT_STR);
+ aom_read(r, ec_ctx->dc_sign[plane_type][dc_sign_ctx], ACCT_STR);
+#endif
if (counts) ++counts->dc_sign[plane_type][dc_sign_ctx][sign];
} else {
- sign = aom_read_bit(r, ACCT_STR);
+ sign = av1_read_record_bit(counts, r, ACCT_STR);
}
if (sign) *v = -(*v);
continue;
@@ -170,18 +419,74 @@ uint8_t av1_read_coeffs_txb(const AV1_COMMON *const cm, MACROBLOCKD *xd,
if (c == 0) {
int dc_sign_ctx = txb_ctx->dc_sign_ctx;
- sign = aom_read(r, cm->fc->dc_sign[plane_type][dc_sign_ctx], ACCT_STR);
+#if LV_MAP_PROB
+ sign = av1_read_record_bin(
+ counts, r, ec_ctx->dc_sign_cdf[plane_type][dc_sign_ctx], 2, ACCT_STR);
+#else
+ sign = aom_read(r, ec_ctx->dc_sign[plane_type][dc_sign_ctx], ACCT_STR);
+#endif
if (counts) ++counts->dc_sign[plane_type][dc_sign_ctx][sign];
} else {
- sign = aom_read_bit(r, ACCT_STR);
+ sign = av1_read_record_bit(counts, r, ACCT_STR);
}
ctx = get_br_ctx(tcoeffs, scan[c], bwl, height);
- if (cm->fc->coeff_lps[txs_ctx][plane_type][ctx] == 0) exit(0);
+#if BR_NODE
+ for (idx = 0; idx < BASE_RANGE_SETS; ++idx) {
+#if LV_MAP_PROB
+ if (av1_read_record_bin(
+ counts, r, ec_ctx->coeff_br_cdf[txs_ctx][plane_type][idx][ctx], 2,
+ ACCT_STR))
+#else // LV_MAP_PROB
+ if (aom_read(r, ec_ctx->coeff_br[txs_ctx][plane_type][idx][ctx],
+ ACCT_STR))
+#endif // LV_MAP_PROB
+ {
+ int extra_bits = (1 << br_extra_bits[idx]) - 1;
+ // int br_offset = aom_read_literal(r, extra_bits, ACCT_STR);
+ int br_offset = 0;
+ int tok;
+ if (counts) ++counts->coeff_br[txs_ctx][plane_type][idx][ctx][1];
+ for (tok = 0; tok < extra_bits; ++tok) {
+#if LV_MAP_PROB
+ if (av1_read_record_bin(
+ counts, r, ec_ctx->coeff_lps_cdf[txs_ctx][plane_type][ctx], 2,
+ ACCT_STR))
+#else
+ if (aom_read(r, ec_ctx->coeff_lps[txs_ctx][plane_type][ctx],
+ ACCT_STR))
+#endif
+ {
+ br_offset = tok;
+ if (counts) ++counts->coeff_lps[txs_ctx][plane_type][ctx][1];
+ break;
+ }
+ if (counts) ++counts->coeff_lps[txs_ctx][plane_type][ctx][0];
+ }
+ if (tok == extra_bits) br_offset = extra_bits;
+ int br_base = br_index_to_coeff[idx];
+
+ *v = NUM_BASE_LEVELS + 1 + br_base + br_offset;
+ cul_level += *v;
+ if (sign) *v = -(*v);
+ break;
+ }
+ if (counts) ++counts->coeff_br[txs_ctx][plane_type][idx][ctx][0];
+ }
+
+ if (idx < BASE_RANGE_SETS) continue;
+#else
for (idx = 0; idx < COEFF_BASE_RANGE; ++idx) {
- if (aom_read(r, cm->fc->coeff_lps[txs_ctx][plane_type][ctx], ACCT_STR)) {
+#if LV_MAP_PROB
+ if (av1_read_record_bin(counts, r,
+ ec_ctx->coeff_lps_cdf[txs_ctx][plane_type][ctx],
+ 2, ACCT_STR))
+#else
+ if (aom_read(r, ec_ctx->coeff_lps[txs_ctx][plane_type][ctx], ACCT_STR))
+#endif
+ {
*v = (idx + 1 + NUM_BASE_LEVELS);
if (sign) *v = -(*v);
cul_level += abs(*v);
@@ -192,9 +497,10 @@ uint8_t av1_read_coeffs_txb(const AV1_COMMON *const cm, MACROBLOCKD *xd,
if (counts) ++counts->coeff_lps[txs_ctx][plane_type][ctx][0];
}
if (idx < COEFF_BASE_RANGE) continue;
+#endif
// decode 0-th order Golomb code
- *v = read_golomb(xd, r) + COEFF_BASE_RANGE + 1 + NUM_BASE_LEVELS;
+ *v = read_golomb(xd, r, counts) + COEFF_BASE_RANGE + 1 + NUM_BASE_LEVELS;
if (sign) *v = -(*v);
cul_level += abs(*v);
}
@@ -202,6 +508,9 @@ uint8_t av1_read_coeffs_txb(const AV1_COMMON *const cm, MACROBLOCKD *xd,
for (c = 0; c < *eob; ++c) {
int16_t dqv = (c == 0) ? dequant[0] : dequant[1];
tran_low_t *v = &tcoeffs[scan[c]];
+#if CONFIG_SYMBOLRATE
+ av1_record_coeff(counts, abs(*v));
+#endif
int sign = (*v) < 0;
*v = (abs(*v) * dqv) >> shift;
if (sign) *v = -(*v);
@@ -251,11 +560,15 @@ uint8_t av1_read_coeffs_txb_facade(AV1_COMMON *cm, MACROBLOCKD *xd,
return cul_level;
}
+#if !LV_MAP_PROB
static void read_txb_probs(FRAME_CONTEXT *fc, const TX_SIZE tx_size,
- aom_reader *r) {
+ aom_reader *r, FRAME_COUNTS *counts) {
+#if !CONFIG_SYMBOLRATE
+ (void)counts;
+#endif
int plane, ctx, level;
- if (aom_read_bit(r, ACCT_STR) == 0) return;
+ if (av1_read_record_bit(counts, r, ACCT_STR) == 0) return;
for (ctx = 0; ctx < TXB_SKIP_CONTEXTS; ++ctx)
av1_diff_update_prob(r, &fc->txb_skip[tx_size][ctx], ACCT_STR);
@@ -279,14 +592,17 @@ static void read_txb_probs(FRAME_CONTEXT *fc, const TX_SIZE tx_size,
av1_diff_update_prob(r, &fc->coeff_lps[tx_size][plane][ctx], ACCT_STR);
}
-void av1_read_txb_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode, aom_reader *r) {
+void av1_read_txb_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode, aom_reader *r,
+ FRAME_COUNTS *counts) {
const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
TX_SIZE tx_size;
int ctx, plane;
+
for (plane = 0; plane < PLANE_TYPES; ++plane)
for (ctx = 0; ctx < DC_SIGN_CONTEXTS; ++ctx)
av1_diff_update_prob(r, &fc->dc_sign[plane][ctx], ACCT_STR);
for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size)
- read_txb_probs(fc, tx_size, r);
+ read_txb_probs(fc, tx_size, r, counts);
}
+#endif // !LV_MAP_PROB
diff --git a/third_party/aom/av1/decoder/decodetxb.h b/third_party/aom/av1/decoder/decodetxb.h
index 313476139..1c6512e97 100644
--- a/third_party/aom/av1/decoder/decodetxb.h
+++ b/third_party/aom/av1/decoder/decodetxb.h
@@ -28,5 +28,8 @@ uint8_t av1_read_coeffs_txb_facade(AV1_COMMON *cm, MACROBLOCKD *xd,
int plane, tran_low_t *tcoeffs,
TX_SIZE tx_size, int16_t *max_scan_line,
int *eob);
-void av1_read_txb_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode, aom_reader *r);
+#if !LV_MAP_PROB
+void av1_read_txb_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode, aom_reader *r,
+ FRAME_COUNTS *counts);
+#endif // !LV_MAP_PROB
#endif // DECODETXB_H_
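
Once a coefficient magnitude exceeds the base levels and the base range, av1_read_coeffs_txb above falls back to read_golomb, a 0th-order Exp-Golomb code. A minimal standalone sketch of that decode, with read_bit() pulling from a local array instead of the aom bitreader (an assumption for illustration only):

#include <stdio.h>

static const int bits[] = { 0, 0, 1, 0, 1 }; /* codeword "00101" */
static int pos;
static int read_bit(void) { return bits[pos++]; }

static int read_golomb_sketch(void) {
  int x = 1, length = 0, i = 0;
  while (!i) { /* count bits up to and including the first 1 */
    i = read_bit();
    ++length;
  }
  for (i = 0; i < length - 1; ++i) /* length-1 suffix bits */
    x = (x << 1) + read_bit();
  return x - 1; /* 2^k - 1 + suffix, with k leading zeros */
}

int main(void) {
  /* "00101": k = 2 leading zeros, suffix "01" -> (4 - 1) + 1 = 4. */
  printf("%d\n", read_golomb_sketch());
  return 0;
}
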
diff --git a/third_party/aom/av1/decoder/detokenize.c b/third_party/aom/av1/decoder/detokenize.c
index 461494dfe..a59a7bac1 100644
--- a/third_party/aom/av1/decoder/detokenize.c
+++ b/third_party/aom/av1/decoder/detokenize.c
@@ -24,7 +24,11 @@
#include "av1/common/common.h"
#include "av1/common/entropy.h"
#include "av1/common/idct.h"
+#endif
+
+#include "av1/decoder/symbolrate.h"
+#if !CONFIG_PVQ || CONFIG_VAR_TX
#define EOB_CONTEXT_NODE 0
#define ZERO_CONTEXT_NODE 1
#define ONE_CONTEXT_NODE 2
@@ -43,31 +47,43 @@
} while (0)
#if CONFIG_NEW_MULTISYMBOL
-#define READ_COEFF(prob_name, cdf_name, num, r) read_coeff(cdf_name, num, r);
-static INLINE int read_coeff(const aom_cdf_prob *const *cdf, int n,
+#define READ_COEFF(counts, prob_name, cdf_name, num, r) \
+ read_coeff(counts, cdf_name, num, r);
+static INLINE int read_coeff(FRAME_COUNTS *counts,
+ const aom_cdf_prob *const *cdf, int n,
aom_reader *r) {
+#if !CONFIG_SYMBOLRATE
+ (void)counts;
+#endif
int val = 0;
int i = 0;
int count = 0;
while (count < n) {
const int size = AOMMIN(n - count, 4);
- val |= aom_read_cdf(r, cdf[i++], 1 << size, ACCT_STR) << count;
+ val |= av1_read_record_cdf(counts, r, cdf[i++], 1 << size, ACCT_STR)
+ << count;
count += size;
}
return val;
}
#else
-#define READ_COEFF(prob_name, cdf_name, num, r) read_coeff(prob_name, num, r);
-static INLINE int read_coeff(const aom_prob *probs, int n, aom_reader *r) {
+#define READ_COEFF(counts, prob_name, cdf_name, num, r) \
+ read_coeff(counts, prob_name, num, r);
+static INLINE int read_coeff(FRAME_COUNTS *counts, const aom_prob *probs, int n,
+ aom_reader *r) {
+#if !CONFIG_SYMBOLRATE
+ (void)counts;
+#endif
int i, val = 0;
- for (i = 0; i < n; ++i) val = (val << 1) | aom_read(r, probs[i], ACCT_STR);
+ for (i = 0; i < n; ++i)
+ val = (val << 1) | av1_read_record(counts, r, probs[i], ACCT_STR);
return val;
}
#endif
-static int token_to_value(aom_reader *const r, int token, TX_SIZE tx_size,
- int bit_depth) {
+static int token_to_value(FRAME_COUNTS *counts, aom_reader *const r, int token,
+ TX_SIZE tx_size, int bit_depth) {
#if !CONFIG_HIGHBITDEPTH
assert(bit_depth == 8);
#endif // !CONFIG_HIGHBITDEPTH
@@ -79,20 +95,25 @@ static int token_to_value(aom_reader *const r, int token, TX_SIZE tx_size,
case THREE_TOKEN:
case FOUR_TOKEN: return token;
case CATEGORY1_TOKEN:
- return CAT1_MIN_VAL + READ_COEFF(av1_cat1_prob, av1_cat1_cdf, 1, r);
+ return CAT1_MIN_VAL +
+ READ_COEFF(counts, av1_cat1_prob, av1_cat1_cdf, 1, r);
case CATEGORY2_TOKEN:
- return CAT2_MIN_VAL + READ_COEFF(av1_cat2_prob, av1_cat2_cdf, 2, r);
+ return CAT2_MIN_VAL +
+ READ_COEFF(counts, av1_cat2_prob, av1_cat2_cdf, 2, r);
case CATEGORY3_TOKEN:
- return CAT3_MIN_VAL + READ_COEFF(av1_cat3_prob, av1_cat3_cdf, 3, r);
+ return CAT3_MIN_VAL +
+ READ_COEFF(counts, av1_cat3_prob, av1_cat3_cdf, 3, r);
case CATEGORY4_TOKEN:
- return CAT4_MIN_VAL + READ_COEFF(av1_cat4_prob, av1_cat4_cdf, 4, r);
+ return CAT4_MIN_VAL +
+ READ_COEFF(counts, av1_cat4_prob, av1_cat4_cdf, 4, r);
case CATEGORY5_TOKEN:
- return CAT5_MIN_VAL + READ_COEFF(av1_cat5_prob, av1_cat5_cdf, 5, r);
+ return CAT5_MIN_VAL +
+ READ_COEFF(counts, av1_cat5_prob, av1_cat5_cdf, 5, r);
case CATEGORY6_TOKEN: {
const int skip_bits = (int)sizeof(av1_cat6_prob) -
av1_get_cat6_extrabits_size(tx_size, bit_depth);
- return CAT6_MIN_VAL + READ_COEFF(av1_cat6_prob + skip_bits, av1_cat6_cdf,
- 18 - skip_bits, r);
+ return CAT6_MIN_VAL + READ_COEFF(counts, av1_cat6_prob + skip_bits,
+ av1_cat6_cdf, 18 - skip_bits, r);
}
default:
assert(0); // Invalid token.
@@ -104,22 +125,22 @@ static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff,
TX_SIZE tx_size, TX_TYPE tx_type, const int16_t *dq,
#if CONFIG_NEW_QUANT
dequant_val_type_nuq *dq_val,
-#endif // CONFIG_NEW_QUANT
+#else
#if CONFIG_AOM_QM
- const qm_val_t *iqm[2][TX_SIZES_ALL],
+ qm_val_t *iqm[2][TX_SIZES_ALL],
#endif // CONFIG_AOM_QM
+#endif // CONFIG_NEW_QUANT
int ctx, const int16_t *scan, const int16_t *nb,
int16_t *max_scan_line, aom_reader *r) {
FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
const int max_eob = tx_size_2d[tx_size];
const int ref = is_inter_block(&xd->mi[0]->mbmi);
-#if CONFIG_AOM_QM
+#if CONFIG_AOM_QM && !CONFIG_NEW_QUANT
const qm_val_t *iqmatrix = iqm[!ref][tx_size];
-#else
- (void)tx_type;
#endif // CONFIG_AOM_QM
+ (void)tx_type;
int band, c = 0;
- const int tx_size_ctx = txsize_sqr_map[tx_size];
+ const TX_SIZE tx_size_ctx = txsize_sqr_map[tx_size];
aom_cdf_prob(*coef_head_cdfs)[COEFF_CONTEXTS][CDF_SIZE(ENTROPY_TOKENS)] =
ec_ctx->coef_head_cdfs[tx_size_ctx][type][ref];
aom_cdf_prob(*coef_tail_cdfs)[COEFF_CONTEXTS][CDF_SIZE(ENTROPY_TOKENS)] =
@@ -130,7 +151,7 @@ static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff,
const uint8_t *band_translate = get_band_translate(tx_size);
int dq_shift;
int v, token;
- int16_t dqv = dq[0];
+ int32_t dqv = dq[0];
#if CONFIG_NEW_QUANT
const tran_low_t *dqv_val = &dq_val[0][0];
#endif // CONFIG_NEW_QUANT
@@ -149,9 +170,10 @@ static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff,
dqv_val = &dq_val[band][0];
#endif // CONFIG_NEW_QUANT
- comb_token = last_pos ? 2 * aom_read_bit(r, ACCT_STR) + 2
- : aom_read_symbol(r, coef_head_cdfs[band][ctx],
- HEAD_TOKENS + first_pos, ACCT_STR) +
+ comb_token = last_pos ? 2 * av1_read_record_bit(xd->counts, r, ACCT_STR) + 2
+ : av1_read_record_symbol(
+ xd->counts, r, coef_head_cdfs[band][ctx],
+ HEAD_TOKENS + first_pos, ACCT_STR) +
!first_pos;
if (first_pos) {
if (comb_token == 0) return 0;
@@ -161,6 +183,9 @@ static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff,
while (!token) {
*max_scan_line = AOMMAX(*max_scan_line, scan[c]);
token_cache[scan[c]] = 0;
+#if CONFIG_SYMBOLRATE
+ av1_record_coeff(xd->counts, 0);
+#endif
++c;
dqv = dq[1];
ctx = get_coef_context(nb, token_cache, c);
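
get_coef_context(), used above to pick the CDF context for the next scan position, is sketched below under the assumption that it is the usual VP9-style rounded average of the energy classes cached for the two already-decoded neighbours; the authoritative definition lives in av1/common/entropy.h, and the names here are illustrative.

#include <stdint.h>
#include <stdio.h>

#define MAX_NEIGHBORS 2 /* assumed: two neighbours per scan position */

/* Assumed definition: context = rounded mean of the two neighbours'
 * energy classes stored in token_cache. */
static int coef_context_sketch(const int16_t *neighbors,
                               const uint8_t *token_cache, int c) {
  return (1 + token_cache[neighbors[MAX_NEIGHBORS * c + 0]] +
          token_cache[neighbors[MAX_NEIGHBORS * c + 1]]) >> 1;
}

int main(void) {
  const int16_t neighbors[] = { 0, 0, 0, 1, 1, 2 }; /* toy neighbour table */
  const uint8_t token_cache[] = { 2, 1, 0 };        /* cached energy classes */
  printf("%d\n", coef_context_sketch(neighbors, token_cache, 2)); /* (1+1+0)>>1 = 1 */
  return 0;
}
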
@@ -168,18 +193,20 @@ static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff,
last_pos = (c + 1 == max_eob);
- comb_token = last_pos ? 2 * aom_read_bit(r, ACCT_STR) + 2
- : aom_read_symbol(r, coef_head_cdfs[band][ctx],
- HEAD_TOKENS, ACCT_STR) +
- 1;
+ comb_token =
+ last_pos
+ ? 2 * av1_read_record_bit(xd->counts, r, ACCT_STR) + 2
+ : av1_read_record_symbol(xd->counts, r, coef_head_cdfs[band][ctx],
+ HEAD_TOKENS, ACCT_STR) +
+ 1;
token = comb_token >> 1;
}
more_data = comb_token & 1;
if (token > ONE_TOKEN)
- token +=
- aom_read_symbol(r, coef_tail_cdfs[band][ctx], TAIL_TOKENS, ACCT_STR);
+ token += av1_read_record_symbol(xd->counts, r, coef_tail_cdfs[band][ctx],
+ TAIL_TOKENS, ACCT_STR);
#if CONFIG_NEW_QUANT
dqv_val = &dq_val[band][0];
#endif // CONFIG_NEW_QUANT
@@ -187,7 +214,10 @@ static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff,
*max_scan_line = AOMMAX(*max_scan_line, scan[c]);
token_cache[scan[c]] = av1_pt_energy_class[token];
- val = token_to_value(r, token, tx_size, xd->bd);
+ val = token_to_value(xd->counts, r, token, tx_size, xd->bd);
+#if CONFIG_SYMBOLRATE
+ av1_record_coeff(xd->counts, val);
+#endif
#if CONFIG_NEW_QUANT
v = av1_dequant_abscoeff_nuq(val, dqv, dqv_val);
@@ -195,14 +225,15 @@ static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff,
#else
#if CONFIG_AOM_QM
// Apply quant matrix only for 2D transforms
- if (IS_2D_TRANSFORM(tx_type))
+ if (IS_2D_TRANSFORM(tx_type) && iqmatrix != NULL)
dqv = ((iqmatrix[scan[c]] * (int)dqv) + (1 << (AOM_QM_BITS - 1))) >>
AOM_QM_BITS;
#endif
v = (val * dqv) >> dq_shift;
#endif
- v = (int)check_range(aom_read_bit(r, ACCT_STR) ? -v : v, xd->bd);
+ v = (int)check_range(av1_read_record_bit(xd->counts, r, ACCT_STR) ? -v : v,
+ xd->bd);
dqcoeff[scan[c]] = v;
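
When CONFIG_AOM_QM is on and CONFIG_NEW_QUANT is off, the lines above scale the dequantizer by a per-coefficient quant-matrix weight before applying the transform-size shift and the sign bit. A self-contained sketch of that arithmetic, assuming AOM_QM_BITS is 5; the weight and dequant values are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define QM_BITS 5 /* assumed value of AOM_QM_BITS */

/* val: token magnitude, sign: the final read bit above, dqv: base
 * dequantizer, iqm_entry: quant-matrix weight at this scan position,
 * dq_shift: derived from the transform size. */
static int dequant_one(int val, int sign, int32_t dqv, int iqm_entry,
                       int dq_shift) {
  dqv = ((iqm_entry * dqv) + (1 << (QM_BITS - 1))) >> QM_BITS;
  const int v = (val * (int)dqv) >> dq_shift;
  return sign ? -v : v;
}

int main(void) {
  printf("%d\n", dequant_one(3, 1, 40, 24, 1)); /* prints -45 */
  return 0;
}
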
@@ -218,22 +249,15 @@ static int decode_coefs(MACROBLOCKD *xd, PLANE_TYPE type, tran_low_t *dqcoeff,
}
#endif // !CONFIG_PVQ
-#if CONFIG_PALETTE
-void av1_decode_palette_tokens(MACROBLOCKD *const xd, int plane,
- aom_reader *r) {
- const MODE_INFO *const mi = xd->mi[0];
- const MB_MODE_INFO *const mbmi = &mi->mbmi;
+static void decode_color_map_tokens(Av1ColorMapParam *param, aom_reader *r) {
uint8_t color_order[PALETTE_MAX_SIZE];
- const int n = mbmi->palette_mode_info.palette_size[plane];
- uint8_t *const color_map = xd->plane[plane].color_index_map;
- aom_cdf_prob(
- *palette_cdf)[PALETTE_COLOR_INDEX_CONTEXTS][CDF_SIZE(PALETTE_COLORS)] =
- plane ? xd->tile_ctx->palette_uv_color_index_cdf
- : xd->tile_ctx->palette_y_color_index_cdf;
- int plane_block_width, plane_block_height, rows, cols;
- av1_get_block_dimensions(mbmi->sb_type, plane, xd, &plane_block_width,
- &plane_block_height, &rows, &cols);
- assert(plane == 0 || plane == 1);
+ const int n = param->n_colors;
+ uint8_t *const color_map = param->color_map;
+ MapCdf color_map_cdf = param->map_cdf;
+ int plane_block_width = param->plane_width;
+ int plane_block_height = param->plane_height;
+ int rows = param->rows;
+ int cols = param->cols;
// The first color index.
color_map[0] = av1_read_uniform(r, n);
@@ -246,14 +270,14 @@ void av1_decode_palette_tokens(MACROBLOCKD *const xd, int plane,
const int color_ctx = av1_get_palette_color_index_context(
color_map, plane_block_width, (i - j), j, n, color_order, NULL);
const int color_idx = aom_read_symbol(
- r, palette_cdf[n - PALETTE_MIN_SIZE][color_ctx], n, ACCT_STR);
+ r, color_map_cdf[n - PALETTE_MIN_SIZE][color_ctx], n, ACCT_STR);
assert(color_idx >= 0 && color_idx < n);
color_map[(i - j) * plane_block_width + j] = color_order[color_idx];
}
}
// Copy last column to extra columns.
if (cols < plane_block_width) {
- for (int i = 0; i < plane_block_height; ++i) {
+ for (int i = 0; i < rows; ++i) {
memset(color_map + i * plane_block_width + cols,
color_map[i * plane_block_width + cols - 1],
(plane_block_width - cols));
@@ -265,7 +289,7 @@ void av1_decode_palette_tokens(MACROBLOCKD *const xd, int plane,
const int color_ctx = av1_get_palette_color_index_context(
color_map, plane_block_width, i, j, n, color_order, NULL);
const int color_idx = aom_read_symbol(
- r, palette_cdf[n - PALETTE_MIN_SIZE][color_ctx], n, ACCT_STR);
+ r, color_map_cdf[n - PALETTE_MIN_SIZE][color_ctx], n, ACCT_STR);
assert(color_idx >= 0 && color_idx < n);
color_map[i * plane_block_width + j] = color_order[color_idx];
}
@@ -280,7 +304,60 @@ void av1_decode_palette_tokens(MACROBLOCKD *const xd, int plane,
color_map + (rows - 1) * plane_block_width, plane_block_width);
}
}
-#endif // CONFIG_PALETTE
+
+static void get_palette_params(const MACROBLOCKD *const xd, int plane,
+ BLOCK_SIZE bsize, Av1ColorMapParam *params) {
+ assert(plane == 0 || plane == 1);
+ const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+ const PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
+ params->color_map = xd->plane[plane].color_index_map;
+ params->map_cdf = plane ? xd->tile_ctx->palette_uv_color_index_cdf
+ : xd->tile_ctx->palette_y_color_index_cdf;
+ params->n_colors = pmi->palette_size[plane];
+ av1_get_block_dimensions(bsize, plane, xd, &params->plane_width,
+ &params->plane_height, &params->rows, &params->cols);
+}
+
+#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
+static void get_mrc_params(const MACROBLOCKD *const xd, TX_SIZE tx_size,
+ Av1ColorMapParam *params) {
+ memset(params, 0, sizeof(*params));
+ const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+ const int is_inter = is_inter_block(mbmi);
+ params->color_map = xd->mrc_mask;
+ params->map_cdf = is_inter ? xd->tile_ctx->mrc_mask_inter_cdf
+ : xd->tile_ctx->mrc_mask_intra_cdf;
+ params->n_colors = 2;
+ params->plane_width = tx_size_wide[tx_size];
+ params->rows = tx_size_high[tx_size];
+ params->cols = tx_size_wide[tx_size];
+}
+#endif // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
+
+void av1_decode_palette_tokens(MACROBLOCKD *const xd, int plane,
+ aom_reader *r) {
+ const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+ assert(plane == 0 || plane == 1);
+ assert(mbmi->sb_type >= BLOCK_8X8);
+ Av1ColorMapParam color_map_params;
+ memset(&color_map_params, 0, sizeof(color_map_params));
+ get_palette_params(xd, plane, mbmi->sb_type, &color_map_params);
+ decode_color_map_tokens(&color_map_params, r);
+}
+
+#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
+static void decode_mrc_tokens(MACROBLOCKD *const xd, TX_TYPE tx_size,
+ aom_reader *r) {
+ const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
+ const int is_inter = is_inter_block(mbmi);
+ if ((is_inter && !SIGNAL_MRC_MASK_INTER) ||
+ (!is_inter && !SIGNAL_MRC_MASK_INTRA))
+ return;
+ Av1ColorMapParam color_map_params;
+ get_mrc_params(xd, tx_size, &color_map_params);
+ decode_color_map_tokens(&color_map_params, r);
+}
+#endif // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
#if !CONFIG_PVQ || CONFIG_VAR_TX
int av1_decode_block_tokens(AV1_COMMON *cm, MACROBLOCKD *const xd, int plane,
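
The tail of decode_color_map_tokens() above pads the decoded color map out to the full plane block: only rows x cols entries are read from the bitstream, the last decoded column is replicated across the extra columns (note the loop now runs over rows, not plane_block_height), and the last decoded row is then replicated downward. A standalone sketch of that padding, with illustrative names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void pad_color_map(uint8_t *map, int pbw, int pbh, int rows, int cols) {
  /* Replicate the last decoded column across the extra columns. */
  if (cols < pbw) {
    for (int i = 0; i < rows; ++i)
      memset(map + i * pbw + cols, map[i * pbw + cols - 1], pbw - cols);
  }
  /* Replicate the last (now fully padded) row across the extra rows. */
  if (rows < pbh) {
    for (int i = rows; i < pbh; ++i)
      memcpy(map + i * pbw, map + (rows - 1) * pbw, pbw);
  }
}

int main(void) {
  uint8_t map[16] = { 1, 2, 0, 0, 3, 4, 0, 0 }; /* 2x2 decoded in a 4x4 block */
  pad_color_map(map, 4, 4, 2, 2);
  for (int i = 0; i < 4; ++i)
    printf("%d %d %d %d\n", map[i * 4 + 0], map[i * 4 + 1], map[i * 4 + 2],
           map[i * 4 + 3]);
  return 0;
}
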
@@ -297,14 +374,19 @@ int av1_decode_block_tokens(AV1_COMMON *cm, MACROBLOCKD *const xd, int plane,
get_dq_profile_from_ctx(xd->qindex[seg_id], ctx, ref, pd->plane_type);
#endif // CONFIG_NEW_QUANT
+#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
+ if (tx_type == MRC_DCT) decode_mrc_tokens(xd, tx_size, r);
+#endif // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
+
const int eob =
decode_coefs(xd, pd->plane_type, pd->dqcoeff, tx_size, tx_type, dequant,
#if CONFIG_NEW_QUANT
pd->seg_dequant_nuq[seg_id][dq],
-#endif // CONFIG_NEW_QUANT
+#else
#if CONFIG_AOM_QM
pd->seg_iqmatrix[seg_id],
#endif // CONFIG_AOM_QM
+#endif // CONFIG_NEW_QUANT
ctx, sc->scan, sc->neighbors, max_scan_line, r);
av1_set_contexts(xd, pd, plane, tx_size, eob > 0, x, y);
#if CONFIG_ADAPT_SCAN
diff --git a/third_party/aom/av1/decoder/detokenize.h b/third_party/aom/av1/decoder/detokenize.h
index 0e58a2803..eb31d58c6 100644
--- a/third_party/aom/av1/decoder/detokenize.h
+++ b/third_party/aom/av1/decoder/detokenize.h
@@ -22,9 +22,7 @@
extern "C" {
#endif
-#if CONFIG_PALETTE
void av1_decode_palette_tokens(MACROBLOCKD *const xd, int plane, aom_reader *r);
-#endif // CONFIG_PALETTE
#if !CONFIG_PVQ || CONFIG_VAR_TX
int av1_decode_block_tokens(AV1_COMMON *cm, MACROBLOCKD *const xd, int plane,
diff --git a/third_party/aom/av1/decoder/dthread.c b/third_party/aom/av1/decoder/dthread.c
index 50f8ed192..7f16b233c 100644
--- a/third_party/aom/av1/decoder/dthread.c
+++ b/third_party/aom/av1/decoder/dthread.c
@@ -181,7 +181,12 @@ void av1_frameworker_copy_context(AVxWorker *const dst_worker,
memcpy(dst_cm->lf_info.lfthr, src_cm->lf_info.lfthr,
(MAX_LOOP_FILTER + 1) * sizeof(loop_filter_thresh));
dst_cm->lf.last_sharpness_level = src_cm->lf.sharpness_level;
+#if CONFIG_LOOPFILTER_LEVEL
+ dst_cm->lf.filter_level[0] = src_cm->lf.filter_level[0];
+ dst_cm->lf.filter_level[1] = src_cm->lf.filter_level[1];
+#else
dst_cm->lf.filter_level = src_cm->lf.filter_level;
+#endif
memcpy(dst_cm->lf.ref_deltas, src_cm->lf.ref_deltas, TOTAL_REFS_PER_FRAME);
memcpy(dst_cm->lf.mode_deltas, src_cm->lf.mode_deltas, MAX_MODE_LF_DELTAS);
dst_cm->seg = src_cm->seg;
diff --git a/third_party/aom/av1/decoder/inspection.c b/third_party/aom/av1/decoder/inspection.c
index 4f98f18ea..98c51d4ba 100644
--- a/third_party/aom/av1/decoder/inspection.c
+++ b/third_party/aom/av1/decoder/inspection.c
@@ -18,13 +18,19 @@
#include "av1/common/cfl.h"
#endif
-void ifd_init(insp_frame_data *fd, int frame_width, int frame_height) {
- fd->mi_cols = ALIGN_POWER_OF_TWO(frame_width, 3) >> MI_SIZE_LOG2;
- fd->mi_rows = ALIGN_POWER_OF_TWO(frame_height, 3) >> MI_SIZE_LOG2;
+static void ifd_init_mi_rc(insp_frame_data *fd, int mi_cols, int mi_rows) {
+ fd->mi_cols = mi_cols;
+ fd->mi_rows = mi_rows;
fd->mi_grid = (insp_mi_data *)aom_malloc(sizeof(insp_mi_data) * fd->mi_rows *
fd->mi_cols);
}
+void ifd_init(insp_frame_data *fd, int frame_width, int frame_height) {
+ int mi_cols = ALIGN_POWER_OF_TWO(frame_width, 3) >> MI_SIZE_LOG2;
+ int mi_rows = ALIGN_POWER_OF_TWO(frame_height, 3) >> MI_SIZE_LOG2;
+ ifd_init_mi_rc(fd, mi_cols, mi_rows);
+}
+
void ifd_clear(insp_frame_data *fd) {
aom_free(fd->mi_grid);
fd->mi_grid = NULL;
@@ -35,9 +41,9 @@ void ifd_clear(insp_frame_data *fd) {
int ifd_inspect(insp_frame_data *fd, void *decoder) {
struct AV1Decoder *pbi = (struct AV1Decoder *)decoder;
AV1_COMMON *const cm = &pbi->common;
- // TODO(negge): Should this function just call ifd_clear() and ifd_init()?
if (fd->mi_rows != cm->mi_rows || fd->mi_cols != cm->mi_cols) {
- return 0;
+ ifd_clear(fd);
+ ifd_init_mi_rc(fd, cm->mi_rows, cm->mi_cols);
}
fd->show_frame = cm->show_frame;
fd->frame_type = cm->frame_type;
@@ -85,26 +91,26 @@ int ifd_inspect(insp_frame_data *fd, void *decoder) {
// Skip Flag
mi->skip = mbmi->skip;
#if CONFIG_DUAL_FILTER
- mi->filter[0] = mbmi->interp_filter[0];
- mi->filter[1] = mbmi->interp_filter[1];
+ mi->filter[0] = av1_extract_interp_filter(mbmi->interp_filters, 0);
+ mi->filter[1] = av1_extract_interp_filter(mbmi->interp_filters, 1);
#else
- mi->filter = mbmi->interp_filter;
+ mi->filter = av1_extract_interp_filter(mbmi->interp_filters, 0);
#endif
// Transform
mi->tx_type = mbmi->tx_type;
mi->tx_size = mbmi->tx_size;
#if CONFIG_CDEF
- mi->cdef_level = cm->cdef_strengths[mbmi->cdef_strength] / CLPF_STRENGTHS;
+ mi->cdef_level =
+ cm->cdef_strengths[mbmi->cdef_strength] / CDEF_SEC_STRENGTHS;
mi->cdef_strength =
- cm->cdef_strengths[mbmi->cdef_strength] % CLPF_STRENGTHS;
+ cm->cdef_strengths[mbmi->cdef_strength] % CDEF_SEC_STRENGTHS;
mi->cdef_strength += mi->cdef_strength == 3;
#endif
#if CONFIG_CFL
- if (mbmi->uv_mode == UV_DC_PRED) {
+ if (mbmi->uv_mode == UV_CFL_PRED) {
mi->cfl_alpha_idx = mbmi->cfl_alpha_idx;
- mi->cfl_alpha_sign = (mbmi->cfl_alpha_signs[CFL_PRED_V] << CFL_PRED_V) +
- mbmi->cfl_alpha_signs[CFL_PRED_U];
+ mi->cfl_alpha_sign = mbmi->cfl_alpha_signs;
} else {
mi->cfl_alpha_idx = 0;
mi->cfl_alpha_sign = 0;
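
In the inspector hunk above, each entry of cm->cdef_strengths packs the primary strength and the secondary-strength index together; the inspector splits them with a divide and a modulo (stored as cdef_level and cdef_strength respectively) and bumps a secondary index of 3 up to 4, since the allowed secondary strengths are 0, 1, 2 and 4. A sketch assuming CDEF_SEC_STRENGTHS is 4:

#include <stdio.h>

#define SEC_STRENGTHS 4 /* assumed value of CDEF_SEC_STRENGTHS */

static void unpack_cdef(int packed, int *primary, int *secondary) {
  *primary = packed / SEC_STRENGTHS;
  *secondary = packed % SEC_STRENGTHS;
  *secondary += (*secondary == 3); /* index 3 stands for strength 4 */
}

int main(void) {
  int pri, sec;
  unpack_cdef(23, &pri, &sec); /* 23 = 5 * 4 + 3 */
  printf("primary=%d secondary=%d\n", pri, sec); /* primary=5 secondary=4 */
  return 0;
}
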
diff --git a/third_party/aom/av1/decoder/symbolrate.h b/third_party/aom/av1/decoder/symbolrate.h
new file mode 100644
index 000000000..023287732
--- /dev/null
+++ b/third_party/aom/av1/decoder/symbolrate.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include "aom_dsp/bitreader.h"
+
+#ifndef AV1_DECODER_SYMBOLRATE_H_
+#define AV1_DECODER_SYMBOLRATE_H_
+
+#if CONFIG_SYMBOLRATE
+static INLINE void av1_dump_symbol_rate(struct AV1Common *cm) {
+ const FRAME_COUNTS *counts = &cm->counts;
+ printf("%d %d %d %d\n", counts->coeff_num[0], counts->coeff_num[1],
+ counts->symbol_num[0], counts->symbol_num[1]);
+}
+static INLINE int av1_read_record_symbol(FRAME_COUNTS *counts, aom_reader *r,
+ aom_cdf_prob *cdf, int nsymbs,
+ const char *str) {
+ (void)str;
+ if (counts) ++counts->symbol_num[0];
+ return aom_read_symbol(r, cdf, nsymbs, str);
+}
+
+#if CONFIG_LV_MAP
+static INLINE int av1_read_record_bin(FRAME_COUNTS *counts, aom_reader *r,
+ aom_cdf_prob *cdf, int nsymbs,
+ const char *str) {
+ (void)str;
+ if (counts) ++counts->symbol_num[0];
+ return aom_read_bin(r, cdf, nsymbs, str);
+}
+#endif
+
+static INLINE int av1_read_record(FRAME_COUNTS *counts, aom_reader *r, int prob,
+ const char *str) {
+ (void)str;
+ if (counts) ++counts->symbol_num[0];
+ return aom_read(r, prob, str);
+}
+
+static INLINE int av1_read_record_cdf(FRAME_COUNTS *counts, aom_reader *r,
+ const aom_cdf_prob *cdf, int nsymbs,
+ const char *str) {
+ (void)str;
+ if (counts) ++counts->symbol_num[0];
+ return aom_read_cdf(r, cdf, nsymbs, str);
+}
+
+static INLINE int av1_read_record_bit(FRAME_COUNTS *counts, aom_reader *r,
+ const char *str) {
+ (void)str;
+ if (counts) ++counts->symbol_num[1];
+ return aom_read_bit(r, str);
+}
+
+static INLINE void av1_record_coeff(FRAME_COUNTS *counts, tran_low_t qcoeff) {
+ assert(qcoeff >= 0);
+ if (counts) ++counts->coeff_num[qcoeff != 0];
+}
+#else // CONFIG_SYMBOLRATE
+
+#define av1_read_record_symbol(counts, r, cdf, nsymbs, ACCT_STR_NAME) \
+ aom_read_symbol(r, cdf, nsymbs, ACCT_STR_NAME)
+
+#if CONFIG_LV_MAP
+#define av1_read_record_bin(counts, r, cdf, nsymbs, ACCT_STR_NAME) \
+ aom_read_bin(r, cdf, nsymbs, ACCT_STR_NAME)
+#endif
+
+#define av1_read_record(counts, r, prob, ACCT_STR_NAME) \
+ aom_read(r, prob, ACCT_STR_NAME)
+
+#define av1_read_record_cdf(counts, r, cdf, nsymbs, ACCT_STR_NAME) \
+ aom_read_cdf(r, cdf, nsymbs, ACCT_STR_NAME)
+
+#define av1_read_record_bit(counts, r, ACCT_STR_NAME) \
+ aom_read_bit(r, ACCT_STR_NAME)
+
+#endif // CONFIG_SYMBOLRATE
+
+#endif // AV1_DECODER_SYMBOLRATE_H_
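
To make the wrapper pattern concrete, here is a toy, self-contained imitation of how the av1_read_record_* helpers thread a FRAME_COUNTS pointer through the decode path when CONFIG_SYMBOLRATE is enabled; the aom_reader is replaced by a stub and the struct is simplified. When the flag is off, the macros above reduce to the plain aom_read_* calls and the counts argument is discarded.

#include <stdio.h>

/* Simplified stand-in for FRAME_COUNTS. */
typedef struct {
  unsigned symbol_num[2]; /* [0]: multi-symbol reads, [1]: raw bit reads */
  unsigned coeff_num[2];  /* [0]: zero coeffs,        [1]: nonzero coeffs */
} toy_counts;

static int stub_read_bit(void) { return 1; } /* pretend bitstream */

/* Mirrors av1_read_record_bit(): count the read if recording is active. */
static int read_record_bit(toy_counts *counts) {
  if (counts) ++counts->symbol_num[1];
  return stub_read_bit();
}

/* Mirrors av1_record_coeff(): bucket coefficients into zero / nonzero. */
static void record_coeff(toy_counts *counts, int qcoeff) {
  if (counts) ++counts->coeff_num[qcoeff != 0];
}

int main(void) {
  toy_counts counts = { { 0, 0 }, { 0, 0 } };
  for (int i = 0; i < 3; ++i) (void)read_record_bit(&counts);
  record_coeff(&counts, 0);
  record_coeff(&counts, 7);
  /* Same layout as av1_dump_symbol_rate(): zero coeffs, nonzero coeffs,
   * symbols, raw bits. */
  printf("%u %u %u %u\n", counts.coeff_num[0], counts.coeff_num[1],
         counts.symbol_num[0], counts.symbol_num[1]);
  return 0;
}
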