path: root/third_party/aom/av1/decoder/decodemv.c
author    trav90 <travawine@palemoon.org>    2018-10-18 06:04:57 -0500
committer trav90 <travawine@palemoon.org>    2018-10-18 06:04:57 -0500
commit    7369c7d7a5eed32963d8af37658286617919f91c (patch)
tree      5397ce7ee9bca1641118fdc3187bd9e2b24fdc9c /third_party/aom/av1/decoder/decodemv.c
parent    77887af9c4ad1420bbdb33984af4f74b55ca59db (diff)
Update aom to commit id f5bdeac22930ff4c6b219be49c843db35970b918
Diffstat (limited to 'third_party/aom/av1/decoder/decodemv.c')
-rw-r--r--    third_party/aom/av1/decoder/decodemv.c    1133
1 file changed, 898 insertions, 235 deletions
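The first hunk below deletes the local read_uniform() helper; the call sites further down now use av1_read_uniform(), presumably a shared copy of the same routine added elsewhere in this aom update. The helper decodes a symbol in [0, n) with a quasi-uniform code: with l = get_unsigned_bits(n) and m = (1 << l) - n, the first m values are coded in l - 1 bits and the remaining n - m values in l bits. Below is a minimal self-contained sketch of that logic; the toy_* reader is an illustrative stand-in for aom_reader/aom_read_literal() and is not libaom code.

/*
 * Minimal sketch (not part of the patch): the quasi-uniform code implemented
 * by the deleted read_uniform().  toy_reader/toy_read_bit/toy_read_literal
 * are invented stand-ins for aom_reader/aom_read_literal().
 */
#include <assert.h>
#include <stdio.h>

typedef struct {
  const unsigned char *buf;
  int bit_pos;
} toy_reader;

static int toy_read_bit(toy_reader *r) {
  const int bit = (r->buf[r->bit_pos >> 3] >> (7 - (r->bit_pos & 7))) & 1;
  r->bit_pos++;
  return bit;
}

static int toy_read_literal(toy_reader *r, int bits) {
  int v = 0;
  for (int i = 0; i < bits; ++i) v = (v << 1) | toy_read_bit(r);
  return v;
}

/* Bit width of num_values; matches get_unsigned_bits() for positive inputs. */
static int toy_unsigned_bits(unsigned int num_values) {
  int bits = 0;
  while (num_values > 0) {
    ++bits;
    num_values >>= 1;
  }
  return bits;
}

/* Decode a symbol in [0, n): the first m = (1 << l) - n values use l - 1
 * bits, the remaining n - m values use l bits. */
static int toy_read_uniform(toy_reader *r, int n) {
  const int l = toy_unsigned_bits(n);
  const int m = (1 << l) - n;
  const int v = toy_read_literal(r, l - 1);
  assert(l != 0);
  if (v < m) return v;
  return (v << 1) - m + toy_read_bit(r);
}

int main(void) {
  /* Bits 1,1,0,...: with n = 5 (l = 3, m = 3) the first two bits give v = 3,
   * one extra bit (0) is read, and the symbol is (3 << 1) - 3 + 0 = 3. */
  const unsigned char data[] = { 0xC0 };
  toy_reader r = { data, 0 };
  printf("decoded symbol: %d\n", toy_read_uniform(&r, 5));
  return 0;
}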
diff --git a/third_party/aom/av1/decoder/decodemv.c b/third_party/aom/av1/decoder/decodemv.c
index b3ce86e49..7c8544283 100644
--- a/third_party/aom/av1/decoder/decodemv.c
+++ b/third_party/aom/av1/decoder/decodemv.c
@@ -32,18 +32,8 @@
#include "aom_dsp/aom_dsp_common.h"
#define ACCT_STR __func__
-#if CONFIG_EXT_INTRA || CONFIG_FILTER_INTRA || CONFIG_PALETTE
-static INLINE int read_uniform(aom_reader *r, int n) {
- const int l = get_unsigned_bits(n);
- const int m = (1 << l) - n;
- const int v = aom_read_literal(r, l - 1, ACCT_STR);
- assert(l != 0);
- if (v < m)
- return v;
- else
- return (v << 1) - m + aom_read_literal(r, 1, ACCT_STR);
-}
-#endif // CONFIG_EXT_INTRA || CONFIG_FILTER_INTRA || CONFIG_PALETTE
+
+#define DEC_MISMATCH_DEBUG 0
static PREDICTION_MODE read_intra_mode(aom_reader *r, aom_cdf_prob *cdf) {
return (PREDICTION_MODE)
@@ -61,12 +51,8 @@ static int read_delta_qindex(AV1_COMMON *cm, MACROBLOCKD *xd, aom_reader *r,
const int read_delta_q_flag = (b_col == 0 && b_row == 0);
int rem_bits, thr;
int i, smallval;
-#if CONFIG_EC_ADAPT
FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
(void)cm;
-#else
- FRAME_CONTEXT *ec_ctx = cm->fc;
-#endif
if ((bsize != BLOCK_LARGEST || mbmi->skip == 0) && read_delta_q_flag) {
abs = aom_read_symbol(r, ec_ctx->delta_q_cdf, DELTA_Q_PROBS + 1, ACCT_STR);
@@ -104,12 +90,8 @@ static int read_delta_lflevel(AV1_COMMON *cm, MACROBLOCKD *xd, aom_reader *r,
const int read_delta_lf_flag = (b_col == 0 && b_row == 0);
int rem_bits, thr;
int i, smallval;
-#if CONFIG_EC_ADAPT
FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
(void)cm;
-#else
- FRAME_CONTEXT *ec_ctx = cm->fc;
-#endif
if ((bsize != BLOCK_64X64 || mbmi->skip == 0) && read_delta_lf_flag) {
abs =
@@ -142,51 +124,56 @@ static PREDICTION_MODE read_intra_mode_y(FRAME_CONTEXT *ec_ctx, MACROBLOCKD *xd,
aom_reader *r, int size_group) {
const PREDICTION_MODE y_mode =
read_intra_mode(r, ec_ctx->y_mode_cdf[size_group]);
+#if CONFIG_ENTROPY_STATS
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->y_mode[size_group][y_mode];
+#else
+ /* TODO(negge): Can we remove this parameter? */
+ (void)xd;
+#endif // CONFIG_ENTROPY_STATS
return y_mode;
}
-static PREDICTION_MODE read_intra_mode_uv(FRAME_CONTEXT *ec_ctx,
- MACROBLOCKD *xd, aom_reader *r,
- PREDICTION_MODE y_mode) {
- const PREDICTION_MODE uv_mode =
+static UV_PREDICTION_MODE read_intra_mode_uv(FRAME_CONTEXT *ec_ctx,
+ MACROBLOCKD *xd, aom_reader *r,
+ PREDICTION_MODE y_mode) {
+ const UV_PREDICTION_MODE uv_mode =
read_intra_mode(r, ec_ctx->uv_mode_cdf[y_mode]);
+#if CONFIG_ENTROPY_STATS
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->uv_mode[y_mode][uv_mode];
+#else
+ /* TODO(negge): Can we remove this parameter? */
+ (void)xd;
+#endif // CONFIG_ENTROPY_STATS
return uv_mode;
}
#if CONFIG_CFL
-static int read_cfl_alphas(FRAME_CONTEXT *const ec_ctx, aom_reader *r, int skip,
+static int read_cfl_alphas(FRAME_CONTEXT *const ec_ctx, aom_reader *r,
CFL_SIGN_TYPE signs_out[CFL_PRED_PLANES]) {
- if (skip) {
- signs_out[CFL_PRED_U] = CFL_SIGN_POS;
- signs_out[CFL_PRED_V] = CFL_SIGN_POS;
- return 0;
- } else {
- const int ind = aom_read_symbol(r, ec_ctx->cfl_alpha_cdf, CFL_ALPHABET_SIZE,
- "cfl:alpha");
- // Signs are only coded for nonzero values
- // sign == 0 implies negative alpha
- // sign == 1 implies positive alpha
- signs_out[CFL_PRED_U] = cfl_alpha_codes[ind][CFL_PRED_U]
- ? aom_read_bit(r, "cfl:sign")
- : CFL_SIGN_POS;
- signs_out[CFL_PRED_V] = cfl_alpha_codes[ind][CFL_PRED_V]
- ? aom_read_bit(r, "cfl:sign")
- : CFL_SIGN_POS;
-
- return ind;
- }
+ const int ind =
+ aom_read_symbol(r, ec_ctx->cfl_alpha_cdf, CFL_ALPHABET_SIZE, "cfl:alpha");
+ // Signs are only coded for nonzero values
+ // sign == 0 implies negative alpha
+ // sign == 1 implies positive alpha
+ signs_out[CFL_PRED_U] = cfl_alpha_codes[ind][CFL_PRED_U]
+ ? aom_read_bit(r, "cfl:sign")
+ : CFL_SIGN_POS;
+ signs_out[CFL_PRED_V] = cfl_alpha_codes[ind][CFL_PRED_V]
+ ? aom_read_bit(r, "cfl:sign")
+ : CFL_SIGN_POS;
+
+ return ind;
}
#endif
#if CONFIG_EXT_INTER && CONFIG_INTERINTRA
static INTERINTRA_MODE read_interintra_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
aom_reader *r, int size_group) {
- const INTERINTRA_MODE ii_mode = (INTERINTRA_MODE)aom_read_tree(
- r, av1_interintra_mode_tree, cm->fc->interintra_mode_prob[size_group],
+ (void)cm;
+ const INTERINTRA_MODE ii_mode = (INTERINTRA_MODE)aom_read_symbol(
+ r, xd->tile_ctx->interintra_mode_cdf[size_group], INTERINTRA_MODES,
ACCT_STR);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->interintra_mode[size_group][ii_mode];
@@ -198,9 +185,14 @@ static PREDICTION_MODE read_inter_mode(FRAME_CONTEXT *ec_ctx, MACROBLOCKD *xd,
aom_reader *r, int16_t ctx) {
FRAME_COUNTS *counts = xd->counts;
int16_t mode_ctx = ctx & NEWMV_CTX_MASK;
- aom_prob mode_prob = ec_ctx->newmv_prob[mode_ctx];
+ int is_newmv, is_zeromv, is_refmv;
+#if CONFIG_NEW_MULTISYMBOL
+ is_newmv = aom_read_symbol(r, ec_ctx->newmv_cdf[mode_ctx], 2, ACCT_STR) == 0;
+#else
+ is_newmv = aom_read(r, ec_ctx->newmv_prob[mode_ctx], ACCT_STR) == 0;
+#endif
- if (aom_read(r, mode_prob, ACCT_STR) == 0) {
+ if (is_newmv) {
if (counts) ++counts->newmv_mode[mode_ctx][0];
return NEWMV;
}
@@ -210,8 +202,13 @@ static PREDICTION_MODE read_inter_mode(FRAME_CONTEXT *ec_ctx, MACROBLOCKD *xd,
mode_ctx = (ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
- mode_prob = ec_ctx->zeromv_prob[mode_ctx];
- if (aom_read(r, mode_prob, ACCT_STR) == 0) {
+#if CONFIG_NEW_MULTISYMBOL
+ is_zeromv =
+ aom_read_symbol(r, ec_ctx->zeromv_cdf[mode_ctx], 2, ACCT_STR) == 0;
+#else
+ is_zeromv = aom_read(r, ec_ctx->zeromv_prob[mode_ctx], ACCT_STR) == 0;
+#endif
+ if (is_zeromv) {
if (counts) ++counts->zeromv_mode[mode_ctx][0];
return ZEROMV;
}
@@ -223,9 +220,13 @@ static PREDICTION_MODE read_inter_mode(FRAME_CONTEXT *ec_ctx, MACROBLOCKD *xd,
if (ctx & (1 << SKIP_NEARMV_OFFSET)) mode_ctx = 7;
if (ctx & (1 << SKIP_NEARESTMV_SUB8X8_OFFSET)) mode_ctx = 8;
- mode_prob = ec_ctx->refmv_prob[mode_ctx];
+#if CONFIG_NEW_MULTISYMBOL
+ is_refmv = aom_read_symbol(r, ec_ctx->refmv_cdf[mode_ctx], 2, ACCT_STR) == 0;
+#else
+ is_refmv = aom_read(r, ec_ctx->refmv_prob[mode_ctx], ACCT_STR) == 0;
+#endif
- if (aom_read(r, mode_prob, ACCT_STR) == 0) {
+ if (is_refmv) {
if (counts) ++counts->refmv_mode[mode_ctx][0];
return NEARESTMV;
@@ -238,28 +239,33 @@ static PREDICTION_MODE read_inter_mode(FRAME_CONTEXT *ec_ctx, MACROBLOCKD *xd,
assert(0);
}
-static void read_drl_idx(const AV1_COMMON *cm, MACROBLOCKD *xd,
+static void read_drl_idx(FRAME_CONTEXT *ec_ctx, MACROBLOCKD *xd,
MB_MODE_INFO *mbmi, aom_reader *r) {
uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
mbmi->ref_mv_idx = 0;
#if CONFIG_EXT_INTER
+#if CONFIG_COMPOUND_SINGLEREF
+ if (mbmi->mode == NEWMV || mbmi->mode == NEW_NEWMV ||
+ mbmi->mode == SR_NEW_NEWMV) {
+#else // !CONFIG_COMPOUND_SINGLEREF
if (mbmi->mode == NEWMV || mbmi->mode == NEW_NEWMV) {
-#else
+#endif // CONFIG_COMPOUND_SINGLEREF
+#else // !CONFIG_EXT_INTER
if (mbmi->mode == NEWMV) {
-#endif
+#endif // CONFIG_EXT_INTER
int idx;
for (idx = 0; idx < 2; ++idx) {
if (xd->ref_mv_count[ref_frame_type] > idx + 1) {
uint8_t drl_ctx = av1_drl_ctx(xd->ref_mv_stack[ref_frame_type], idx);
- aom_prob drl_prob = cm->fc->drl_prob[drl_ctx];
- if (!aom_read(r, drl_prob, ACCT_STR)) {
- mbmi->ref_mv_idx = idx;
- if (xd->counts) ++xd->counts->drl_mode[drl_ctx][0];
- return;
- }
- mbmi->ref_mv_idx = idx + 1;
- if (xd->counts) ++xd->counts->drl_mode[drl_ctx][1];
+#if CONFIG_NEW_MULTISYMBOL
+ int drl_idx = aom_read_symbol(r, ec_ctx->drl_cdf[drl_ctx], 2, ACCT_STR);
+#else
+ int drl_idx = aom_read(r, ec_ctx->drl_prob[drl_ctx], ACCT_STR);
+#endif
+ mbmi->ref_mv_idx = idx + drl_idx;
+ if (xd->counts) ++xd->counts->drl_mode[drl_ctx][drl_idx];
+ if (!drl_idx) return;
}
}
}
@@ -272,14 +278,14 @@ static void read_drl_idx(const AV1_COMMON *cm, MACROBLOCKD *xd,
for (idx = 1; idx < 3; ++idx) {
if (xd->ref_mv_count[ref_frame_type] > idx + 1) {
uint8_t drl_ctx = av1_drl_ctx(xd->ref_mv_stack[ref_frame_type], idx);
- aom_prob drl_prob = cm->fc->drl_prob[drl_ctx];
- if (!aom_read(r, drl_prob, ACCT_STR)) {
- mbmi->ref_mv_idx = idx - 1;
- if (xd->counts) ++xd->counts->drl_mode[drl_ctx][0];
- return;
- }
- mbmi->ref_mv_idx = idx;
- if (xd->counts) ++xd->counts->drl_mode[drl_ctx][1];
+#if CONFIG_NEW_MULTISYMBOL
+ int drl_idx = aom_read_symbol(r, ec_ctx->drl_cdf[drl_ctx], 2, ACCT_STR);
+#else
+ int drl_idx = aom_read(r, ec_ctx->drl_prob[drl_ctx], ACCT_STR);
+#endif
+ mbmi->ref_mv_idx = idx + drl_idx - 1;
+ if (xd->counts) ++xd->counts->drl_mode[drl_ctx][drl_idx];
+ if (!drl_idx) return;
}
}
}
@@ -289,39 +295,106 @@ static void read_drl_idx(const AV1_COMMON *cm, MACROBLOCKD *xd,
static MOTION_MODE read_motion_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
MODE_INFO *mi, aom_reader *r) {
MB_MODE_INFO *mbmi = &mi->mbmi;
+#if CONFIG_NEW_MULTISYMBOL
+ (void)cm;
+#endif
+
+#if CONFIG_NCOBMC_ADAPT_WEIGHT
+ const MOTION_MODE last_motion_mode_allowed =
+ motion_mode_allowed_wrapper(0,
+#if CONFIG_GLOBAL_MOTION
+ 0, xd->global_motion,
+#endif // CONFIG_GLOBAL_MOTION
+#if CONFIG_WARPED_MOTION
+ xd,
+#endif
+ mi);
+#else
const MOTION_MODE last_motion_mode_allowed = motion_mode_allowed(
-#if CONFIG_GLOBAL_MOTION && SEPARATE_GLOBAL_MOTION
+#if CONFIG_GLOBAL_MOTION
0, xd->global_motion,
-#endif // CONFIG_GLOBAL_MOTION && SEPARATE_GLOBAL_MOTION
+#endif // CONFIG_GLOBAL_MOTION
+#if CONFIG_WARPED_MOTION
+ xd,
+#endif
mi);
+#endif // CONFIG_NCOBMC_ADAPT_WEIGHT
int motion_mode;
FRAME_COUNTS *counts = xd->counts;
if (last_motion_mode_allowed == SIMPLE_TRANSLATION) return SIMPLE_TRANSLATION;
#if CONFIG_MOTION_VAR && CONFIG_WARPED_MOTION
if (last_motion_mode_allowed == OBMC_CAUSAL) {
+#if CONFIG_NEW_MULTISYMBOL
+ motion_mode =
+ aom_read_symbol(r, xd->tile_ctx->obmc_cdf[mbmi->sb_type], 2, ACCT_STR);
+#else
motion_mode = aom_read(r, cm->fc->obmc_prob[mbmi->sb_type], ACCT_STR);
+#endif
if (counts) ++counts->obmc[mbmi->sb_type][motion_mode];
return (MOTION_MODE)(SIMPLE_TRANSLATION + motion_mode);
} else {
#endif // CONFIG_MOTION_VAR && CONFIG_WARPED_MOTION
motion_mode =
- aom_read_tree(r, av1_motion_mode_tree,
- cm->fc->motion_mode_prob[mbmi->sb_type], ACCT_STR);
+ aom_read_symbol(r, xd->tile_ctx->motion_mode_cdf[mbmi->sb_type],
+ MOTION_MODES, ACCT_STR);
if (counts) ++counts->motion_mode[mbmi->sb_type][motion_mode];
return (MOTION_MODE)(SIMPLE_TRANSLATION + motion_mode);
#if CONFIG_MOTION_VAR && CONFIG_WARPED_MOTION
}
#endif // CONFIG_MOTION_VAR && CONFIG_WARPED_MOTION
}
+
+#if CONFIG_NCOBMC_ADAPT_WEIGHT
+static void read_ncobmc_mode(MACROBLOCKD *xd, MODE_INFO *mi,
+#ifndef TRAINING_WEIGHTS
+ NCOBMC_MODE ncobmc_mode[2],
+#else
+ NCOBMC_MODE ncobmc_mode[][4],
+#endif
+ aom_reader *r) {
+ MB_MODE_INFO *mbmi = &mi->mbmi;
+ FRAME_COUNTS *counts = xd->counts;
+ ADAPT_OVERLAP_BLOCK ao_block = adapt_overlap_block_lookup[mbmi->sb_type];
+ if (mbmi->motion_mode != NCOBMC_ADAPT_WEIGHT) return;
+
+#ifndef TRAINING_WEIGHTS
+ ncobmc_mode[0] = aom_read_symbol(r, xd->tile_ctx->ncobmc_mode_cdf[ao_block],
+ MAX_NCOBMC_MODES, ACCT_STR);
+ if (counts) ++counts->ncobmc_mode[ao_block][ncobmc_mode[0]];
+
+ if (mi_size_wide[mbmi->sb_type] != mi_size_high[mbmi->sb_type]) {
+ ncobmc_mode[1] = aom_read_symbol(r, xd->tile_ctx->ncobmc_mode_cdf[ao_block],
+ MAX_NCOBMC_MODES, ACCT_STR);
+ if (counts) ++counts->ncobmc_mode[ao_block][ncobmc_mode[1]];
+ }
+#else
+ int i;
+ for (i = 0; i < 4; ++i) {
+ ncobmc_mode[0][i] = aom_read_symbol(
+ r, xd->tile_ctx->ncobmc_mode_cdf[ao_block], MAX_NCOBMC_MODES, ACCT_STR);
+ if (counts) ++counts->ncobmc_mode[ao_block][ncobmc_mode[0][i]];
+ }
+ if (mi_size_wide[mbmi->sb_type] != mi_size_high[mbmi->sb_type]) {
+ for (i = 0; i < 4; ++i) {
+ ncobmc_mode[1][i] =
+ aom_read_symbol(r, xd->tile_ctx->ncobmc_mode_cdf[ao_block],
+ MAX_NCOBMC_MODES, ACCT_STR);
+ if (counts) ++counts->ncobmc_mode[ao_block][ncobmc_mode[1][i]];
+ }
+ }
+#endif
+}
+#endif
#endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
#if CONFIG_EXT_INTER
static PREDICTION_MODE read_inter_compound_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
aom_reader *r, int16_t ctx) {
+ (void)cm;
const int mode =
- aom_read_tree(r, av1_inter_compound_mode_tree,
- cm->fc->inter_compound_mode_probs[ctx], ACCT_STR);
+ aom_read_symbol(r, xd->tile_ctx->inter_compound_mode_cdf[ctx],
+ INTER_COMPOUND_MODES, ACCT_STR);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->inter_compound_mode[ctx][mode];
@@ -329,6 +402,22 @@ static PREDICTION_MODE read_inter_compound_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
assert(is_inter_compound_mode(NEAREST_NEARESTMV + mode));
return NEAREST_NEARESTMV + mode;
}
+
+#if CONFIG_COMPOUND_SINGLEREF
+static PREDICTION_MODE read_inter_singleref_comp_mode(MACROBLOCKD *xd,
+ aom_reader *r,
+ int16_t ctx) {
+ const int mode =
+ aom_read_symbol(r, xd->tile_ctx->inter_singleref_comp_mode_cdf[ctx],
+ INTER_SINGLEREF_COMP_MODES, ACCT_STR);
+ FRAME_COUNTS *counts = xd->counts;
+
+ if (counts) ++counts->inter_singleref_comp_mode[ctx][mode];
+
+ assert(is_inter_singleref_comp_mode(SR_NEAREST_NEARMV + mode));
+ return SR_NEAREST_NEARMV + mode;
+}
+#endif // CONFIG_COMPOUND_SINGLEREF
#endif // CONFIG_EXT_INTER
static int read_segment_id(aom_reader *r, struct segmentation_probs *segp) {
@@ -340,6 +429,10 @@ static void read_tx_size_vartx(AV1_COMMON *cm, MACROBLOCKD *xd,
MB_MODE_INFO *mbmi, FRAME_COUNTS *counts,
TX_SIZE tx_size, int depth, int blk_row,
int blk_col, aom_reader *r) {
+#if CONFIG_NEW_MULTISYMBOL
+ FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
+ (void)cm;
+#endif
int is_split = 0;
const int tx_row = blk_row >> 1;
const int tx_col = blk_col >> 1;
@@ -367,7 +460,11 @@ static void read_tx_size_vartx(AV1_COMMON *cm, MACROBLOCKD *xd,
return;
}
+#if CONFIG_NEW_MULTISYMBOL
+ is_split = aom_read_symbol(r, ec_ctx->txfm_partition_cdf[ctx], 2, ACCT_STR);
+#else
is_split = aom_read(r, cm->fc->txfm_partition_prob[ctx], ACCT_STR);
+#endif
if (is_split) {
const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
@@ -415,12 +512,8 @@ static TX_SIZE read_selected_tx_size(AV1_COMMON *cm, MACROBLOCKD *xd,
int tx_size_cat, aom_reader *r) {
FRAME_COUNTS *counts = xd->counts;
const int ctx = get_tx_size_context(xd);
-#if CONFIG_EC_ADAPT
FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
(void)cm;
-#else
- FRAME_CONTEXT *ec_ctx = cm->fc;
-#endif
const int depth = aom_read_symbol(r, ec_ctx->tx_size_cdf[tx_size_cat][ctx],
tx_size_cat + 2, ACCT_STR);
@@ -450,16 +543,22 @@ static TX_SIZE read_tx_size(AV1_COMMON *cm, MACROBLOCKD *xd, int is_inter,
#if CONFIG_RECT_TX && (CONFIG_EXT_TX || CONFIG_VAR_TX)
if (coded_tx_size > max_txsize_lookup[bsize]) {
assert(coded_tx_size == max_txsize_lookup[bsize] + 1);
-#if CONFIG_EXT_TX && CONFIG_RECT_TX_EXT
+#if CONFIG_RECT_TX_EXT
if (is_quarter_tx_allowed(xd, &xd->mi[0]->mbmi, is_inter)) {
- int quarter_tx = aom_read(r, cm->fc->quarter_tx_size_prob, ACCT_STR);
- FRAME_COUNTS *counts = xd->counts;
+ int quarter_tx;
+
+ if (quarter_txsize_lookup[bsize] != max_txsize_lookup[bsize]) {
+ quarter_tx = aom_read(r, cm->fc->quarter_tx_size_prob, ACCT_STR);
+ FRAME_COUNTS *counts = xd->counts;
- if (counts) ++counts->quarter_tx_size[quarter_tx];
+ if (counts) ++counts->quarter_tx_size[quarter_tx];
+ } else {
+ quarter_tx = 1;
+ }
return quarter_tx ? quarter_txsize_lookup[bsize]
: max_txsize_rect_lookup[bsize];
}
-#endif // CONFIG_EXT_TX && CONFIG_RECT_TX_EXT
+#endif // CONFIG_RECT_TX_EXT
return max_txsize_rect_lookup[bsize];
}
@@ -509,7 +608,8 @@ static int read_intra_segment_id(AV1_COMMON *const cm, MACROBLOCKD *const xd,
aom_reader *r) {
struct segmentation *const seg = &cm->seg;
FRAME_COUNTS *counts = xd->counts;
- struct segmentation_probs *const segp = &cm->fc->seg;
+ FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
+ struct segmentation_probs *const segp = &ec_ctx->seg;
int segment_id;
if (!seg->enabled) return 0; // Default for disabled segmentation
@@ -539,7 +639,9 @@ static int read_inter_segment_id(AV1_COMMON *const cm, MACROBLOCKD *const xd,
int mi_row, int mi_col, aom_reader *r) {
struct segmentation *const seg = &cm->seg;
FRAME_COUNTS *counts = xd->counts;
- struct segmentation_probs *const segp = &cm->fc->seg;
+ FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
+ struct segmentation_probs *const segp = &ec_ctx->seg;
+
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
int predicted_segment_id, segment_id;
const int mi_offset = mi_row * cm->mi_cols + mi_col;
@@ -565,8 +667,13 @@ static int read_inter_segment_id(AV1_COMMON *const cm, MACROBLOCKD *const xd,
if (seg->temporal_update) {
const int ctx = av1_get_pred_context_seg_id(xd);
+#if CONFIG_NEW_MULTISYMBOL
+ aom_cdf_prob *pred_cdf = segp->pred_cdf[ctx];
+ mbmi->seg_id_predicted = aom_read_symbol(r, pred_cdf, 2, ACCT_STR);
+#else
const aom_prob pred_prob = segp->pred_probs[ctx];
mbmi->seg_id_predicted = aom_read(r, pred_prob, ACCT_STR);
+#endif
if (counts) ++counts->seg.pred[ctx][mbmi->seg_id_predicted];
if (mbmi->seg_id_predicted) {
segment_id = predicted_segment_id;
@@ -588,7 +695,12 @@ static int read_skip(AV1_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
return 1;
} else {
const int ctx = av1_get_skip_context(xd);
+#if CONFIG_NEW_MULTISYMBOL
+ FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
+ const int skip = aom_read_symbol(r, ec_ctx->skip_cdfs[ctx], 2, ACCT_STR);
+#else
const int skip = aom_read(r, cm->fc->skip_probs[ctx], ACCT_STR);
+#endif
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->skip[ctx][skip];
return skip;
@@ -690,61 +802,54 @@ static void read_palette_mode_info(AV1_COMMON *const cm, MACROBLOCKD *const xd,
const MODE_INFO *const above_mi = xd->above_mi;
const MODE_INFO *const left_mi = xd->left_mi;
const BLOCK_SIZE bsize = mbmi->sb_type;
- int n;
PALETTE_MODE_INFO *const pmi = &mbmi->palette_mode_info;
if (mbmi->mode == DC_PRED) {
int palette_y_mode_ctx = 0;
- if (above_mi)
+ if (above_mi) {
palette_y_mode_ctx +=
(above_mi->mbmi.palette_mode_info.palette_size[0] > 0);
- if (left_mi)
+ }
+ if (left_mi) {
palette_y_mode_ctx +=
(left_mi->mbmi.palette_mode_info.palette_size[0] > 0);
+ }
if (aom_read(r, av1_default_palette_y_mode_prob[bsize - BLOCK_8X8]
[palette_y_mode_ctx],
ACCT_STR)) {
pmi->palette_size[0] =
- aom_read_tree(r, av1_palette_size_tree,
- av1_default_palette_y_size_prob[bsize - BLOCK_8X8],
- ACCT_STR) +
+ aom_read_symbol(r,
+ xd->tile_ctx->palette_y_size_cdf[bsize - BLOCK_8X8],
+ PALETTE_SIZES, ACCT_STR) +
2;
- n = pmi->palette_size[0];
#if CONFIG_PALETTE_DELTA_ENCODING
read_palette_colors_y(xd, cm->bit_depth, pmi, r);
#else
- int i;
- for (i = 0; i < n; ++i)
+ for (int i = 0; i < pmi->palette_size[0]; ++i)
pmi->palette_colors[i] = aom_read_literal(r, cm->bit_depth, ACCT_STR);
#endif // CONFIG_PALETTE_DELTA_ENCODING
- xd->plane[0].color_index_map[0] = read_uniform(r, n);
- assert(xd->plane[0].color_index_map[0] < n);
}
}
- if (mbmi->uv_mode == DC_PRED) {
+ if (mbmi->uv_mode == UV_DC_PRED) {
const int palette_uv_mode_ctx = (pmi->palette_size[0] > 0);
if (aom_read(r, av1_default_palette_uv_mode_prob[palette_uv_mode_ctx],
ACCT_STR)) {
pmi->palette_size[1] =
- aom_read_tree(r, av1_palette_size_tree,
- av1_default_palette_uv_size_prob[bsize - BLOCK_8X8],
- ACCT_STR) +
+ aom_read_symbol(r,
+ xd->tile_ctx->palette_uv_size_cdf[bsize - BLOCK_8X8],
+ PALETTE_SIZES, ACCT_STR) +
2;
- n = pmi->palette_size[1];
#if CONFIG_PALETTE_DELTA_ENCODING
read_palette_colors_uv(xd, cm->bit_depth, pmi, r);
#else
- int i;
- for (i = 0; i < n; ++i) {
+ for (int i = 0; i < pmi->palette_size[1]; ++i) {
pmi->palette_colors[PALETTE_MAX_SIZE + i] =
aom_read_literal(r, cm->bit_depth, ACCT_STR);
pmi->palette_colors[2 * PALETTE_MAX_SIZE + i] =
aom_read_literal(r, cm->bit_depth, ACCT_STR);
}
#endif // CONFIG_PALETTE_DELTA_ENCODING
- xd->plane[1].color_index_map[0] = read_uniform(r, n);
- assert(xd->plane[1].color_index_map[0] < n);
}
}
}
@@ -769,7 +874,7 @@ static void read_filter_intra_mode_info(AV1_COMMON *const cm,
aom_read(r, cm->fc->filter_intra_probs[0], ACCT_STR);
if (filter_intra_mode_info->use_filter_intra_mode[0]) {
filter_intra_mode_info->filter_intra_mode[0] =
- read_uniform(r, FILTER_INTRA_MODES);
+ av1_read_uniform(r, FILTER_INTRA_MODES);
}
if (counts) {
++counts
@@ -787,7 +892,7 @@ static void read_filter_intra_mode_info(AV1_COMMON *const cm,
(void)mi_col;
#endif // CONFIG_CB4X4
- if (mbmi->uv_mode == DC_PRED
+ if (mbmi->uv_mode == UV_DC_PRED
#if CONFIG_PALETTE
&& mbmi->palette_mode_info.palette_size[1] == 0
#endif // CONFIG_PALETTE
@@ -796,7 +901,7 @@ static void read_filter_intra_mode_info(AV1_COMMON *const cm,
aom_read(r, cm->fc->filter_intra_probs[1], ACCT_STR);
if (filter_intra_mode_info->use_filter_intra_mode[1]) {
filter_intra_mode_info->filter_intra_mode[1] =
- read_uniform(r, FILTER_INTRA_MODES);
+ av1_read_uniform(r, FILTER_INTRA_MODES);
}
if (counts) {
++counts
@@ -812,21 +917,21 @@ static void read_intra_angle_info(AV1_COMMON *const cm, MACROBLOCKD *const xd,
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
#if CONFIG_INTRA_INTERP
-#if CONFIG_EC_ADAPT
FRAME_CONTEXT *const ec_ctx = xd->tile_ctx;
-#else
- FRAME_CONTEXT *const ec_ctx = cm->fc;
-#endif // CONFIG_EC_ADAPT
const int ctx = av1_get_pred_context_intra_interp(xd);
int p_angle;
#endif // CONFIG_INTRA_INTERP
(void)cm;
- if (bsize < BLOCK_8X8) return;
+
+ mbmi->angle_delta[0] = 0;
+ mbmi->angle_delta[1] = 0;
+
+ if (!av1_use_angle_delta(bsize)) return;
if (av1_is_directional_mode(mbmi->mode, bsize)) {
mbmi->angle_delta[0] =
- read_uniform(r, 2 * MAX_ANGLE_DELTA + 1) - MAX_ANGLE_DELTA;
+ av1_read_uniform(r, 2 * MAX_ANGLE_DELTA + 1) - MAX_ANGLE_DELTA;
#if CONFIG_INTRA_INTERP
p_angle = mode_to_angle_map[mbmi->mode] + mbmi->angle_delta[0] * ANGLE_STEP;
if (av1_is_intra_filter_switchable(p_angle)) {
@@ -840,9 +945,9 @@ static void read_intra_angle_info(AV1_COMMON *const cm, MACROBLOCKD *const xd,
#endif // CONFIG_INTRA_INTERP
}
- if (av1_is_directional_mode(mbmi->uv_mode, bsize)) {
+ if (av1_is_directional_mode(get_uv_mode(mbmi->uv_mode), bsize)) {
mbmi->angle_delta[1] =
- read_uniform(r, 2 * MAX_ANGLE_DELTA + 1) - MAX_ANGLE_DELTA;
+ av1_read_uniform(r, 2 * MAX_ANGLE_DELTA + 1) - MAX_ANGLE_DELTA;
}
}
#endif // CONFIG_EXT_INTRA
@@ -852,28 +957,28 @@ void av1_read_tx_type(const AV1_COMMON *const cm, MACROBLOCKD *xd,
int supertx_enabled,
#endif
#if CONFIG_TXK_SEL
- int block, int plane,
+ int blk_row, int blk_col, int block, int plane,
+ TX_SIZE tx_size,
#endif
aom_reader *r) {
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
const int inter_block = is_inter_block(mbmi);
+#if !CONFIG_TXK_SEL
#if CONFIG_VAR_TX
const TX_SIZE tx_size = inter_block ? mbmi->min_tx_size : mbmi->tx_size;
#else
const TX_SIZE tx_size = mbmi->tx_size;
#endif
-#if CONFIG_EC_ADAPT
+#endif // !CONFIG_TXK_SEL
FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
-#else
- FRAME_CONTEXT *ec_ctx = cm->fc;
-#endif
#if !CONFIG_TXK_SEL
TX_TYPE *tx_type = &mbmi->tx_type;
#else
// only y plane's tx_type is transmitted
if (plane > 0) return;
- TX_TYPE *tx_type = &mbmi->txk_type[block];
+ (void)block;
+ TX_TYPE *tx_type = &mbmi->txk_type[(blk_row << 4) + blk_col];
#endif
if (!FIXED_TX_TYPE) {
@@ -890,23 +995,22 @@ void av1_read_tx_type(const AV1_COMMON *const cm, MACROBLOCKD *xd,
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
const int eset = get_ext_tx_set(tx_size, mbmi->sb_type, inter_block,
cm->reduced_tx_set_used);
+ // eset == 0 should correspond to a set with only DCT_DCT and
+ // there is no need to read the tx_type
+ assert(eset != 0);
FRAME_COUNTS *counts = xd->counts;
if (inter_block) {
- if (eset > 0) {
- *tx_type = av1_ext_tx_inter_inv[eset][aom_read_symbol(
- r, ec_ctx->inter_ext_tx_cdf[eset][square_tx_size],
- ext_tx_cnt_inter[eset], ACCT_STR)];
- if (counts) ++counts->inter_ext_tx[eset][square_tx_size][*tx_type];
- }
+ *tx_type = av1_ext_tx_inter_inv[eset][aom_read_symbol(
+ r, ec_ctx->inter_ext_tx_cdf[eset][square_tx_size],
+ ext_tx_cnt_inter[eset], ACCT_STR)];
+ if (counts) ++counts->inter_ext_tx[eset][square_tx_size][*tx_type];
} else if (ALLOW_INTRA_EXT_TX) {
- if (eset > 0) {
- *tx_type = av1_ext_tx_intra_inv[eset][aom_read_symbol(
- r, ec_ctx->intra_ext_tx_cdf[eset][square_tx_size][mbmi->mode],
- ext_tx_cnt_intra[eset], ACCT_STR)];
- if (counts)
- ++counts->intra_ext_tx[eset][square_tx_size][mbmi->mode][*tx_type];
- }
+ *tx_type = av1_ext_tx_intra_inv[eset][aom_read_symbol(
+ r, ec_ctx->intra_ext_tx_cdf[eset][square_tx_size][mbmi->mode],
+ ext_tx_cnt_intra[eset], ACCT_STR)];
+ if (counts)
+ ++counts->intra_ext_tx[eset][square_tx_size][mbmi->mode][*tx_type];
}
} else {
*tx_type = DCT_DCT;
@@ -939,27 +1043,27 @@ void av1_read_tx_type(const AV1_COMMON *const cm, MACROBLOCKD *xd,
}
#endif // CONFIG_EXT_TX
}
+#if FIXED_TX_TYPE
+ assert(mbmi->tx_type == DCT_DCT);
+#endif
}
#if CONFIG_INTRABC
static INLINE void read_mv(aom_reader *r, MV *mv, const MV *ref,
nmv_context *ctx, nmv_context_counts *counts,
- int allow_hp);
+ MvSubpelPrecision precision);
static INLINE int is_mv_valid(const MV *mv);
static INLINE int assign_dv(AV1_COMMON *cm, MACROBLOCKD *xd, int_mv *mv,
const int_mv *ref_mv, int mi_row, int mi_col,
BLOCK_SIZE bsize, aom_reader *r) {
-#if CONFIG_EC_ADAPT
FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
(void)cm;
-#else
- FRAME_CONTEXT *ec_ctx = cm->fc;
-#endif
FRAME_COUNTS *counts = xd->counts;
nmv_context_counts *const dv_counts = counts ? &counts->dv : NULL;
- read_mv(r, &mv->as_mv, &ref_mv->as_mv, &ec_ctx->ndvc, dv_counts, 0);
+ read_mv(r, &mv->as_mv, &ref_mv->as_mv, &ec_ctx->ndvc, dv_counts,
+ MV_SUBPEL_NONE);
int valid = is_mv_valid(&mv->as_mv) &&
is_dv_valid(mv->as_mv, &xd->tile, mi_row, mi_col, bsize);
return valid;
@@ -982,11 +1086,7 @@ static void read_intra_frame_mode_info(AV1_COMMON *const cm,
// TODO(slavarnway): move x_mis, y_mis into xd ?????
const int x_mis = AOMMIN(cm->mi_cols - mi_col, bw);
const int y_mis = AOMMIN(cm->mi_rows - mi_row, bh);
-#if CONFIG_EC_ADAPT
FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
-#else
- FRAME_CONTEXT *ec_ctx = cm->fc;
-#endif
mbmi->segment_id = read_intra_segment_id(cm, xd, mi_offset, x_mis, y_mis, r);
mbmi->skip = read_skip(cm, xd, mbmi->segment_id, r);
@@ -1013,13 +1113,13 @@ static void read_intra_frame_mode_info(AV1_COMMON *const cm,
mbmi->ref_frame[0] = INTRA_FRAME;
mbmi->ref_frame[1] = NONE_FRAME;
- mbmi->tx_size = read_tx_size(cm, xd, 0, 1, r);
#if CONFIG_INTRABC
if (bsize >= BLOCK_8X8 && cm->allow_screen_content_tools) {
mbmi->use_intrabc = aom_read(r, ec_ctx->intrabc_prob, ACCT_STR);
if (mbmi->use_intrabc) {
- mbmi->mode = mbmi->uv_mode = DC_PRED;
+ mbmi->tx_size = read_tx_size(cm, xd, 1, !mbmi->skip, r);
+ mbmi->mode = mbmi->uv_mode = UV_DC_PRED;
#if CONFIG_DUAL_FILTER
for (int idx = 0; idx < 4; ++idx) mbmi->interp_filter[idx] = BILINEAR;
#else
@@ -1066,6 +1166,8 @@ static void read_intra_frame_mode_info(AV1_COMMON *const cm,
}
#endif // CONFIG_INTRABC
+ mbmi->tx_size = read_tx_size(cm, xd, 0, 1, r);
+
#if CONFIG_CB4X4
(void)i;
mbmi->mode =
@@ -1106,13 +1208,15 @@ static void read_intra_frame_mode_info(AV1_COMMON *const cm,
#if CONFIG_CFL
// TODO(ltrudeau) support PALETTE
- if (mbmi->uv_mode == DC_PRED) {
- mbmi->cfl_alpha_idx =
- read_cfl_alphas(ec_ctx, r, mbmi->skip, mbmi->cfl_alpha_signs);
+ if (mbmi->uv_mode == UV_DC_PRED) {
+ mbmi->cfl_alpha_idx = read_cfl_alphas(ec_ctx, r, mbmi->cfl_alpha_signs);
}
#endif // CONFIG_CFL
#if CONFIG_CB4X4
+ } else {
+    // Avoid decoding angle_info if there is no chroma prediction
+ mbmi->uv_mode = UV_DC_PRED;
}
#endif
@@ -1141,16 +1245,28 @@ static void read_intra_frame_mode_info(AV1_COMMON *const cm,
#endif // !CONFIG_TXK_SEL
}
-static int read_mv_component(aom_reader *r, nmv_component *mvcomp, int usehp) {
+static int read_mv_component(aom_reader *r, nmv_component *mvcomp,
+#if CONFIG_INTRABC
+ int use_subpel,
+#endif // CONFIG_INTRABC
+ int usehp) {
int mag, d, fr, hp;
+#if CONFIG_NEW_MULTISYMBOL
+ const int sign = aom_read_bit(r, ACCT_STR);
+#else
const int sign = aom_read(r, mvcomp->sign, ACCT_STR);
+#endif
const int mv_class =
aom_read_symbol(r, mvcomp->class_cdf, MV_CLASSES, ACCT_STR);
const int class0 = mv_class == MV_CLASS_0;
// Integer part
if (class0) {
+#if CONFIG_NEW_MULTISYMBOL
+ d = aom_read_symbol(r, mvcomp->class0_cdf, CLASS0_SIZE, ACCT_STR);
+#else
d = aom_read(r, mvcomp->class0[0], ACCT_STR);
+#endif
mag = 0;
} else {
int i;
@@ -1161,13 +1277,29 @@ static int read_mv_component(aom_reader *r, nmv_component *mvcomp, int usehp) {
mag = CLASS0_SIZE << (mv_class + 2);
}
- // Fractional part
- fr = aom_read_symbol(r, class0 ? mvcomp->class0_fp_cdf[d] : mvcomp->fp_cdf,
- MV_FP_SIZE, ACCT_STR);
-
- // High precision part (if hp is not used, the default value of the hp is 1)
+#if CONFIG_INTRABC
+ if (use_subpel) {
+#endif // CONFIG_INTRABC
+ // Fractional part
+ fr = aom_read_symbol(r, class0 ? mvcomp->class0_fp_cdf[d] : mvcomp->fp_cdf,
+ MV_FP_SIZE, ACCT_STR);
+
+// High precision part (if hp is not used, the default value of the hp is 1)
+#if CONFIG_NEW_MULTISYMBOL
+ hp = usehp ? aom_read_symbol(
+ r, class0 ? mvcomp->class0_hp_cdf : mvcomp->hp_cdf, 2,
+ ACCT_STR)
+ : 1;
+#else
hp = usehp ? aom_read(r, class0 ? mvcomp->class0_hp : mvcomp->hp, ACCT_STR)
: 1;
+#endif
+#if CONFIG_INTRABC
+ } else {
+ fr = 3;
+ hp = 1;
+ }
+#endif // CONFIG_INTRABC
// Result
mag += ((d << 3) | (fr << 1) | hp) + 1;
@@ -1176,19 +1308,27 @@ static int read_mv_component(aom_reader *r, nmv_component *mvcomp, int usehp) {
static INLINE void read_mv(aom_reader *r, MV *mv, const MV *ref,
nmv_context *ctx, nmv_context_counts *counts,
- int allow_hp) {
+ MvSubpelPrecision precision) {
MV_JOINT_TYPE joint_type;
MV diff = { 0, 0 };
joint_type =
(MV_JOINT_TYPE)aom_read_symbol(r, ctx->joint_cdf, MV_JOINTS, ACCT_STR);
if (mv_joint_vertical(joint_type))
- diff.row = read_mv_component(r, &ctx->comps[0], allow_hp);
+ diff.row = read_mv_component(r, &ctx->comps[0],
+#if CONFIG_INTRABC
+ precision > MV_SUBPEL_NONE,
+#endif // CONFIG_INTRABC
+ precision > MV_SUBPEL_LOW_PRECISION);
if (mv_joint_horizontal(joint_type))
- diff.col = read_mv_component(r, &ctx->comps[1], allow_hp);
+ diff.col = read_mv_component(r, &ctx->comps[1],
+#if CONFIG_INTRABC
+ precision > MV_SUBPEL_NONE,
+#endif // CONFIG_INTRABC
+ precision > MV_SUBPEL_LOW_PRECISION);
- av1_inc_mv(&diff, counts, allow_hp);
+ av1_inc_mv(&diff, counts, precision);
mv->row = ref->row + diff.row;
mv->col = ref->col + diff.col;
@@ -1202,8 +1342,13 @@ static REFERENCE_MODE read_block_reference_mode(AV1_COMMON *cm,
#endif
if (cm->reference_mode == REFERENCE_MODE_SELECT) {
const int ctx = av1_get_reference_mode_context(cm, xd);
+#if CONFIG_NEW_MULTISYMBOL
+ const REFERENCE_MODE mode = (REFERENCE_MODE)aom_read_symbol(
+ r, xd->tile_ctx->comp_inter_cdf[ctx], 2, ACCT_STR);
+#else
const REFERENCE_MODE mode =
(REFERENCE_MODE)aom_read(r, cm->fc->comp_inter_prob[ctx], ACCT_STR);
+#endif
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->comp_inter[ctx][mode];
return mode; // SINGLE_REFERENCE or COMPOUND_REFERENCE
@@ -1212,11 +1357,50 @@ static REFERENCE_MODE read_block_reference_mode(AV1_COMMON *cm,
}
}
+#if CONFIG_NEW_MULTISYMBOL
+#define READ_REF_BIT(pname) \
+ aom_read_symbol(r, av1_get_pred_cdf_##pname(cm, xd), 2, ACCT_STR)
+#else
+#define READ_REF_BIT(pname) \
+ aom_read(r, av1_get_pred_prob_##pname(cm, xd), ACCT_STR)
+#endif
+
+#if CONFIG_EXT_COMP_REFS
+static REFERENCE_MODE read_comp_reference_type(AV1_COMMON *cm,
+ const MACROBLOCKD *xd,
+ aom_reader *r) {
+ const int ctx = av1_get_comp_reference_type_context(xd);
+#if USE_UNI_COMP_REFS
+ COMP_REFERENCE_TYPE comp_ref_type;
+#if CONFIG_VAR_REFS
+ if ((L_OR_L2(cm) || L3_OR_G(cm)) && BWD_OR_ALT(cm))
+ if (L_AND_L2(cm) || L_AND_L3(cm) || L_AND_G(cm) || BWD_AND_ALT(cm))
+#endif // CONFIG_VAR_REFS
+ comp_ref_type = (COMP_REFERENCE_TYPE)aom_read(
+ r, cm->fc->comp_ref_type_prob[ctx], ACCT_STR);
+#if CONFIG_VAR_REFS
+ else
+ comp_ref_type = BIDIR_COMP_REFERENCE;
+ else
+ comp_ref_type = UNIDIR_COMP_REFERENCE;
+#endif // CONFIG_VAR_REFS
+#else // !USE_UNI_COMP_REFS
+ // TODO(zoeliu): Temporarily turn off uni-directional comp refs
+ const COMP_REFERENCE_TYPE comp_ref_type = BIDIR_COMP_REFERENCE;
+#endif // USE_UNI_COMP_REFS
+ FRAME_COUNTS *counts = xd->counts;
+ if (counts) ++counts->comp_ref_type[ctx][comp_ref_type];
+ return comp_ref_type; // UNIDIR_COMP_REFERENCE or BIDIR_COMP_REFERENCE
+}
+#endif // CONFIG_EXT_COMP_REFS
+
// Read the reference frame
static void read_ref_frames(AV1_COMMON *const cm, MACROBLOCKD *const xd,
aom_reader *r, int segment_id,
MV_REFERENCE_FRAME ref_frame[2]) {
+#if CONFIG_EXT_COMP_REFS
FRAME_CONTEXT *const fc = cm->fc;
+#endif
FRAME_COUNTS *counts = xd->counts;
if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
@@ -1227,81 +1411,279 @@ static void read_ref_frames(AV1_COMMON *const cm, MACROBLOCKD *const xd,
const REFERENCE_MODE mode = read_block_reference_mode(cm, xd, r);
// FIXME(rbultje) I'm pretty sure this breaks segmentation ref frame coding
if (mode == COMPOUND_REFERENCE) {
-#if CONFIG_ONE_SIDED_COMPOUND // Normative in decoder (for low delay)
+#if CONFIG_EXT_COMP_REFS
+ const COMP_REFERENCE_TYPE comp_ref_type =
+ read_comp_reference_type(cm, xd, r);
+
+#if !USE_UNI_COMP_REFS
+ // TODO(zoeliu): Temporarily turn off uni-directional comp refs
+ assert(comp_ref_type == BIDIR_COMP_REFERENCE);
+#endif // !USE_UNI_COMP_REFS
+
+ if (comp_ref_type == UNIDIR_COMP_REFERENCE) {
+ const int ctx = av1_get_pred_context_uni_comp_ref_p(xd);
+ int bit;
+#if CONFIG_VAR_REFS
+ if ((L_AND_L2(cm) || L_AND_L3(cm) || L_AND_G(cm)) && BWD_AND_ALT(cm))
+#endif // CONFIG_VAR_REFS
+ bit = aom_read(r, fc->uni_comp_ref_prob[ctx][0], ACCT_STR);
+#if CONFIG_VAR_REFS
+ else
+ bit = BWD_AND_ALT(cm);
+#endif // CONFIG_VAR_REFS
+ if (counts) ++counts->uni_comp_ref[ctx][0][bit];
+
+ if (bit) {
+ ref_frame[0] = BWDREF_FRAME;
+ ref_frame[1] = ALTREF_FRAME;
+ } else {
+ const int ctx1 = av1_get_pred_context_uni_comp_ref_p1(xd);
+ int bit1;
+#if CONFIG_VAR_REFS
+ if (L_AND_L2(cm) && (L_AND_L3(cm) || L_AND_G(cm)))
+#endif // CONFIG_VAR_REFS
+ bit1 = aom_read(r, fc->uni_comp_ref_prob[ctx1][1], ACCT_STR);
+#if CONFIG_VAR_REFS
+ else
+ bit1 = L_AND_L3(cm) || L_AND_G(cm);
+#endif // CONFIG_VAR_REFS
+ if (counts) ++counts->uni_comp_ref[ctx1][1][bit1];
+
+ if (bit1) {
+ const int ctx2 = av1_get_pred_context_uni_comp_ref_p2(xd);
+ int bit2;
+#if CONFIG_VAR_REFS
+ if (L_AND_L3(cm) && L_AND_G(cm))
+#endif // CONFIG_VAR_REFS
+ bit2 = aom_read(r, fc->uni_comp_ref_prob[ctx2][2], ACCT_STR);
+#if CONFIG_VAR_REFS
+ else
+ bit2 = L_AND_G(cm);
+#endif // CONFIG_VAR_REFS
+ if (counts) ++counts->uni_comp_ref[ctx2][2][bit2];
+
+ if (bit2) {
+ ref_frame[0] = LAST_FRAME;
+ ref_frame[1] = GOLDEN_FRAME;
+ } else {
+ ref_frame[0] = LAST_FRAME;
+ ref_frame[1] = LAST3_FRAME;
+ }
+ } else {
+ ref_frame[0] = LAST_FRAME;
+ ref_frame[1] = LAST2_FRAME;
+ }
+ }
+
+ return;
+ }
+
+ assert(comp_ref_type == BIDIR_COMP_REFERENCE);
+#endif // CONFIG_EXT_COMP_REFS
+
+// Normative in decoder (for low delay)
+#if CONFIG_ONE_SIDED_COMPOUND || CONFIG_EXT_COMP_REFS
const int idx = 1;
-#else
+#else // !(CONFIG_ONE_SIDED_COMPOUND || CONFIG_EXT_COMP_REFS)
#if CONFIG_EXT_REFS
const int idx = cm->ref_frame_sign_bias[cm->comp_bwd_ref[0]];
-#else
+#else // !CONFIG_EXT_REFS
const int idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
#endif // CONFIG_EXT_REFS
-#endif
- const int ctx = av1_get_pred_context_comp_ref_p(cm, xd);
+#endif // CONFIG_ONE_SIDED_COMPOUND || CONFIG_EXT_COMP_REFS
- const int bit = aom_read(r, fc->comp_ref_prob[ctx][0], ACCT_STR);
+ const int ctx = av1_get_pred_context_comp_ref_p(cm, xd);
+#if CONFIG_VAR_REFS
+ int bit;
+ // Test need to explicitly code (L,L2) vs (L3,G) branch node in tree
+ if (L_OR_L2(cm) && L3_OR_G(cm))
+ bit = READ_REF_BIT(comp_ref_p);
+ else
+ bit = L3_OR_G(cm);
+#else // !CONFIG_VAR_REFS
+ const int bit = READ_REF_BIT(comp_ref_p);
+#endif // CONFIG_VAR_REFS
if (counts) ++counts->comp_ref[ctx][0][bit];
#if CONFIG_EXT_REFS
// Decode forward references.
if (!bit) {
const int ctx1 = av1_get_pred_context_comp_ref_p1(cm, xd);
- const int bit1 = aom_read(r, fc->comp_ref_prob[ctx1][1], ACCT_STR);
+#if CONFIG_VAR_REFS
+ int bit1;
+ // Test need to explicitly code (L) vs (L2) branch node in tree
+ if (L_AND_L2(cm))
+ bit1 = READ_REF_BIT(comp_ref_p1);
+ else
+ bit1 = LAST_IS_VALID(cm);
+#else // !CONFIG_VAR_REFS
+ const int bit1 = READ_REF_BIT(comp_ref_p1);
+#endif // CONFIG_VAR_REFS
if (counts) ++counts->comp_ref[ctx1][1][bit1];
ref_frame[!idx] = cm->comp_fwd_ref[bit1 ? 0 : 1];
} else {
const int ctx2 = av1_get_pred_context_comp_ref_p2(cm, xd);
- const int bit2 = aom_read(r, fc->comp_ref_prob[ctx2][2], ACCT_STR);
+#if CONFIG_VAR_REFS
+ int bit2;
+ // Test need to explicitly code (L3) vs (G) branch node in tree
+ if (L3_AND_G(cm))
+ bit2 = READ_REF_BIT(comp_ref_p2);
+ else
+ bit2 = GOLDEN_IS_VALID(cm);
+#else // !CONFIG_VAR_REFS
+ const int bit2 = READ_REF_BIT(comp_ref_p2);
+#endif // CONFIG_VAR_REFS
if (counts) ++counts->comp_ref[ctx2][2][bit2];
ref_frame[!idx] = cm->comp_fwd_ref[bit2 ? 3 : 2];
}
// Decode backward references.
- {
- const int ctx_bwd = av1_get_pred_context_comp_bwdref_p(cm, xd);
- const int bit_bwd =
- aom_read(r, fc->comp_bwdref_prob[ctx_bwd][0], ACCT_STR);
- if (counts) ++counts->comp_bwdref[ctx_bwd][0][bit_bwd];
- ref_frame[idx] = cm->comp_bwd_ref[bit_bwd];
+ const int ctx_bwd = av1_get_pred_context_comp_bwdref_p(cm, xd);
+#if CONFIG_VAR_REFS
+ int bit_bwd;
+// Test need to explicitly code (BWD/ALT2) vs (ALT) branch node in tree
+#if CONFIG_ALTREF2
+ const int bit_bwd_uncertain = BWD_OR_ALT2(cm) && ALTREF_IS_VALID(cm);
+#else // !CONFIG_ALTREF2
+ const int bit_bwd_uncertain = BWD_AND_ALT(cm);
+#endif // CONFIG_ALTREF2
+ if (bit_bwd_uncertain)
+ bit_bwd = READ_REF_BIT(comp_bwdref_p);
+ else
+ bit_bwd = ALTREF_IS_VALID(cm);
+#else // !CONFIG_VAR_REFS
+ const int bit_bwd = READ_REF_BIT(comp_bwdref_p);
+#endif // CONFIG_VAR_REFS
+ if (counts) ++counts->comp_bwdref[ctx_bwd][0][bit_bwd];
+#if CONFIG_ALTREF2
+ if (!bit_bwd) {
+ const int ctx1_bwd = av1_get_pred_context_comp_bwdref_p1(cm, xd);
+#if CONFIG_VAR_REFS
+ int bit1_bwd;
+ if (BWD_AND_ALT2(cm))
+ bit1_bwd = READ_REF_BIT(comp_bwdref_p1);
+ else
+ bit1_bwd = ALTREF2_IS_VALID(cm);
+#else // !CONFIG_VAR_REFS
+ const int bit1_bwd = READ_REF_BIT(comp_bwdref_p1);
+#endif // CONFIG_VAR_REFS
+ if (counts) ++counts->comp_bwdref[ctx1_bwd][1][bit1_bwd];
+ ref_frame[idx] = cm->comp_bwd_ref[bit1_bwd];
+ } else {
+ ref_frame[idx] = cm->comp_bwd_ref[2];
}
-#else
+#else // !CONFIG_ALTREF2
+ ref_frame[idx] = cm->comp_bwd_ref[bit_bwd];
+#endif // CONFIG_ALTREF2
+#else // !CONFIG_EXT_REFS
ref_frame[!idx] = cm->comp_var_ref[bit];
ref_frame[idx] = cm->comp_fixed_ref;
#endif // CONFIG_EXT_REFS
} else if (mode == SINGLE_REFERENCE) {
#if CONFIG_EXT_REFS
const int ctx0 = av1_get_pred_context_single_ref_p1(xd);
- const int bit0 = aom_read(r, fc->single_ref_prob[ctx0][0], ACCT_STR);
+#if CONFIG_VAR_REFS
+ int bit0;
+ // Test need to explicitly code (L,L2,L3,G) vs (BWD,ALT) branch node in
+ // tree
+ if ((L_OR_L2(cm) || L3_OR_G(cm)) && BWD_OR_ALT(cm))
+ bit0 = READ_REF_BIT(single_ref_p1);
+ else
+ bit0 = BWD_OR_ALT(cm);
+#else // !CONFIG_VAR_REFS
+ const int bit0 = READ_REF_BIT(single_ref_p1);
+#endif // CONFIG_VAR_REFS
if (counts) ++counts->single_ref[ctx0][0][bit0];
if (bit0) {
const int ctx1 = av1_get_pred_context_single_ref_p2(xd);
- const int bit1 = aom_read(r, fc->single_ref_prob[ctx1][1], ACCT_STR);
+#if CONFIG_VAR_REFS
+ int bit1;
+// Test need to explicitly code (BWD/ALT2) vs (ALT) branch node in tree
+#if CONFIG_ALTREF2
+ const int bit1_uncertain = BWD_OR_ALT2(cm) && ALTREF_IS_VALID(cm);
+#else // !CONFIG_ALTREF2
+ const int bit1_uncertain = BWD_AND_ALT(cm);
+#endif // CONFIG_ALTREF2
+ if (bit1_uncertain)
+ bit1 = READ_REF_BIT(single_ref_p2);
+ else
+ bit1 = ALTREF_IS_VALID(cm);
+#else // !CONFIG_VAR_REFS
+ const int bit1 = READ_REF_BIT(single_ref_p2);
+#endif // CONFIG_VAR_REFS
if (counts) ++counts->single_ref[ctx1][1][bit1];
+#if CONFIG_ALTREF2
+ if (!bit1) {
+ const int ctx5 = av1_get_pred_context_single_ref_p6(xd);
+#if CONFIG_VAR_REFS
+ int bit5;
+ if (BWD_AND_ALT2(cm))
+ bit5 = READ_REF_BIT(single_ref_p6);
+ else
+ bit5 = ALTREF2_IS_VALID(cm);
+#else // !CONFIG_VAR_REFS
+ const int bit5 = READ_REF_BIT(single_ref_p6);
+#endif // CONFIG_VAR_REFS
+ if (counts) ++counts->single_ref[ctx5][5][bit5];
+ ref_frame[0] = bit5 ? ALTREF2_FRAME : BWDREF_FRAME;
+ } else {
+ ref_frame[0] = ALTREF_FRAME;
+ }
+#else // !CONFIG_ALTREF2
ref_frame[0] = bit1 ? ALTREF_FRAME : BWDREF_FRAME;
+#endif // CONFIG_ALTREF2
} else {
const int ctx2 = av1_get_pred_context_single_ref_p3(xd);
- const int bit2 = aom_read(r, fc->single_ref_prob[ctx2][2], ACCT_STR);
+#if CONFIG_VAR_REFS
+ int bit2;
+ // Test need to explicitly code (L,L2) vs (L3,G) branch node in tree
+ if (L_OR_L2(cm) && L3_OR_G(cm))
+ bit2 = READ_REF_BIT(single_ref_p3);
+ else
+ bit2 = L3_OR_G(cm);
+#else // !CONFIG_VAR_REFS
+ const int bit2 = READ_REF_BIT(single_ref_p3);
+#endif // CONFIG_VAR_REFS
if (counts) ++counts->single_ref[ctx2][2][bit2];
if (bit2) {
const int ctx4 = av1_get_pred_context_single_ref_p5(xd);
- const int bit4 = aom_read(r, fc->single_ref_prob[ctx4][4], ACCT_STR);
+#if CONFIG_VAR_REFS
+ int bit4;
+ // Test need to explicitly code (L3) vs (G) branch node in tree
+ if (L3_AND_G(cm))
+ bit4 = READ_REF_BIT(single_ref_p5);
+ else
+ bit4 = GOLDEN_IS_VALID(cm);
+#else // !CONFIG_VAR_REFS
+ const int bit4 = READ_REF_BIT(single_ref_p5);
+#endif // CONFIG_VAR_REFS
if (counts) ++counts->single_ref[ctx4][4][bit4];
ref_frame[0] = bit4 ? GOLDEN_FRAME : LAST3_FRAME;
} else {
const int ctx3 = av1_get_pred_context_single_ref_p4(xd);
- const int bit3 = aom_read(r, fc->single_ref_prob[ctx3][3], ACCT_STR);
+#if CONFIG_VAR_REFS
+ int bit3;
+ // Test need to explicitly code (L) vs (L2) branch node in tree
+ if (L_AND_L2(cm))
+ bit3 = READ_REF_BIT(single_ref_p4);
+ else
+ bit3 = LAST2_IS_VALID(cm);
+#else // !CONFIG_VAR_REFS
+ const int bit3 = READ_REF_BIT(single_ref_p4);
+#endif // CONFIG_VAR_REFS
if (counts) ++counts->single_ref[ctx3][3][bit3];
ref_frame[0] = bit3 ? LAST2_FRAME : LAST_FRAME;
}
}
-#else
+#else // !CONFIG_EXT_REFS
const int ctx0 = av1_get_pred_context_single_ref_p1(xd);
- const int bit0 = aom_read(r, fc->single_ref_prob[ctx0][0], ACCT_STR);
+ const int bit0 = READ_REF_BIT(single_ref_p1);
if (counts) ++counts->single_ref[ctx0][0][bit0];
if (bit0) {
const int ctx1 = av1_get_pred_context_single_ref_p2(xd);
- const int bit1 = aom_read(r, fc->single_ref_prob[ctx1][1], ACCT_STR);
+ const int bit1 = READ_REF_BIT(single_ref_p2);
if (counts) ++counts->single_ref[ctx1][1][bit1];
ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME;
} else {
@@ -1321,11 +1703,7 @@ static INLINE void read_mb_interp_filter(AV1_COMMON *const cm,
MB_MODE_INFO *const mbmi,
aom_reader *r) {
FRAME_COUNTS *counts = xd->counts;
-#if CONFIG_EC_ADAPT
FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
-#else
- FRAME_CONTEXT *ec_ctx = cm->fc;
-#endif
if (!av1_is_interp_needed(xd)) {
set_default_interp_filters(mbmi, cm->interp_filter);
@@ -1384,11 +1762,7 @@ static void read_intra_block_mode_info(AV1_COMMON *const cm, const int mi_row,
mbmi->ref_frame[0] = INTRA_FRAME;
mbmi->ref_frame[1] = NONE_FRAME;
-#if CONFIG_EC_ADAPT
FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
-#else
- FRAME_CONTEXT *ec_ctx = cm->fc;
-#endif
#if CONFIG_CB4X4
(void)i;
@@ -1429,14 +1803,9 @@ static void read_intra_block_mode_info(AV1_COMMON *const cm, const int mi_row,
#if CONFIG_CFL
// TODO(ltrudeau) support PALETTE
- if (mbmi->uv_mode == DC_PRED) {
- mbmi->cfl_alpha_idx = read_cfl_alphas(
-#if CONFIG_EC_ADAPT
- xd->tile_ctx,
-#else
- cm->fc,
-#endif // CONFIG_EC_ADAPT
- r, mbmi->skip, mbmi->cfl_alpha_signs);
+ if (mbmi->uv_mode == UV_DC_PRED) {
+ mbmi->cfl_alpha_idx =
+ read_cfl_alphas(xd->tile_ctx, r, mbmi->cfl_alpha_signs);
}
#endif // CONFIG_CFL
@@ -1475,11 +1844,7 @@ static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd,
aom_reader *r) {
int i;
int ret = 1;
-#if CONFIG_EC_ADAPT
FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
-#else
- FRAME_CONTEXT *ec_ctx = cm->fc;
-#endif
BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
#if CONFIG_CB4X4
@@ -1550,6 +1915,84 @@ static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd,
break;
}
#if CONFIG_EXT_INTER
+#if CONFIG_COMPOUND_SINGLEREF
+ case SR_NEAREST_NEARMV: {
+ assert(!is_compound);
+ mv[0].as_int = nearest_mv[0].as_int;
+ mv[1].as_int = near_mv[0].as_int;
+ break;
+ }
+ /*
+ case SR_NEAREST_NEWMV: {
+ assert(!is_compound);
+ mv[0].as_int = nearest_mv[0].as_int;
+
+ FRAME_COUNTS *counts = xd->counts;
+ int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
+ int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[rf_type],
+ xd->ref_mv_stack[rf_type], 0, mbmi->ref_mv_idx);
+ nmv_context *const nmvc = &ec_ctx->nmvc[nmv_ctx];
+ nmv_context_counts *const mv_counts =
+ counts ? &counts->mv[nmv_ctx] : NULL;
+ read_mv(r, &mv[1].as_mv, &ref_mv[0].as_mv, nmvc, mv_counts, allow_hp);
+ ret = ret && is_mv_valid(&mv[1].as_mv);
+ break;
+ }*/
+ case SR_NEAR_NEWMV: {
+ assert(!is_compound);
+ mv[0].as_int = near_mv[0].as_int;
+
+ FRAME_COUNTS *counts = xd->counts;
+ int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
+ int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[rf_type],
+ xd->ref_mv_stack[rf_type], 0, mbmi->ref_mv_idx);
+ nmv_context *const nmvc = &ec_ctx->nmvc[nmv_ctx];
+ nmv_context_counts *const mv_counts =
+ counts ? &counts->mv[nmv_ctx] : NULL;
+ read_mv(r, &mv[1].as_mv, &ref_mv[0].as_mv, nmvc, mv_counts, allow_hp);
+ ret = ret && is_mv_valid(&mv[1].as_mv);
+ break;
+ }
+ case SR_ZERO_NEWMV: {
+ assert(!is_compound);
+#if CONFIG_GLOBAL_MOTION
+ mv[0].as_int = gm_get_motion_vector(&cm->global_motion[ref_frame[0]],
+ cm->allow_high_precision_mv, bsize,
+ mi_col, mi_row, block)
+ .as_int;
+#else
+ mv[0].as_int = 0;
+#endif // CONFIG_GLOBAL_MOTION
+
+ FRAME_COUNTS *counts = xd->counts;
+ int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
+ int nmv_ctx = av1_nmv_ctx(xd->ref_mv_count[rf_type],
+ xd->ref_mv_stack[rf_type], 0, mbmi->ref_mv_idx);
+ nmv_context *const nmvc = &ec_ctx->nmvc[nmv_ctx];
+ nmv_context_counts *const mv_counts =
+ counts ? &counts->mv[nmv_ctx] : NULL;
+ read_mv(r, &mv[1].as_mv, &ref_mv[0].as_mv, nmvc, mv_counts, allow_hp);
+ ret = ret && is_mv_valid(&mv[1].as_mv);
+ break;
+ }
+ case SR_NEW_NEWMV: {
+ assert(!is_compound);
+
+ FRAME_COUNTS *counts = xd->counts;
+ for (i = 0; i < 2; ++i) {
+ int8_t rf_type = av1_ref_frame_type(mbmi->ref_frame);
+ int nmv_ctx =
+ av1_nmv_ctx(xd->ref_mv_count[rf_type], xd->ref_mv_stack[rf_type], 0,
+ mbmi->ref_mv_idx);
+ nmv_context *const nmvc = &ec_ctx->nmvc[nmv_ctx];
+ nmv_context_counts *const mv_counts =
+ counts ? &counts->mv[nmv_ctx] : NULL;
+ read_mv(r, &mv[i].as_mv, &ref_mv[0].as_mv, nmvc, mv_counts, allow_hp);
+ ret = ret && is_mv_valid(&mv[i].as_mv);
+ }
+ break;
+ }
+#endif // CONFIG_COMPOUND_SINGLEREF
case NEW_NEWMV: {
FRAME_COUNTS *counts = xd->counts;
assert(is_compound);
@@ -1664,19 +2107,102 @@ static int read_is_inter_block(AV1_COMMON *const cm, MACROBLOCKD *const xd,
return get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) != INTRA_FRAME;
} else {
const int ctx = av1_get_intra_inter_context(xd);
+#if CONFIG_NEW_MULTISYMBOL
+ FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
+ const int is_inter =
+ aom_read_symbol(r, ec_ctx->intra_inter_cdf[ctx], 2, ACCT_STR);
+#else
const int is_inter = aom_read(r, cm->fc->intra_inter_prob[ctx], ACCT_STR);
+#endif
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->intra_inter[ctx][is_inter];
return is_inter;
}
}
+#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+static int read_is_inter_singleref_comp_mode(AV1_COMMON *const cm,
+ MACROBLOCKD *const xd,
+ int segment_id, aom_reader *r) {
+ if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) return 0;
+
+ const int ctx = av1_get_inter_mode_context(xd);
+ const int is_singleref_comp_mode =
+ aom_read(r, cm->fc->comp_inter_mode_prob[ctx], ACCT_STR);
+ FRAME_COUNTS *counts = xd->counts;
+
+ if (counts) ++counts->comp_inter_mode[ctx][is_singleref_comp_mode];
+ return is_singleref_comp_mode;
+}
+#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+
static void fpm_sync(void *const data, int mi_row) {
AV1Decoder *const pbi = (AV1Decoder *)data;
av1_frameworker_wait(pbi->frame_worker_owner, pbi->common.prev_frame,
mi_row << pbi->common.mib_size_log2);
}
+#if DEC_MISMATCH_DEBUG
+static void dec_dump_logs(AV1_COMMON *cm, MODE_INFO *const mi,
+ MACROBLOCKD *const xd, int mi_row, int mi_col,
+ int16_t inter_mode_ctx[MODE_CTX_REF_FRAMES],
+ int16_t mode_ctx) {
+ int_mv mv[2] = { { 0 } };
+ int ref;
+ MB_MODE_INFO *const mbmi = &mi->mbmi;
+ for (ref = 0; ref < 1 + has_second_ref(mbmi); ++ref)
+ mv[ref].as_mv = mbmi->mv[ref].as_mv;
+
+ int interp_ctx[2] = { -1 };
+ int interp_filter[2] = { cm->interp_filter };
+ if (cm->interp_filter == SWITCHABLE) {
+ int dir;
+ for (dir = 0; dir < 2; ++dir) {
+ if (has_subpel_mv_component(xd->mi[0], xd, dir) ||
+ (mbmi->ref_frame[1] > INTRA_FRAME &&
+ has_subpel_mv_component(xd->mi[0], xd, dir + 2))) {
+ interp_ctx[dir] = av1_get_pred_context_switchable_interp(xd, dir);
+ interp_filter[dir] = mbmi->interp_filter[dir];
+ } else {
+ interp_filter[dir] = EIGHTTAP_REGULAR;
+ }
+ }
+ }
+
+ const int16_t newmv_ctx = mode_ctx & NEWMV_CTX_MASK;
+ int16_t zeromv_ctx = -1;
+ int16_t refmv_ctx = -1;
+ if (mbmi->mode != NEWMV) {
+ if (mode_ctx & (1 << ALL_ZERO_FLAG_OFFSET)) assert(mbmi->mode == ZEROMV);
+ zeromv_ctx = (mode_ctx >> ZEROMV_OFFSET) & ZEROMV_CTX_MASK;
+ if (mbmi->mode != ZEROMV) {
+ refmv_ctx = (mode_ctx >> REFMV_OFFSET) & REFMV_CTX_MASK;
+ if (mode_ctx & (1 << SKIP_NEARESTMV_OFFSET)) refmv_ctx = 6;
+ if (mode_ctx & (1 << SKIP_NEARMV_OFFSET)) refmv_ctx = 7;
+ if (mode_ctx & (1 << SKIP_NEARESTMV_SUB8X8_OFFSET)) refmv_ctx = 8;
+ }
+ }
+
+ int8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
+#define FRAME_TO_CHECK 1
+ if (cm->current_video_frame == FRAME_TO_CHECK /*&& cm->show_frame == 0*/) {
+ printf(
+ "=== DECODER ===: "
+ "Frame=%d, (mi_row,mi_col)=(%d,%d), mode=%d, bsize=%d, "
+ "show_frame=%d, mv[0]=(%d,%d), mv[1]=(%d,%d), ref[0]=%d, "
+ "ref[1]=%d, motion_mode=%d, inter_mode_ctx=%d, mode_ctx=%d, "
+ "interp_ctx=(%d,%d), interp_filter=(%d,%d), newmv_ctx=%d, "
+ "zeromv_ctx=%d, refmv_ctx=%d\n",
+ cm->current_video_frame, mi_row, mi_col, mbmi->mode, mbmi->sb_type,
+ cm->show_frame, mv[0].as_mv.row, mv[0].as_mv.col, mv[1].as_mv.row,
+ mv[1].as_mv.col, mbmi->ref_frame[0], mbmi->ref_frame[1],
+ mbmi->motion_mode, inter_mode_ctx[ref_frame_type], mode_ctx,
+ interp_ctx[0], interp_ctx[1], interp_filter[0], interp_filter[1],
+ newmv_ctx, zeromv_ctx, refmv_ctx);
+ }
+}
+#endif // DEC_MISMATCH_DEBUG
+
static void read_inter_block_mode_info(AV1Decoder *const pbi,
MACROBLOCKD *const xd,
MODE_INFO *const mi,
@@ -1695,6 +2221,9 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
int_mv nearestmv[2], nearmv[2];
int_mv ref_mvs[MODE_CTX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
int ref, is_compound;
+#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+ int is_singleref_comp_mode = 0;
+#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
int16_t inter_mode_ctx[MODE_CTX_REF_FRAMES];
#if CONFIG_EXT_INTER
int16_t compound_inter_mode_ctx[MODE_CTX_REF_FRAMES];
@@ -1702,12 +2231,11 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
int16_t mode_ctx = 0;
#if CONFIG_WARPED_MOTION
int pts[SAMPLES_ARRAY_SIZE], pts_inref[SAMPLES_ARRAY_SIZE];
+#if WARPED_MOTION_SORT_SAMPLES
+ int pts_mv[SAMPLES_ARRAY_SIZE];
+#endif // WARPED_MOTION_SORT_SAMPLES
#endif // CONFIG_WARPED_MOTION
-#if CONFIG_EC_ADAPT
FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
-#else
- FRAME_CONTEXT *ec_ctx = cm->fc;
-#endif
assert(NELEMENTS(mode_2_counter) == MB_MODE_COUNT);
@@ -1721,6 +2249,21 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
read_ref_frames(cm, xd, r, mbmi->segment_id, mbmi->ref_frame);
is_compound = has_second_ref(mbmi);
+#if CONFIG_EXT_COMP_REFS
+#if !USE_UNI_COMP_REFS
+ // NOTE: uni-directional comp refs disabled
+ if (is_compound)
+ assert(mbmi->ref_frame[0] < BWDREF_FRAME &&
+ mbmi->ref_frame[1] >= BWDREF_FRAME);
+#endif // !USE_UNI_COMP_REFS
+#endif // CONFIG_EXT_COMP_REFS
+
+#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+ if (!is_compound)
+ is_singleref_comp_mode =
+ read_is_inter_singleref_comp_mode(cm, xd, mbmi->segment_id, r);
+#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
+
for (ref = 0; ref < 1 + is_compound; ++ref) {
MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
@@ -1772,7 +2315,11 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
}
#if CONFIG_EXT_INTER
+#if CONFIG_COMPOUND_SINGLEREF
+ if (is_compound || is_singleref_comp_mode)
+#else // !CONFIG_COMPOUND_SINGLEREF
if (is_compound)
+#endif // CONFIG_COMPOUND_SINGLEREF
mode_ctx = compound_inter_mode_ctx[mbmi->ref_frame[0]];
else
#endif // CONFIG_EXT_INTER
@@ -1784,7 +2331,7 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
mbmi->mode = ZEROMV;
if (bsize < BLOCK_8X8 && !unify_bsize) {
aom_internal_error(xd->error_info, AOM_CODEC_UNSUP_BITSTREAM,
- "Invalid usage of segement feature on small blocks");
+ "Invalid usage of segment feature on small blocks");
return;
}
} else {
@@ -1792,16 +2339,23 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
#if CONFIG_EXT_INTER
if (is_compound)
mbmi->mode = read_inter_compound_mode(cm, xd, r, mode_ctx);
+#if CONFIG_COMPOUND_SINGLEREF
+ else if (is_singleref_comp_mode)
+ mbmi->mode = read_inter_singleref_comp_mode(xd, r, mode_ctx);
+#endif // CONFIG_COMPOUND_SINGLEREF
else
#endif // CONFIG_EXT_INTER
mbmi->mode = read_inter_mode(ec_ctx, xd, r, mode_ctx);
#if CONFIG_EXT_INTER
if (mbmi->mode == NEWMV || mbmi->mode == NEW_NEWMV ||
+#if CONFIG_COMPOUND_SINGLEREF
+ mbmi->mode == SR_NEW_NEWMV ||
+#endif // CONFIG_COMPOUND_SINGLEREF
have_nearmv_in_inter_mode(mbmi->mode))
-#else
+#else // !CONFIG_EXT_INTER
if (mbmi->mode == NEARMV || mbmi->mode == NEWMV)
-#endif
- read_drl_idx(cm, xd, mbmi, r);
+#endif // CONFIG_EXT_INTER
+ read_drl_idx(ec_ctx, xd, mbmi, r);
}
}
@@ -1817,16 +2371,15 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
}
}
- if (mbmi->ref_mv_idx > 0) {
- int_mv cur_mv =
- xd->ref_mv_stack[mbmi->ref_frame[0]][1 + mbmi->ref_mv_idx].this_mv;
- nearmv[0] = cur_mv;
- }
-
#if CONFIG_EXT_INTER
+#if CONFIG_COMPOUND_SINGLEREF
+ if ((is_compound || is_singleref_comp_mode) &&
+ (bsize >= BLOCK_8X8 || unify_bsize) && mbmi->mode != ZERO_ZEROMV) {
+#else // !CONFIG_COMPOUND_SINGLEREF
if (is_compound && (bsize >= BLOCK_8X8 || unify_bsize) &&
mbmi->mode != ZERO_ZEROMV) {
-#else
+#endif // CONFIG_COMPOUND_SINGLEREF
+#else // !CONFIG_EXT_INTER
if (is_compound && (bsize >= BLOCK_8X8 || unify_bsize) &&
mbmi->mode != NEWMV && mbmi->mode != ZEROMV) {
#endif // CONFIG_EXT_INTER
@@ -1845,7 +2398,12 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
lower_mv_precision(&nearestmv[0].as_mv, allow_hp);
lower_mv_precision(&nearestmv[1].as_mv, allow_hp);
#if CONFIG_EXT_INTER
- } else if (mbmi->mode == NEAREST_NEWMV) {
+ } else if (mbmi->mode == NEAREST_NEWMV
+#if CONFIG_COMPOUND_SINGLEREF
+ || mbmi->mode == SR_NEAREST_NEARMV
+// || mbmi->mode == SR_NEAREST_NEWMV
+#endif // CONFIG_COMPOUND_SINGLEREF
+ ) {
nearestmv[0] = xd->ref_mv_stack[ref_frame_type][0].this_mv;
lower_mv_precision(&nearestmv[0].as_mv, allow_hp);
} else if (mbmi->mode == NEW_NEARESTMV) {
@@ -1858,17 +2416,30 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
#if CONFIG_EXT_INTER
if (xd->ref_mv_count[ref_frame_type] > 1) {
int ref_mv_idx = 1 + mbmi->ref_mv_idx;
- if (compound_ref0_mode(mbmi->mode) == NEARMV) {
- nearmv[0] = xd->ref_mv_stack[ref_frame_type][ref_mv_idx].this_mv;
- lower_mv_precision(&nearmv[0].as_mv, allow_hp);
- }
+#if CONFIG_COMPOUND_SINGLEREF
+ if (is_compound) {
+#endif // CONFIG_COMPOUND_SINGLEREF
+ if (compound_ref0_mode(mbmi->mode) == NEARMV) {
+ nearmv[0] = xd->ref_mv_stack[ref_frame_type][ref_mv_idx].this_mv;
+ lower_mv_precision(&nearmv[0].as_mv, allow_hp);
+ }
- if (compound_ref1_mode(mbmi->mode) == NEARMV) {
- nearmv[1] = xd->ref_mv_stack[ref_frame_type][ref_mv_idx].comp_mv;
- lower_mv_precision(&nearmv[1].as_mv, allow_hp);
+ if (compound_ref1_mode(mbmi->mode) == NEARMV) {
+ nearmv[1] = xd->ref_mv_stack[ref_frame_type][ref_mv_idx].comp_mv;
+ lower_mv_precision(&nearmv[1].as_mv, allow_hp);
+ }
+#if CONFIG_COMPOUND_SINGLEREF
+ } else {
+ assert(is_singleref_comp_mode);
+ if (compound_ref0_mode(mbmi->mode) == NEARMV ||
+ compound_ref1_mode(mbmi->mode) == NEARMV) {
+ nearmv[0] = xd->ref_mv_stack[ref_frame_type][ref_mv_idx].this_mv;
+ lower_mv_precision(&nearmv[0].as_mv, allow_hp);
+ }
}
+#endif // CONFIG_COMPOUND_SINGLEREF
}
-#else
+#else // !CONFIG_EXT_INTER
if (xd->ref_mv_count[ref_frame_type] > 1) {
int ref_mv_idx = 1 + mbmi->ref_mv_idx;
nearestmv[0] = xd->ref_mv_stack[ref_frame_type][0].this_mv;
@@ -1877,6 +2448,10 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
nearmv[1] = xd->ref_mv_stack[ref_frame_type][ref_mv_idx].comp_mv;
}
#endif // CONFIG_EXT_INTER
+ } else if (mbmi->ref_mv_idx > 0 && mbmi->mode == NEARMV) {
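+      // A non-zero DRL index for single-reference NEARMV selects the
+      // (1 + ref_mv_idx)-th candidate from the reference MV stack.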
+ int_mv cur_mv =
+ xd->ref_mv_stack[mbmi->ref_frame[0]][1 + mbmi->ref_mv_idx].this_mv;
+ nearmv[0] = cur_mv;
}
#if !CONFIG_DUAL_FILTER && !CONFIG_WARPED_MOTION && !CONFIG_GLOBAL_MOTION
@@ -2008,6 +2583,26 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
}
nearestmv[1] = ref_mv[1];
}
+#if CONFIG_COMPOUND_SINGLEREF
+ } else if (is_singleref_comp_mode) {
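+    // Compound prediction from a single reference frame: only the first MV
+    // slot (ref_mv[0] / nearestmv[0]) needs to be refreshed here.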
+ int ref_mv_idx = mbmi->ref_mv_idx;
+    // Special case: SR_NEAR_NEWMV uses 1 + mbmi->ref_mv_idx (like NEARMV)
+    // instead of mbmi->ref_mv_idx (like NEWMV).
+ if (mbmi->mode == SR_NEAR_NEWMV) ref_mv_idx = 1 + mbmi->ref_mv_idx;
+
+ if (compound_ref0_mode(mbmi->mode) == NEWMV ||
+ compound_ref1_mode(mbmi->mode) == NEWMV) {
+ uint8_t ref_frame_type = av1_ref_frame_type(mbmi->ref_frame);
+ if (xd->ref_mv_count[ref_frame_type] > 1) {
+ ref_mv[0] = xd->ref_mv_stack[ref_frame_type][ref_mv_idx].this_mv;
+ clamp_mv_ref(&ref_mv[0].as_mv, xd->n8_w << MI_SIZE_LOG2,
+ xd->n8_h << MI_SIZE_LOG2, xd);
+ }
+      // TODO(zoeliu): Investigate further why this does not cause a
+      // mismatch for SR_NEAREST_NEWMV.
+ nearestmv[0] = ref_mv[0];
+ }
+#endif // CONFIG_COMPOUND_SINGLEREF
} else {
#endif // CONFIG_EXT_INTER
if (mbmi->mode == NEWMV) {
@@ -2043,8 +2638,13 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
#endif
cm->allow_interintra_compound && is_interintra_allowed(mbmi)) {
const int bsize_group = size_group_lookup[bsize];
+#if CONFIG_NEW_MULTISYMBOL
+ const int interintra =
+ aom_read_symbol(r, ec_ctx->interintra_cdf[bsize_group], 2, ACCT_STR);
+#else
const int interintra =
aom_read(r, cm->fc->interintra_prob[bsize_group], ACCT_STR);
+#endif
if (xd->counts) xd->counts->interintra[bsize_group][interintra]++;
assert(mbmi->ref_frame[1] == NONE_FRAME);
if (interintra) {
@@ -2064,8 +2664,13 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
mbmi->filter_intra_mode_info.use_filter_intra_mode[1] = 0;
#endif // CONFIG_FILTER_INTRA
if (is_interintra_wedge_used(bsize)) {
+#if CONFIG_NEW_MULTISYMBOL
+ mbmi->use_wedge_interintra = aom_read_symbol(
+ r, ec_ctx->wedge_interintra_cdf[bsize], 2, ACCT_STR);
+#else
mbmi->use_wedge_interintra =
aom_read(r, cm->fc->wedge_interintra_prob[bsize], ACCT_STR);
+#endif
if (xd->counts)
xd->counts->wedge_interintra[bsize][mbmi->use_wedge_interintra]++;
if (mbmi->use_wedge_interintra) {
@@ -2078,11 +2683,25 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
}
#endif // CONFIG_EXT_INTER && CONFIG_INTERINTRA
+#if CONFIG_WARPED_MOTION
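+  // With warped motion enabled, (re)point xd->block_refs at the frame
+  // buffers of this block's reference frame(s).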
+ for (ref = 0; ref < 1 + has_second_ref(mbmi); ++ref) {
+ const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
+ RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
+
+ xd->block_refs[ref] = ref_buf;
+ }
+#endif
+
#if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
mbmi->motion_mode = SIMPLE_TRANSLATION;
#if CONFIG_WARPED_MOTION
if (mbmi->sb_type >= BLOCK_8X8 && !has_second_ref(mbmi))
+#if WARPED_MOTION_SORT_SAMPLES
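+    // When sample sorting is enabled, findSamples() also records the
+    // neighboring MVs in pts_mv so they can be ranked by sortSamples() below.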
+ mbmi->num_proj_ref[0] =
+ findSamples(cm, xd, mi_row, mi_col, pts, pts_inref, pts_mv);
+#else
mbmi->num_proj_ref[0] = findSamples(cm, xd, mi_row, mi_col, pts, pts_inref);
+#endif // WARPED_MOTION_SORT_SAMPLES
#endif // CONFIG_WARPED_MOTION
#if CONFIG_MOTION_VAR
av1_count_overlappable_neighbors(cm, xd, mi_row, mi_col);
@@ -2095,13 +2714,28 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
if (mbmi->ref_frame[1] != INTRA_FRAME)
#endif // CONFIG_EXT_INTER
mbmi->motion_mode = read_motion_mode(cm, xd, mi, r);
+
+#if CONFIG_NCOBMC_ADAPT_WEIGHT
+ read_ncobmc_mode(xd, mi, mbmi->ncobmc_mode, r);
+#endif
+
+#if CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
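+  // Single-reference compound prediction is only used with simple
+  // translation (enforced by the assert below).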
+ if (is_singleref_comp_mode) assert(mbmi->motion_mode == SIMPLE_TRANSLATION);
+#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
#if CONFIG_WARPED_MOTION
if (mbmi->motion_mode == WARPED_CAUSAL) {
mbmi->wm_params[0].wmtype = DEFAULT_WMTYPE;
+
+#if WARPED_MOTION_SORT_SAMPLES
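+    // Sort the collected samples against the block's own MV before fitting
+    // the warp model.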
+ if (mbmi->num_proj_ref[0] > 1)
+ mbmi->num_proj_ref[0] = sortSamples(pts_mv, &mbmi->mv[0].as_mv, pts,
+ pts_inref, mbmi->num_proj_ref[0]);
+#endif // WARPED_MOTION_SORT_SAMPLES
+
if (find_projection(mbmi->num_proj_ref[0], pts, pts_inref, bsize,
mbmi->mv[0].as_mv.row, mbmi->mv[0].as_mv.col,
&mbmi->wm_params[0], mi_row, mi_col)) {
- assert(0 && "Invalid Warped Model.");
+ aom_internal_error(&cm->error, AOM_CODEC_ERROR, "Invalid Warped Model");
}
}
#endif // CONFIG_WARPED_MOTION
@@ -2112,8 +2746,13 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
#if CONFIG_EXT_INTER
mbmi->interinter_compound_type = COMPOUND_AVERAGE;
- if (cm->reference_mode != SINGLE_REFERENCE &&
+ if (
+#if CONFIG_COMPOUND_SINGLEREF
+ is_inter_anyref_comp_mode(mbmi->mode)
+#else // !CONFIG_COMPOUND_SINGLEREF
+ cm->reference_mode != SINGLE_REFERENCE &&
is_inter_compound_mode(mbmi->mode)
+#endif // CONFIG_COMPOUND_SINGLEREF
#if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
&& mbmi->motion_mode == SIMPLE_TRANSLATION
#endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION
@@ -2121,9 +2760,8 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
if (is_any_masked_compound_used(bsize)) {
#if CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
if (cm->allow_masked_compound) {
- mbmi->interinter_compound_type =
- aom_read_tree(r, av1_compound_type_tree,
- cm->fc->compound_type_prob[bsize], ACCT_STR);
+ mbmi->interinter_compound_type = aom_read_symbol(
+ r, ec_ctx->compound_type_cdf[bsize], COMPOUND_TYPES, ACCT_STR);
#if CONFIG_WEDGE
if (mbmi->interinter_compound_type == COMPOUND_WEDGE) {
mbmi->wedge_index =
@@ -2149,6 +2787,11 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi,
#if CONFIG_DUAL_FILTER || CONFIG_WARPED_MOTION || CONFIG_GLOBAL_MOTION
read_mb_interp_filter(cm, xd, mbmi, r);
#endif // CONFIG_DUAL_FILTER || CONFIG_WARPED_MOTION
+
+#if DEC_MISMATCH_DEBUG
+  // NOTE(zoeliu): For debugging only.
+ dec_dump_logs(cm, mi, xd, mi_row, mi_col, inter_mode_ctx, mode_ctx);
+#endif // DEC_MISMATCH_DEBUG
}
static void read_inter_frame_mode_info(AV1Decoder *const pbi,
@@ -2223,6 +2866,26 @@ static void read_inter_frame_mode_info(AV1Decoder *const pbi,
for (idx = 0; idx < width; idx += bw)
read_tx_size_vartx(cm, xd, mbmi, xd->counts, max_tx_size,
height != width, idy, idx, r);
+#if CONFIG_RECT_TX_EXT
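+    // Quarter-size transform: the flag is only read from the bitstream when
+    // the quarter size differs from max_tx_size; otherwise it is implied.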
+ if (is_quarter_tx_allowed(xd, mbmi, inter_block) &&
+ mbmi->tx_size == max_tx_size) {
+ int quarter_tx;
+
+ if (quarter_txsize_lookup[bsize] != max_tx_size) {
+ quarter_tx = aom_read(r, cm->fc->quarter_tx_size_prob, ACCT_STR);
+ if (xd->counts) ++xd->counts->quarter_tx_size[quarter_tx];
+ } else {
+ quarter_tx = 1;
+ }
+ if (quarter_tx) {
+ mbmi->tx_size = quarter_txsize_lookup[bsize];
+ for (idy = 0; idy < tx_size_high_unit[max_tx_size] / 2; ++idy)
+ for (idx = 0; idx < tx_size_wide_unit[max_tx_size] / 2; ++idx)
+ mbmi->inter_tx_size[idy][idx] = mbmi->tx_size;
+ mbmi->min_tx_size = get_min_tx_size(mbmi->tx_size);
+ }
+ }
+#endif
} else {
mbmi->tx_size = read_tx_size(cm, xd, inter_block, !mbmi->skip, r);