Diffstat (limited to 'third_party/aom/av1/common/scale.c')
-rw-r--r-- | third_party/aom/av1/common/scale.c | 56
1 file changed, 39 insertions, 17 deletions
diff --git a/third_party/aom/av1/common/scale.c b/third_party/aom/av1/common/scale.c
index 76beaa2bd..d5ccdfec0 100644
--- a/third_party/aom/av1/common/scale.c
+++ b/third_party/aom/av1/common/scale.c
@@ -14,17 +14,28 @@
 #include "av1/common/scale.h"
 #include "aom_dsp/aom_filter.h"
 
+// Note: Expect val to be in q4 precision
 static INLINE int scaled_x(int val, const struct scale_factors *sf) {
-  return (int)((int64_t)val * sf->x_scale_fp >> REF_SCALE_SHIFT);
+  const int off =
+      (sf->x_scale_fp - (1 << REF_SCALE_SHIFT)) * (1 << (SUBPEL_BITS - 1));
+  const int64_t tval = (int64_t)val * sf->x_scale_fp + off;
+  return (int)ROUND_POWER_OF_TWO_SIGNED_64(tval,
+                                           REF_SCALE_SHIFT - SCALE_EXTRA_BITS);
 }
 
+// Note: Expect val to be in q4 precision
 static INLINE int scaled_y(int val, const struct scale_factors *sf) {
-  return (int)((int64_t)val * sf->y_scale_fp >> REF_SCALE_SHIFT);
+  const int off =
+      (sf->y_scale_fp - (1 << REF_SCALE_SHIFT)) * (1 << (SUBPEL_BITS - 1));
+  const int64_t tval = (int64_t)val * sf->y_scale_fp + off;
+  return (int)ROUND_POWER_OF_TWO_SIGNED_64(tval,
+                                           REF_SCALE_SHIFT - SCALE_EXTRA_BITS);
 }
 
+// Note: Expect val to be in q4 precision
 static int unscaled_value(int val, const struct scale_factors *sf) {
   (void)sf;
-  return val;
+  return val << SCALE_EXTRA_BITS;
 }
 
 static int get_fixed_point_scale_factor(int other_size, int this_size) {
@@ -32,14 +43,24 @@ static int get_fixed_point_scale_factor(int other_size, int this_size) {
   // and use fixed point scaling factors in decoding and encoding routines.
   // Hardware implementations can calculate scale factor in device driver
   // and use multiplication and shifting on hardware instead of division.
-  return (other_size << REF_SCALE_SHIFT) / this_size;
+  return ((other_size << REF_SCALE_SHIFT) + this_size / 2) / this_size;
 }
 
-MV32 av1_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf) {
-  const int x_off_q4 = scaled_x(x << SUBPEL_BITS, sf) & SUBPEL_MASK;
-  const int y_off_q4 = scaled_y(y << SUBPEL_BITS, sf) & SUBPEL_MASK;
-  const MV32 res = { scaled_y(mv->row, sf) + y_off_q4,
-                     scaled_x(mv->col, sf) + x_off_q4 };
+static int get_coarse_point_scale_factor(int other_size, int this_size) {
+  // Calculate scaling factor once for each reference frame
+  // and use fixed point scaling factors in decoding and encoding routines.
+  // Hardware implementations can calculate scale factor in device driver
+  // and use multiplication and shifting on hardware instead of division.
+  return ((other_size << SCALE_SUBPEL_BITS) + this_size / 2) / this_size;
+}
+
+// Note: x and y are integer precision, mvq4 is q4 precision.
+MV32 av1_scale_mv(const MV *mvq4, int x, int y,
+                  const struct scale_factors *sf) {
+  const int x_off_q4 = scaled_x(x << SUBPEL_BITS, sf);
+  const int y_off_q4 = scaled_y(y << SUBPEL_BITS, sf);
+  const MV32 res = { scaled_y((y << SUBPEL_BITS) + mvq4->row, sf) - y_off_q4,
+                     scaled_x((x << SUBPEL_BITS) + mvq4->col, sf) - x_off_q4 };
   return res;
 }
 
@@ -59,8 +80,9 @@ void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
 
   sf->x_scale_fp = get_fixed_point_scale_factor(other_w, this_w);
   sf->y_scale_fp = get_fixed_point_scale_factor(other_h, this_h);
-  sf->x_step_q4 = scaled_x(16, sf);
-  sf->y_step_q4 = scaled_y(16, sf);
+
+  sf->x_step_q4 = get_coarse_point_scale_factor(other_w, this_w);
+  sf->y_step_q4 = get_coarse_point_scale_factor(other_h, this_h);
 
   if (av1_is_scaled(sf)) {
     sf->scale_value_x = scaled_x;
@@ -76,8 +98,8 @@ void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
   // applied in one direction only, and not at all for 0,0, seems to give the
   // best quality, but it may be worth trying an additional mode that does
   // do the filtering on full-pel.
-  if (sf->x_step_q4 == 16) {
-    if (sf->y_step_q4 == 16) {
+  if (sf->x_step_q4 == SCALE_SUBPEL_SHIFTS) {
+    if (sf->y_step_q4 == SCALE_SUBPEL_SHIFTS) {
       // No scaling in either direction.
       sf->predict[0][0][0] = aom_convolve_copy;
       sf->predict[0][0][1] = aom_convolve_avg;
@@ -95,7 +117,7 @@ void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
       sf->predict[1][0][1] = aom_convolve8_avg;
     }
   } else {
-    if (sf->y_step_q4 == 16) {
+    if (sf->y_step_q4 == SCALE_SUBPEL_SHIFTS) {
       // No scaling in the y direction. Must always scale in the x direction.
       sf->predict[0][0][0] = aom_convolve8_horiz;
       sf->predict[0][0][1] = aom_convolve8_avg_horiz;
@@ -119,8 +141,8 @@ void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
 
 #if CONFIG_HIGHBITDEPTH
   if (use_highbd) {
-    if (sf->x_step_q4 == 16) {
-      if (sf->y_step_q4 == 16) {
+    if (sf->x_step_q4 == SCALE_SUBPEL_SHIFTS) {
+      if (sf->y_step_q4 == SCALE_SUBPEL_SHIFTS) {
         // No scaling in either direction.
         sf->highbd_predict[0][0][0] = aom_highbd_convolve_copy;
         sf->highbd_predict[0][0][1] = aom_highbd_convolve_avg;
@@ -138,7 +160,7 @@ void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
         sf->highbd_predict[1][0][1] = aom_highbd_convolve8_avg;
       }
     } else {
-      if (sf->y_step_q4 == 16) {
+      if (sf->y_step_q4 == SCALE_SUBPEL_SHIFTS) {
         // No scaling in the y direction. Must always scale in the x direction.
         sf->highbd_predict[0][0][0] = aom_highbd_convolve8_horiz;
         sf->highbd_predict[0][0][1] = aom_highbd_convolve8_avg_horiz;
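
The core of this patch is the rounded fixed-point scale factor and the extra SCALE_EXTRA_BITS of sub-pel precision. The standalone sketch below illustrates the arithmetic of the patched get_fixed_point_scale_factor() and scaled_x() paths; it is not the library code. The constant values (REF_SCALE_SHIFT = 14, SUBPEL_BITS = 4, SCALE_SUBPEL_BITS = 10, hence SCALE_EXTRA_BITS = 6), the stand-in for ROUND_POWER_OF_TWO_SIGNED_64, and the helper names are assumptions made for illustration.

#include <stdint.h>
#include <stdio.h>

// Assumed constant values; the real definitions live in the aom/av1 headers.
#define REF_SCALE_SHIFT 14
#define SUBPEL_BITS 4
#define SCALE_SUBPEL_BITS 10
#define SCALE_EXTRA_BITS (SCALE_SUBPEL_BITS - SUBPEL_BITS)

// Signed rounded right shift, standing in for ROUND_POWER_OF_TWO_SIGNED_64.
static int64_t round_pow2_signed_64(int64_t value, int n) {
  const int64_t half = (int64_t)1 << (n - 1);
  return value < 0 ? -((-value + half) >> n) : (value + half) >> n;
}

// Rounded fixed-point scale factor, as in the patched
// get_fixed_point_scale_factor(): round-to-nearest instead of truncation.
static int fixed_point_scale_factor(int other_size, int this_size) {
  return ((other_size << REF_SCALE_SHIFT) + this_size / 2) / this_size;
}

// Mirrors the patched scaled_x(): q4 input, output widened by
// SCALE_EXTRA_BITS (i.e. SCALE_SUBPEL_BITS precision). The off term is the
// same as in the patch: (scale_fp - 1.0 in fixed point) * 2^(SUBPEL_BITS - 1).
static int scale_q4(int val_q4, int scale_fp) {
  const int off =
      (scale_fp - (1 << REF_SCALE_SHIFT)) * (1 << (SUBPEL_BITS - 1));
  const int64_t tval = (int64_t)val_q4 * scale_fp + off;
  return (int)round_pow2_signed_64(tval, REF_SCALE_SHIFT - SCALE_EXTRA_BITS);
}

int main(void) {
  // Example: 1920-wide reference predicting a 960-wide frame (2:1 scaling).
  const int scale_fp = fixed_point_scale_factor(1920, 960);
  printf("scale_fp     = %d\n", scale_fp);
  // Column x = 10 in the current frame, expressed in q4 precision.
  printf("scaled (q%d) = %d\n", SCALE_SUBPEL_BITS,
         scale_q4(10 << SUBPEL_BITS, scale_fp));
  return 0;
}

With these assumed constants, scale_fp evaluates to 32768 (2 << 14), and column 10 (160 in q4) maps to 20992 in q10, i.e. 20.5 reference pixels; the old truncating path returned 320 in q4, i.e. 20.0 pixels.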