Lines Matching +full:32 +full:- +full:61
(Search hits over the kernel's V4L2 VP9 decoder helper library, v4l2-vp9.c. Each hit is prefixed with its source line number; hits inside a function also name that function.)

1 // SPDX-License-Identifier: GPL-2.0
12 #include <media/v4l2-vp9.h>
19 { 73, 32, 19, 187, 222, 215, 46, 34, 100 }, /*left = h */
20 { 91, 30, 32, 116, 121, 186, 93, 86, 94 }, /*left = d45 */
25 { 74, 32, 27, 107, 86, 160, 63, 134, 102 }, /*left = d63 */
33 { 38, 32, 85, 140, 46, 112, 54, 151, 133 }, /*left = d117*/
34 { 39, 27, 61, 131, 110, 175, 44, 75, 136 }, /*left = d153*/
37 { 36, 61, 116, 114, 128, 162, 80, 125, 82 }, /*left = tm */
39 { 82, 26, 26, 171, 208, 204, 44, 32, 105 }, /*left = dc */
54 { 60, 32, 33, 112, 71, 220, 64, 89, 104 }, /*left = d135*/
58 { 61, 29, 29, 93, 97, 165, 83, 175, 162 }, /*left = d63 */
62 { 53, 40, 55, 139, 69, 183, 61, 80, 110 }, /*left = v */
64 { 60, 34, 19, 105, 61, 198, 53, 64, 89 }, /*left = d45 */
79 { 45, 18, 32, 130, 90, 157, 40, 79, 91 }, /*left = d207*/
83 { 75, 17, 22, 136, 138, 185, 32, 34, 166 }, /*left = dc */
90 { 51, 24, 14, 115, 133, 209, 32, 26, 104 }, /*left = d207*/
94 { 82, 22, 32, 127, 143, 213, 39, 41, 70 }, /*left = dc */
95 { 62, 44, 61, 123, 105, 189, 48, 57, 64 }, /*left = v */
103 { 40, 61, 26, 126, 152, 206, 61, 59, 93 }, /*left = tm */
110 { 43, 28, 37, 121, 63, 123, 61, 192, 169 }, /*left = d117*/
113 { 46, 23, 32, 74, 86, 150, 67, 183, 88 }, /*left = d63 */
116 { 65, 70, 60, 155, 159, 199, 61, 60, 81 }, /*left = dc */
118 { 39, 38, 21, 184, 227, 206, 42, 32, 64 }, /*left = h */
131 /* 8x8 -> 4x4 */
136 /* 16x16 -> 8x8 */
141 /* 32x32 -> 16x16 */
146 /* 64x64 -> 32x32 */
157 { 113, 12, 23, 188, 226, 142, 26, 32, 125 }, /* y = h */
159 { 113, 9, 36, 155, 111, 157, 32, 44, 161 }, /* y = d135 */
160 { 116, 9, 55, 176, 76, 96, 37, 61, 149 }, /* y = d117 */
162 { 120, 12, 32, 145, 195, 142, 32, 38, 86 }, /* y = d207 */
199 { 1, 19, 32 },
256 { 1, 32, 60 },
278 { 61, 49, 166 },
373 { 3, 61, 124 },
383 { 8, 23, 61 },
454 { 2, 61, 95 },
474 { 32, 186, 224 },
487 { 1, 19, 32 },
581 { 1, 18, 32 },
584 { 61, 199, 240 },
692 { 1, 32, 56 },
767 { 32, 146, 209 },
800 { /* tx = 32x32 */
816 { 2, 32, 55 },
867 { 10, 49, 61 },
882 { 1, 36, 61 },
890 { 1, 35, 61 },
907 { 61, 37, 123 },
933 { 1, 61, 97 },
1035 { 65, 32, 18, 144, 162, 194, 41, 51, 98 },
1047 { 86, 5, 32, 154, 192, 168, 14, 22, 163 } /* y = d153 */,
1048 { 85, 5, 32, 156, 216, 148, 19, 29, 73 } /* y = d207 */,
1053 /* 8x8 -> 4x4 */
1058 /* 16x16 -> 8x8 */
1063 /* 32x32 -> 16x16 */
1068 /* 64x64 -> 32x32 */
1071 { 58, 32, 12 } /* l split, a not split */,
1076 .joint = { 32, 64, 96 },
1109 #define DIV_INV(d) ((u32)(((1ULL << 32) + ((d) - 1)) / (d))) in fastdiv()
1120 DIVS_INV(30, 31, 32, 33, 34, 35, 36, 37, 38, 39), in fastdiv()
1123 DIVS_INV(60, 61, 62, 63, 64, 65, 66, 67, 68, 69), in fastdiv()
1151 if (WARN_ON(divisor - 2 >= ARRAY_SIZE(inv))) in fastdiv()
1154 return ((u64)dividend * inv[divisor - 2]) >> 32; in fastdiv()
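These two fragments implement division by multiplication: DIV_INV() precomputes ceil(2^32 / d) once per divisor, and fastdiv() then replaces dividend / divisor with a widening multiply and a 32-bit shift, avoiding a hardware divide in the hot adaptation path. A standalone sketch of the technique (function names are mine, not the kernel's); the result is exact as long as the dividend stays well below 2^32, which holds for the products of 8-bit probabilities and saturated counts divided here:

	#include <assert.h>
	#include <stdint.h>

	/* Precompute ceil(2^32 / d), as DIV_INV() does above. */
	static uint32_t div_inv(uint32_t d)
	{
		return (uint32_t)(((1ULL << 32) + d - 1) / d);
	}

	/* n / d becomes one widening multiply and one shift. */
	static uint32_t fast_div(uint32_t n, uint32_t inv)
	{
		return (uint32_t)(((uint64_t)n * inv) >> 32);
	}

	int main(void)
	{
		uint32_t inv3 = div_inv(3), n;

		for (n = 0; n < 1000000; n++)
			assert(fast_div(n, inv3) == n / 3);
		return 0;
	}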
1164 return m - ((v + 1) >> 1); in inv_recenter_nonneg()
1179 1 + inv_recenter_nonneg(delta, prob - 1) : in update_prob()
1180 255 - inv_recenter_nonneg(delta, 255 - prob); in update_prob()
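update_prob() applies a forward-coded delta from the compressed header: a zero delta keeps the current probability, and nonzero deltas are recentered so that small coded values land next to it. The listing only shows the odd branch of the recentering helper; the full function, per the VP9 spec's inv_recenter_nonneg(), is:

	/* v = 0 maps to m, then successive v alternate one step below and
	 * above m; once v exceeds 2 * m it is taken literally.
	 * With m = 10: v = 0,1,2,3,4 -> 10, 9, 11, 8, 12. */
	static unsigned int inv_recenter_nonneg(unsigned int v, unsigned int m)
	{
		if (v > 2 * m)
			return v;
		if (v & 1)
			return m - ((v + 1) >> 1);	/* the line shown above */
		return m + (v >> 1);
	}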
1189 for (i = 0; i < ARRAY_SIZE(probs->tx8); i++) { in update_tx_probs()
1190 u8 *p8x8 = probs->tx8[i]; in update_tx_probs()
1191 u8 *p16x16 = probs->tx16[i]; in update_tx_probs()
1192 u8 *p32x32 = probs->tx32[i]; in update_tx_probs()
1193 const u8 *d8x8 = deltas->tx8[i]; in update_tx_probs()
1194 const u8 *d16x16 = deltas->tx16[i]; in update_tx_probs()
1195 const u8 *d32x32 = deltas->tx32[i]; in update_tx_probs()
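The inner loops of update_tx_probs() fall outside the matches; given the element-wise pattern every other updater in this file follows, they presumably apply update_prob() per entry over the VP9 tx tree widths (1, 2 and 3 probabilities for the 8x8, 16x16 and 32x32 tables):

	/* Sketch of the elided loop bodies, assuming update_prob() above. */
	p8x8[0] = update_prob(d8x8[0], p8x8[0]);

	for (j = 0; j < 2; j++)
		p16x16[j] = update_prob(d16x16[j], p16x16[j]);

	for (j = 0; j < 3; j++)
		p32x32[j] = update_prob(d32x32[j], p32x32[j]);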
1229 for (i = 0; i < ARRAY_SIZE(probs->coef); i++) { in update_coef_probs()
1230 for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++) in update_coef_probs()
1231 for (k = 0; k < ARRAY_SIZE(probs->coef[0][0]); k++) in update_coef_probs()
1232 update_coeff(deltas->coef[i][j][k], probs->coef[i][j][k]); in update_coef_probs()
1234 if (deltas->tx_mode == i) in update_coef_probs()
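update_coeff(), not among the matches, refreshes a single coefficient table; in the VP9 layout that is 6 coefficient bands x 6 previous-token contexts x 3 tree probabilities. Note also that the deltas->tx_mode == i break above stops the updates at the largest transform size the frame's tx_mode allows, as the spec requires. A hedged sketch of what update_coeff() presumably does:

	/* Sketch: per-node forward update of one coef table (dimensions
	 * assumed from the VP9 spec: 6 bands x 6 contexts x 3 probs). */
	static void update_coeff(const u8 deltas[6][6][3], u8 probs[6][6][3])
	{
		int l, m, n;

		for (l = 0; l < 6; l++)
			for (m = 0; m < 6; m++)
				for (n = 0; n < 3; n++)
					probs[l][m][n] =
						update_prob(deltas[l][m][n],
							    probs[l][m][n]);
	}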
1245 for (i = 0; i < ARRAY_SIZE(probs->skip); i++) in update_skip_probs()
1246 probs->skip[i] = update_prob(deltas->skip[i], probs->skip[i]); in update_skip_probs()
1255 for (i = 0; i < ARRAY_SIZE(probs->inter_mode); i++) { in update_inter_mode_probs()
1256 u8 *p = probs->inter_mode[i]; in update_inter_mode_probs()
1257 const u8 *d = deltas->inter_mode[i]; in update_inter_mode_probs()
1271 for (i = 0; i < ARRAY_SIZE(probs->interp_filter); i++) { in update_interp_filter_probs()
1272 u8 *p = probs->interp_filter[i]; in update_interp_filter_probs()
1273 const u8 *d = deltas->interp_filter[i]; in update_interp_filter_probs()
1286 for (i = 0; i < ARRAY_SIZE(probs->is_inter); i++) in update_is_inter_probs()
1287 probs->is_inter[i] = update_prob(deltas->is_inter[i], probs->is_inter[i]); in update_is_inter_probs()
1301 for (i = 0; i < ARRAY_SIZE(probs->comp_mode); i++) in update_frame_reference_mode_probs()
1302 probs->comp_mode[i] = update_prob(deltas->comp_mode[i], in update_frame_reference_mode_probs()
1303 probs->comp_mode[i]); in update_frame_reference_mode_probs()
1306 for (i = 0; i < ARRAY_SIZE(probs->single_ref); i++) { in update_frame_reference_mode_probs()
1307 u8 *p = probs->single_ref[i]; in update_frame_reference_mode_probs()
1308 const u8 *d = deltas->single_ref[i]; in update_frame_reference_mode_probs()
1315 for (i = 0; i < ARRAY_SIZE(probs->comp_ref); i++) in update_frame_reference_mode_probs()
1316 probs->comp_ref[i] = update_prob(deltas->comp_ref[i], probs->comp_ref[i]); in update_frame_reference_mode_probs()
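The guards on these three update blocks are not among the matches, but the VP9 spec pins them down: comp_mode probabilities are only coded when the reference mode is selected per block, and the single/compound reference probabilities only when that prediction type can occur. Presumably:

	/* Sketch of the elided guards (the uAPI enum names are real). */
	if (reference_mode == V4L2_VP9_REFERENCE_MODE_SELECT)
		/* update probs->comp_mode[] */;

	if (reference_mode != V4L2_VP9_REFERENCE_MODE_COMPOUND_REFERENCE)
		/* update probs->single_ref[][] */;

	if (reference_mode != V4L2_VP9_REFERENCE_MODE_SINGLE_REFERENCE)
		/* update probs->comp_ref[] */;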
1325 for (i = 0; i < ARRAY_SIZE(probs->y_mode); i++) in update_y_mode_probs()
1326 for (j = 0; j < ARRAY_SIZE(probs->y_mode[0]); ++j) in update_y_mode_probs()
1327 probs->y_mode[i][j] = in update_y_mode_probs()
1328 update_prob(deltas->y_mode[i][j], probs->y_mode[i][j]); in update_y_mode_probs()
1339 u8 *p = probs->partition[i * 4 + j]; in update_partition_probs()
1340 const u8 *d = deltas->partition[i * 4 + j]; in update_partition_probs()
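The i * 4 + j indexing flattens VP9's sixteen partition contexts: four block sizes (8x8 up to 64x64) times four above/left neighbour contexts, three tree probabilities each. The full loop implied by these two lines is presumably:

	/* Sketch, assuming update_prob() and a [16][3] partition table. */
	for (i = 0; i < 4; i++)
		for (j = 0; j < 4; j++) {
			u8 *p = probs->partition[i * 4 + j];
			const u8 *d = deltas->partition[i * 4 + j];

			for (k = 0; k < 3; k++)
				p[k] = update_prob(d[k], p[k]);
		}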
1361 u8 *p = probs->mv.joint; in update_mv_probs()
1362 const u8 *d = deltas->mv.joint; in update_mv_probs()
1369 for (i = 0; i < ARRAY_SIZE(probs->mv.sign); i++) { in update_mv_probs()
1370 p = probs->mv.sign; in update_mv_probs()
1371 d = deltas->mv.sign; in update_mv_probs()
1374 p = probs->mv.classes[i]; in update_mv_probs()
1375 d = deltas->mv.classes[i]; in update_mv_probs()
1376 for (j = 0; j < ARRAY_SIZE(probs->mv.classes[0]); j++) in update_mv_probs()
1379 p = probs->mv.class0_bit; in update_mv_probs()
1380 d = deltas->mv.class0_bit; in update_mv_probs()
1383 p = probs->mv.bits[i]; in update_mv_probs()
1384 d = deltas->mv.bits[i]; in update_mv_probs()
1385 for (j = 0; j < ARRAY_SIZE(probs->mv.bits[0]); j++) in update_mv_probs()
1388 for (j = 0; j < ARRAY_SIZE(probs->mv.class0_fr[0]); j++) { in update_mv_probs()
1389 p = probs->mv.class0_fr[i][j]; in update_mv_probs()
1390 d = deltas->mv.class0_fr[i][j]; in update_mv_probs()
1397 p = probs->mv.fr[i]; in update_mv_probs()
1398 d = deltas->mv.fr[i]; in update_mv_probs()
1399 for (j = 0; j < ARRAY_SIZE(probs->mv.fr[i]); j++) in update_mv_probs()
1402 if (dec_params->flags & V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV) { in update_mv_probs()
1403 p = probs->mv.class0_hp; in update_mv_probs()
1404 d = deltas->mv.class0_hp; in update_mv_probs()
1407 p = probs->mv.hp; in update_mv_probs()
1408 d = deltas->mv.hp; in update_mv_probs()
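Every step of update_mv_probs() above repeats one element-wise refresh over a different table (joint, then per component: sign, classes, class0_bit, bits, class0_fr, fr, and the high-precision tables when allowed). The elided loop bodies presumably all reduce to the following; the helper name is hypothetical, the kernel open-codes each loop:

	static void update_prob_array(u8 *p, const u8 *d, unsigned int n)
	{
		unsigned int j;

		for (j = 0; j < n; j++)
			p[j] = update_prob(d[j], p[j]);
	}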
1419 if (deltas->tx_mode == V4L2_VP9_TX_MODE_SELECT) in v4l2_vp9_fw_update_probs()
1426 if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME || in v4l2_vp9_fw_update_probs()
1427 dec_params->flags & V4L2_VP9_FRAME_FLAG_INTRA_ONLY) in v4l2_vp9_fw_update_probs()
1432 if (dec_params->interpolation_filter == V4L2_VP9_INTERP_FILTER_SWITCHABLE) in v4l2_vp9_fw_update_probs()
1437 update_frame_reference_mode_probs(dec_params->reference_mode, probs, deltas); in v4l2_vp9_fw_update_probs()
1452 u8 fctx_idx = dec_params->frame_context_idx; in v4l2_vp9_reset_frame_ctx()
1454 if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME || in v4l2_vp9_reset_frame_ctx()
1455 dec_params->flags & V4L2_VP9_FRAME_FLAG_INTRA_ONLY || in v4l2_vp9_reset_frame_ctx()
1456 dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) { in v4l2_vp9_reset_frame_ctx()
1464 if (dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME || in v4l2_vp9_reset_frame_ctx()
1465 dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT || in v4l2_vp9_reset_frame_ctx()
1466 dec_params->reset_frame_context == V4L2_VP9_RESET_FRAME_CTX_ALL) in v4l2_vp9_reset_frame_ctx()
1471 else if (dec_params->reset_frame_context == V4L2_VP9_RESET_FRAME_CTX_SPEC) in v4l2_vp9_reset_frame_ctx()
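The bodies of these branches follow the spec's setup_past_independence() rules. A condensed sketch of the whole decision, with a hypothetical fill_default_probs() standing in for however the kernel loads the default frame context:

	/* Sketch of v4l2_vp9_reset_frame_ctx(): returns the context index
	 * the frame should actually decode with. */
	if (key_frame || intra_only || error_resilient) {
		if (key_frame || error_resilient ||
		    reset_frame_context == V4L2_VP9_RESET_FRAME_CTX_ALL)
			for (i = 0; i < 4; i++)
				fill_default_probs(&frame_context[i]);
		else if (reset_frame_context == V4L2_VP9_RESET_FRAME_CTX_SPEC)
			fill_default_probs(&frame_context[fctx_idx]);
		fctx_idx = 0;
	}
	return fctx_idx;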
1502 * Round2(pre_prob * (256 - factor) + prob * factor, 8) in merge_prob()
1503 * Round2(pre_prob * 256 + (prob - pre_prob) * factor, 8) in merge_prob()
1504 * (pre_prob * 256 >> 8) + (((prob - pre_prob) * factor + 128) >> 8) in merge_prob()
1506 return pre_prob + (((prob - pre_prob) * factor + 128) >> 8); in merge_prob()
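The comment above rewrites the textbook weighted average Round2(pre_prob * (256 - factor) + prob * factor, 8) into a single multiply on the difference; since factor <= 256 nothing overflows, and the two forms agree on the whole 8-bit domain. A standalone exhaustive check:

	#include <assert.h>

	int main(void)
	{
		int pre, prob, f;

		for (pre = 0; pre < 256; pre++)
			for (prob = 0; prob < 256; prob++)
				for (f = 0; f <= 256; f++) {
					/* Round2(pre * (256 - f) + prob * f, 8) */
					int a = (pre * (256 - f) + prob * f + 128) >> 8;
					/* one-multiply form from the listing; >> of
					 * a negative value is an arithmetic shift on
					 * the compilers the kernel supports */
					int b = pre + (((prob - pre) * f + 128) >> 8);

					assert(a == b);
				}
		return 0;
	}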
1518 * It turns out that in all cases the recursive calls boil down to a short-ish series
1522 * ---------
1528 * ---------
1534 * ---------
1541 * ---------
1554 * ---------
1564 * ---------
1571 * ---------
1610 sum -= c[9]; in merge_probs_variant_d()
1612 sum -= c[1]; in merge_probs_variant_d()
1615 sum -= s2; in merge_probs_variant_d()
1617 s2 -= c[2]; in merge_probs_variant_d()
1620 sum -= c[3]; in merge_probs_variant_d()
1622 sum -= c[8]; in merge_probs_variant_d()
1646 sum -= c[1]; in merge_probs_variant_g()
1648 sum -= c[2] + c[3]; in merge_probs_variant_g()
1651 sum -= c[4] + c[5]; in merge_probs_variant_g()
1654 sum -= c[6]; in merge_probs_variant_g()
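merge_probs_variant_d() and _g() are unrolled tree merges: each internal node of a symbol tree is merged against the split of the observed symbol counts between its two subtrees, and the running sum -= ... lines track the count mass still under the current node. A toy four-symbol version, assuming the five-argument merge_prob(pre_prob, ct0, ct1, count_sat, max_update_factor) whose tail is shown above:

	/*
	 * Hypothetical tree: p[0] splits s0 vs {s1,s2,s3}, p[1] splits
	 * s1 vs {s2,s3}, p[2] splits s2 vs s3; c[] holds symbol counts.
	 */
	static void merge_probs_4(u8 p[3], const u32 c[4],
				  u16 count_sat, u32 max_update_factor)
	{
		u32 sum = c[1] + c[2] + c[3];

		p[0] = merge_prob(p[0], c[0], sum, count_sat, max_update_factor);
		sum -= c[1];	/* mass that left the tree at s1 */
		p[1] = merge_prob(p[1], c[1], sum, count_sat, max_update_factor);
		p[2] = merge_prob(p[2], c[2], c[3], count_sat, max_update_factor);
	}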
1679 for (l = 0; l < ARRAY_SIZE(probs->coef[0][0][0]); l++) { in _adapt_coeff()
1681 u8 *p = probs->coef[i][j][k][l][m]; in _adapt_coeff()
1683 *counts->eob[i][j][k][l][m][1], in _adapt_coeff()
1684 *counts->eob[i][j][k][l][m][0] - *counts->eob[i][j][k][l][m][1], in _adapt_coeff()
1687 adapt_probs_variant_a_coef(p, *counts->coeff[i][j][k][l][m], uf); in _adapt_coeff()
1699 for (i = 0; i < ARRAY_SIZE(probs->coef); i++) in _adapt_coef_probs()
1700 for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++) in _adapt_coef_probs()
1701 for (k = 0; k < ARRAY_SIZE(probs->coef[0][0]); k++) in _adapt_coef_probs()
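The update factor uf threaded through these coefficient loops comes from the caller. Per the VP9 spec (and libvpx's COEF_MAX_UPDATE_FACTOR constants), coefficient counts saturate at 24 and normally adapt with factor 112, switching to 128 for the frame right after a key frame; the caller presumably selects it along these lines, with use_128 carrying that "previous frame was a key frame" signal:

	/* Assumption based on the spec/libvpx constants, not shown above. */
	_adapt_coef_probs(probs, counts, use_128 ? 128 : 112);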
1766 for (i = 0; i < ARRAY_SIZE(probs->is_inter); i++) in v4l2_vp9_adapt_noncoef_probs()
1767 probs->is_inter[i] = adapt_prob(probs->is_inter[i], (*counts->intra_inter)[i]); in v4l2_vp9_adapt_noncoef_probs()
1769 for (i = 0; i < ARRAY_SIZE(probs->comp_mode); i++) in v4l2_vp9_adapt_noncoef_probs()
1770 probs->comp_mode[i] = adapt_prob(probs->comp_mode[i], (*counts->comp)[i]); in v4l2_vp9_adapt_noncoef_probs()
1772 for (i = 0; i < ARRAY_SIZE(probs->comp_ref); i++) in v4l2_vp9_adapt_noncoef_probs()
1773 probs->comp_ref[i] = adapt_prob(probs->comp_ref[i], (*counts->comp_ref)[i]); in v4l2_vp9_adapt_noncoef_probs()
1776 for (i = 0; i < ARRAY_SIZE(probs->single_ref); i++) in v4l2_vp9_adapt_noncoef_probs()
1777 for (j = 0; j < ARRAY_SIZE(probs->single_ref[0]); j++) in v4l2_vp9_adapt_noncoef_probs()
1778 probs->single_ref[i][j] = adapt_prob(probs->single_ref[i][j], in v4l2_vp9_adapt_noncoef_probs()
1779 (*counts->single_ref)[i][j]); in v4l2_vp9_adapt_noncoef_probs()
1781 for (i = 0; i < ARRAY_SIZE(probs->inter_mode); i++) in v4l2_vp9_adapt_noncoef_probs()
1782 adapt_probs_variant_c(probs->inter_mode[i], (*counts->mv_mode)[i]); in v4l2_vp9_adapt_noncoef_probs()
1784 for (i = 0; i < ARRAY_SIZE(probs->y_mode); i++) in v4l2_vp9_adapt_noncoef_probs()
1785 adapt_probs_variant_d(probs->y_mode[i], (*counts->y_mode)[i]); in v4l2_vp9_adapt_noncoef_probs()
1787 for (i = 0; i < ARRAY_SIZE(probs->uv_mode); i++) in v4l2_vp9_adapt_noncoef_probs()
1788 adapt_probs_variant_d(probs->uv_mode[i], (*counts->uv_mode)[i]); in v4l2_vp9_adapt_noncoef_probs()
1790 for (i = 0; i < ARRAY_SIZE(probs->partition); i++) in v4l2_vp9_adapt_noncoef_probs()
1791 adapt_probs_variant_e(probs->partition[i], (*counts->partition)[i]); in v4l2_vp9_adapt_noncoef_probs()
1793 for (i = 0; i < ARRAY_SIZE(probs->skip); i++) in v4l2_vp9_adapt_noncoef_probs()
1794 probs->skip[i] = adapt_prob(probs->skip[i], (*counts->skip)[i]); in v4l2_vp9_adapt_noncoef_probs()
1797 for (i = 0; i < ARRAY_SIZE(probs->interp_filter); i++) in v4l2_vp9_adapt_noncoef_probs()
1798 adapt_probs_variant_f(probs->interp_filter[i], (*counts->filter)[i]); in v4l2_vp9_adapt_noncoef_probs()
1801 for (i = 0; i < ARRAY_SIZE(probs->tx8); i++) { in v4l2_vp9_adapt_noncoef_probs()
1802 adapt_probs_variant_b(probs->tx8[i], (*counts->tx8p)[i]); in v4l2_vp9_adapt_noncoef_probs()
1803 adapt_probs_variant_f(probs->tx16[i], (*counts->tx16p)[i]); in v4l2_vp9_adapt_noncoef_probs()
1804 adapt_probs_variant_e(probs->tx32[i], (*counts->tx32p)[i]); in v4l2_vp9_adapt_noncoef_probs()
1807 adapt_probs_variant_e(probs->mv.joint, *counts->mv_joint); in v4l2_vp9_adapt_noncoef_probs()
1809 for (i = 0; i < ARRAY_SIZE(probs->mv.sign); i++) { in v4l2_vp9_adapt_noncoef_probs()
1810 probs->mv.sign[i] = adapt_prob(probs->mv.sign[i], (*counts->sign)[i]); in v4l2_vp9_adapt_noncoef_probs()
1812 adapt_probs_variant_g(probs->mv.classes[i], (*counts->classes)[i]); in v4l2_vp9_adapt_noncoef_probs()
1814 probs->mv.class0_bit[i] = adapt_prob(probs->mv.class0_bit[i], (*counts->class0)[i]); in v4l2_vp9_adapt_noncoef_probs()
1816 for (j = 0; j < ARRAY_SIZE(probs->mv.bits[0]); j++) in v4l2_vp9_adapt_noncoef_probs()
1817 probs->mv.bits[i][j] = adapt_prob(probs->mv.bits[i][j], in v4l2_vp9_adapt_noncoef_probs()
1818 (*counts->bits)[i][j]); in v4l2_vp9_adapt_noncoef_probs()
1820 for (j = 0; j < ARRAY_SIZE(probs->mv.class0_fr[0]); j++) in v4l2_vp9_adapt_noncoef_probs()
1821 adapt_probs_variant_e(probs->mv.class0_fr[i][j], in v4l2_vp9_adapt_noncoef_probs()
1822 (*counts->class0_fp)[i][j]); in v4l2_vp9_adapt_noncoef_probs()
1824 adapt_probs_variant_e(probs->mv.fr[i], (*counts->fp)[i]); in v4l2_vp9_adapt_noncoef_probs()
1829 probs->mv.class0_hp[i] = adapt_prob(probs->mv.class0_hp[i], in v4l2_vp9_adapt_noncoef_probs()
1830 (*counts->class0_hp)[i]); in v4l2_vp9_adapt_noncoef_probs()
1832 probs->mv.hp[i] = adapt_prob(probs->mv.hp[i], (*counts->hp)[i]); in v4l2_vp9_adapt_noncoef_probs()
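All of the adapt_prob() calls in this function are background adaptation of one binary probability from the pair of symbol counts gathered during decode; with the spec's non-coefficient constants (count saturation 20, maximum update factor 128) it presumably reduces to the five-argument merge_prob() sketched earlier:

	static u8 adapt_prob(u8 prob, const u32 counts[2])
	{
		return merge_prob(prob, counts[0], counts[1], 20, 128);
	}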