[x265] [PATCH] asm: disabled 10bpp AVX & AVX2 primitives having less than 3% speed up over SSE
Dnyaneshwar Gorade
dnyaneshwar at multicorewareinc.com
Tue Aug 18 09:12:24 CEST 2015
Right, but one small correction: inside the #if 0 ... #endif block, disable only the specific
primitives, not all sizes (expand the macro and keep only those with less than 3% speedup).
On Tue, Aug 18, 2015 at 12:05 PM, <aasaipriya at multicorewareinc.com> wrote:
> # HG changeset patch
> # User Aasaipriya Chandran <aasaipriya at multicorewareinc.com>
> # Date 1439879745 -19800
> # Tue Aug 18 12:05:45 2015 +0530
> # Node ID 2d0d8be0f401aa4eac554a280118376a991f5475
> # Parent 996ebce8c874fc511d495cee227d24413e99d0c1
> asm: disabled 10bpp AVX & AVX2 primitives having less than 3% speed up
> over SSE
>
> these primitives are slower than SSE primitives
>
> diff -r 996ebce8c874 -r 2d0d8be0f401 source/common/x86/asm-primitives.cpp
> --- a/source/common/x86/asm-primitives.cpp Mon Aug 17 10:52:15 2015
> +0530
> +++ b/source/common/x86/asm-primitives.cpp Tue Aug 18 12:05:45 2015
> +0530
> @@ -1169,7 +1169,6 @@
> }
> if (cpuMask & X265_CPU_AVX)
> {
> - // p.pu[LUMA_4x4].satd = p.cu[BLOCK_4x4].sa8d =
> PFX(pixel_satd_4x4_avx); fails tests
> p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].satd =
> PFX(pixel_satd_16x24_avx);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].satd =
> PFX(pixel_satd_32x48_avx);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].satd =
> PFX(pixel_satd_24x64_avx);
> @@ -1177,32 +1176,36 @@
> p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].satd =
> PFX(pixel_satd_8x12_avx);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].satd =
> PFX(pixel_satd_12x32_avx);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].satd =
> PFX(pixel_satd_4x32_avx);
> - p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].satd =
> PFX(pixel_satd_4x8_avx);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].satd =
> PFX(pixel_satd_8x16_avx);
> - p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].satd =
> PFX(pixel_satd_4x4_avx);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].satd =
> PFX(pixel_satd_8x8_avx);
> - p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].satd =
> PFX(pixel_satd_4x16_avx);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].satd =
> PFX(pixel_satd_8x32_avx);
> - p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].satd =
> PFX(pixel_satd_8x4_avx);
> -
> - ALL_LUMA_PU(satd, pixel_satd, avx);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].satd =
> PFX(pixel_satd_8x8_avx);
> - p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].satd =
> PFX(pixel_satd_8x4_avx);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].satd =
> PFX(pixel_satd_8x16_avx);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].satd =
> PFX(pixel_satd_8x32_avx);
> - p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].satd =
> PFX(pixel_satd_12x16_avx);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].satd =
> PFX(pixel_satd_24x32_avx);
> - p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].satd =
> PFX(pixel_satd_4x16_avx);
> - p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].satd =
> PFX(pixel_satd_4x8_avx);
> -#if X265_DEPTH <= 10
> - ASSIGN_SA8D(avx);
> -#endif
> - p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].sa8d =
> PFX(pixel_sa8d_8x8_avx);
> - p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].sa8d =
> PFX(pixel_sa8d_16x16_avx);
> - p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].sa8d =
> PFX(pixel_sa8d_32x32_avx);
> - LUMA_VAR(avx);
> - p.ssim_4x4x2_core = PFX(pixel_ssim_4x4x2_core_avx);
> - p.ssim_end_4 = PFX(pixel_ssim_end4_avx);
> +
> + p.pu[LUMA_8x8].satd = PFX(pixel_satd_8x8_avx);
> + p.pu[LUMA_16x16].satd = PFX(pixel_satd_16x16_avx);
> + p.pu[LUMA_32x32].satd = PFX(pixel_satd_32x32_avx);
> + p.pu[LUMA_64x64].satd = PFX(pixel_satd_64x64_avx);
> + p.pu[LUMA_16x8].satd = PFX(pixel_satd_16x8_avx);
> + p.pu[LUMA_8x16].satd = PFX(pixel_satd_8x16_avx);
> + p.pu[LUMA_16x32].satd = PFX(pixel_satd_16x32_avx);
> + p.pu[LUMA_32x16].satd = PFX(pixel_satd_32x16_avx);
> + p.pu[LUMA_64x32].satd = PFX(pixel_satd_64x32_avx);
> + p.pu[LUMA_32x64].satd = PFX(pixel_satd_32x64_avx);
> + p.pu[LUMA_16x12].satd = PFX(pixel_satd_16x12_avx);
> + p.pu[LUMA_16x4].satd = PFX(pixel_satd_16x4_avx);
> + p.pu[LUMA_32x24].satd = PFX(pixel_satd_32x24_avx);
> + p.pu[LUMA_24x32].satd = PFX(pixel_satd_24x32_avx);
> + p.pu[LUMA_32x8].satd = PFX(pixel_satd_32x8_avx);
> + p.pu[LUMA_8x32].satd = PFX(pixel_satd_8x32_avx);
> + p.pu[LUMA_64x48].satd = PFX(pixel_satd_64x48_avx);
> + p.pu[LUMA_48x64].satd = PFX(pixel_satd_48x64_avx);
> + p.pu[LUMA_64x16].satd = PFX(pixel_satd_64x16_avx);
> + p.pu[LUMA_16x64].satd = PFX(pixel_satd_16x64_avx);
> +
> + p.cu[BLOCK_16x16].var = PFX(pixel_var_16x16_avx);
>
> // copy_pp primitives
> // 16 x N
> @@ -1299,6 +1302,33 @@
> p.pu[LUMA_64x32].copy_pp = (copy_pp_t)PFX(blockcopy_ss_64x32_avx);
> p.pu[LUMA_64x48].copy_pp = (copy_pp_t)PFX(blockcopy_ss_64x48_avx);
> p.pu[LUMA_64x64].copy_pp = (copy_pp_t)PFX(blockcopy_ss_64x64_avx);
> +
> + /* The following primitives have been disabled since performance
> compared to SSE4 is negligible/negative */
> +#if 0
> + p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].satd =
> PFX(pixel_satd_4x8_avx);
> + p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].satd =
> PFX(pixel_satd_4x8_avx);
> + p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].satd =
> PFX(pixel_satd_4x4_avx);
> + p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].satd =
> PFX(pixel_satd_4x16_avx);
> + p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].satd =
> PFX(pixel_satd_4x16_avx);
> + p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].satd =
> PFX(pixel_satd_8x4_avx);
> + p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].satd =
> PFX(pixel_satd_8x4_avx);
> + p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].satd =
> PFX(pixel_satd_12x16_avx);
> +
> + ALL_LUMA_PU(satd, pixel_satd, avx);
> +
> + LUMA_VAR(avx);
> + p.cu[BLOCK_8x8].var = PFX(pixel_var_8x8_avx);
> + p.cu[BLOCK_32x32].var = PFX(pixel_var_32x32_avx);
> + p.cu[BLOCK_64x64].var = PFX(pixel_var_64x64_avx);
> +
> + ASSIGN_SA8D(avx);
> + p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].sa8d =
> PFX(pixel_sa8d_8x8_avx);
> + p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].sa8d =
> PFX(pixel_sa8d_16x16_avx);
> + p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].sa8d =
> PFX(pixel_sa8d_32x32_avx);
> +
> + p.ssim_4x4x2_core = PFX(pixel_ssim_4x4x2_core_avx);
> + p.ssim_end_4 = PFX(pixel_ssim_end4_avx);
> +#endif
> }
> if (cpuMask & X265_CPU_XOP)
> {
> @@ -1396,12 +1426,6 @@
> p.cu[BLOCK_32x32].intra_pred[34] =
> PFX(intra_pred_ang32_2_avx2);
>
> p.pu[LUMA_12x16].pixelavg_pp = PFX(pixel_avg_12x16_avx2);
> - p.pu[LUMA_16x4].pixelavg_pp = PFX(pixel_avg_16x4_avx2);
> - p.pu[LUMA_16x8].pixelavg_pp = PFX(pixel_avg_16x8_avx2);
> - p.pu[LUMA_16x12].pixelavg_pp = PFX(pixel_avg_16x12_avx2);
> - p.pu[LUMA_16x16].pixelavg_pp = PFX(pixel_avg_16x16_avx2);
> - p.pu[LUMA_16x32].pixelavg_pp = PFX(pixel_avg_16x32_avx2);
> - p.pu[LUMA_16x64].pixelavg_pp = PFX(pixel_avg_16x64_avx2);
> p.pu[LUMA_24x32].pixelavg_pp = PFX(pixel_avg_24x32_avx2);
> p.pu[LUMA_32x8].pixelavg_pp = PFX(pixel_avg_32x8_avx2);
> p.pu[LUMA_32x16].pixelavg_pp = PFX(pixel_avg_32x16_avx2);
> @@ -1414,11 +1438,8 @@
> p.pu[LUMA_64x64].pixelavg_pp = PFX(pixel_avg_64x64_avx2);
> p.pu[LUMA_48x64].pixelavg_pp = PFX(pixel_avg_48x64_avx2);
>
> - p.pu[LUMA_8x4].addAvg = PFX(addAvg_8x4_avx2);
> - p.pu[LUMA_8x8].addAvg = PFX(addAvg_8x8_avx2);
> p.pu[LUMA_8x16].addAvg = PFX(addAvg_8x16_avx2);
> p.pu[LUMA_8x32].addAvg = PFX(addAvg_8x32_avx2);
> - p.pu[LUMA_12x16].addAvg = PFX(addAvg_12x16_avx2);
> p.pu[LUMA_16x4].addAvg = PFX(addAvg_16x4_avx2);
> p.pu[LUMA_16x8].addAvg = PFX(addAvg_16x8_avx2);
> p.pu[LUMA_16x12].addAvg = PFX(addAvg_16x12_avx2);
> @@ -1437,13 +1458,9 @@
> p.pu[LUMA_64x48].addAvg = PFX(addAvg_64x48_avx2);
> p.pu[LUMA_64x64].addAvg = PFX(addAvg_64x64_avx2);
>
> - p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].addAvg =
> PFX(addAvg_8x2_avx2);
> - p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].addAvg =
> PFX(addAvg_8x4_avx2);
> - p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].addAvg =
> PFX(addAvg_8x6_avx2);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].addAvg =
> PFX(addAvg_8x8_avx2);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].addAvg =
> PFX(addAvg_8x16_avx2);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].addAvg =
> PFX(addAvg_8x32_avx2);
> - p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].addAvg =
> PFX(addAvg_12x16_avx2);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].addAvg =
> PFX(addAvg_16x4_avx2);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].addAvg =
> PFX(addAvg_16x8_avx2);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].addAvg =
> PFX(addAvg_16x12_avx2);
> @@ -1453,7 +1470,6 @@
> p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].addAvg =
> PFX(addAvg_32x16_avx2);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].addAvg =
> PFX(addAvg_32x24_avx2);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].addAvg =
> PFX(addAvg_32x32_avx2);
> -
> p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].addAvg =
> PFX(addAvg_8x16_avx2);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].addAvg =
> PFX(addAvg_16x32_avx2);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].addAvg =
> PFX(addAvg_32x64_avx2);
> @@ -1463,12 +1479,10 @@
> p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].addAvg =
> PFX(addAvg_32x32_avx2);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].addAvg =
> PFX(addAvg_16x64_avx2);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].addAvg =
> PFX(addAvg_8x12_avx2);
> - p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].addAvg =
> PFX(addAvg_8x4_avx2);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].addAvg =
> PFX(addAvg_16x24_avx2);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].addAvg =
> PFX(addAvg_16x8_avx2);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].addAvg =
> PFX(addAvg_8x64_avx2);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].addAvg =
> PFX(addAvg_24x64_avx2);
> - p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].addAvg =
> PFX(addAvg_12x32_avx2);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].addAvg =
> PFX(addAvg_32x16_avx2);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].addAvg =
> PFX(addAvg_32x48_avx2);
>
> @@ -1491,20 +1505,14 @@
> p.cu[BLOCK_32x32].intra_pred[DC_IDX] = PFX(intra_pred_dc32_avx2);
>
> p.pu[LUMA_48x64].satd = PFX(pixel_satd_48x64_avx2);
> -
> p.pu[LUMA_64x16].satd = PFX(pixel_satd_64x16_avx2);
> p.pu[LUMA_64x32].satd = PFX(pixel_satd_64x32_avx2);
> p.pu[LUMA_64x48].satd = PFX(pixel_satd_64x48_avx2);
> p.pu[LUMA_64x64].satd = PFX(pixel_satd_64x64_avx2);
> -
> - p.pu[LUMA_32x8].satd = PFX(pixel_satd_32x8_avx2);
> p.pu[LUMA_32x16].satd = PFX(pixel_satd_32x16_avx2);
> p.pu[LUMA_32x24].satd = PFX(pixel_satd_32x24_avx2);
> p.pu[LUMA_32x32].satd = PFX(pixel_satd_32x32_avx2);
> p.pu[LUMA_32x64].satd = PFX(pixel_satd_32x64_avx2);
> -
> - p.pu[LUMA_16x4].satd = PFX(pixel_satd_16x4_avx2);
> - p.pu[LUMA_16x8].satd = PFX(pixel_satd_16x8_avx2);
> p.pu[LUMA_16x12].satd = PFX(pixel_satd_16x12_avx2);
> p.pu[LUMA_16x16].satd = PFX(pixel_satd_16x16_avx2);
> p.pu[LUMA_16x32].satd = PFX(pixel_satd_16x32_avx2);
> @@ -1534,8 +1542,6 @@
> p.cu[BLOCK_32x32].sse_ss = PFX(pixel_ssd_ss_32x32_avx2);
> p.cu[BLOCK_64x64].sse_ss = PFX(pixel_ssd_ss_64x64_avx2);
>
> - p.cu[BLOCK_16x16].sse_pp = PFX(pixel_ssd_16x16_avx2);
> - p.cu[BLOCK_32x32].sse_pp = PFX(pixel_ssd_32x32_avx2);
> p.cu[BLOCK_64x64].sse_pp = PFX(pixel_ssd_64x64_avx2);
> p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].sse_pp =
> PFX(pixel_ssd_16x16_avx2);
> p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].sse_pp =
> PFX(pixel_ssd_32x32_avx2);
> @@ -1546,7 +1552,6 @@
> p.quant = PFX(quant_avx2);
> p.nquant = PFX(nquant_avx2);
> p.dequant_normal = PFX(dequant_normal_avx2);
> - p.dequant_scaling = PFX(dequant_scaling_avx2);
> p.dst4x4 = PFX(dst4_avx2);
> p.idst4x4 = PFX(idst4_avx2);
> p.denoiseDct = PFX(denoise_dct_avx2);
> @@ -1565,9 +1570,17 @@
> p.cu[BLOCK_16x16].blockfill_s = PFX(blockfill_s_16x16_avx2);
> p.cu[BLOCK_32x32].blockfill_s = PFX(blockfill_s_32x32_avx2);
>
> - ALL_LUMA_TU(count_nonzero, count_nonzero, avx2);
> - ALL_LUMA_TU_S(cpy1Dto2D_shl, cpy1Dto2D_shl_, avx2);
> - ALL_LUMA_TU_S(cpy1Dto2D_shr, cpy1Dto2D_shr_, avx2);
> + p.cu[BLOCK_8x8].count_nonzero = PFX(count_nonzero_8x8_avx2);
> + p.cu[BLOCK_16x16].count_nonzero = PFX(count_nonzero_16x16_avx2);
> + p.cu[BLOCK_32x32].count_nonzero = PFX(count_nonzero_32x32_avx2);
> +
> + p.cu[BLOCK_8x8].cpy1Dto2D_shl = PFX(cpy1Dto2D_shl_8_avx2);
> + p.cu[BLOCK_16x16].cpy1Dto2D_shl = PFX(cpy1Dto2D_shl_16_avx2);
> + p.cu[BLOCK_32x32].cpy1Dto2D_shl = PFX(cpy1Dto2D_shl_32_avx2);
> +
> + p.cu[BLOCK_8x8].cpy1Dto2D_shr = PFX(cpy1Dto2D_shr_8_avx2);
> + p.cu[BLOCK_16x16].cpy1Dto2D_shr = PFX(cpy1Dto2D_shr_16_avx2);
> + p.cu[BLOCK_32x32].cpy1Dto2D_shr = PFX(cpy1Dto2D_shr_32_avx2);
>
> p.cu[BLOCK_8x8].copy_cnt = PFX(copy_cnt_8_avx2);
> p.cu[BLOCK_16x16].copy_cnt = PFX(copy_cnt_16_avx2);
> @@ -1581,12 +1594,12 @@
> p.cu[BLOCK_16x16].cpy2Dto1D_shr = PFX(cpy2Dto1D_shr_16_avx2);
> p.cu[BLOCK_32x32].cpy2Dto1D_shr = PFX(cpy2Dto1D_shr_32_avx2);
>
> -#if X265_DEPTH <= 10
> - ALL_LUMA_TU_S(dct, dct, avx2);
> - ALL_LUMA_TU_S(idct, idct, avx2);
> -#endif
> + p.cu[BLOCK_4x4].dct = PFX(dct4_avx2);
> + p.cu[BLOCK_8x8].dct = PFX(dct8_avx2);
> + p.cu[BLOCK_4x4].idct = PFX(idct4_avx2);
> + p.cu[BLOCK_8x8].idct = PFX(idct8_avx2);
> +
> ALL_LUMA_CU_S(transpose, transpose, avx2);
> -
> ALL_LUMA_PU(luma_vpp, interp_8tap_vert_pp, avx2);
> ALL_LUMA_PU(luma_vps, interp_8tap_vert_ps, avx2);
> #if X265_DEPTH <= 10
> @@ -1650,16 +1663,7 @@
> p.pu[LUMA_64x64].sad_x3 = PFX(pixel_sad_x3_64x64_avx2);
>
> p.pu[LUMA_16x4].sad_x4 = PFX(pixel_sad_x4_16x4_avx2);
> - p.pu[LUMA_16x8].sad_x4 = PFX(pixel_sad_x4_16x8_avx2);
> - p.pu[LUMA_16x12].sad_x4 = PFX(pixel_sad_x4_16x12_avx2);
> - p.pu[LUMA_16x16].sad_x4 = PFX(pixel_sad_x4_16x16_avx2);
> - p.pu[LUMA_16x32].sad_x4 = PFX(pixel_sad_x4_16x32_avx2);
> p.pu[LUMA_16x64].sad_x4 = PFX(pixel_sad_x4_16x64_avx2);
> - p.pu[LUMA_32x8].sad_x4 = PFX(pixel_sad_x4_32x8_avx2);
> - p.pu[LUMA_32x16].sad_x4 = PFX(pixel_sad_x4_32x16_avx2);
> - p.pu[LUMA_32x24].sad_x4 = PFX(pixel_sad_x4_32x24_avx2);
> - p.pu[LUMA_32x32].sad_x4 = PFX(pixel_sad_x4_32x32_avx2);
> - p.pu[LUMA_32x64].sad_x4 = PFX(pixel_sad_x4_32x64_avx2);
> p.pu[LUMA_48x64].sad_x4 = PFX(pixel_sad_x4_48x64_avx2);
> p.pu[LUMA_64x16].sad_x4 = PFX(pixel_sad_x4_64x16_avx2);
> p.pu[LUMA_64x32].sad_x4 = PFX(pixel_sad_x4_64x32_avx2);
> @@ -1879,7 +1883,6 @@
> p.chroma[X265_CSP_I444].pu[LUMA_48x64].filter_hpp =
> PFX(interp_4tap_horiz_pp_48x64_avx2);
>
> p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_vpp =
> PFX(interp_4tap_vert_pp_4x2_avx2);
> - p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_vps =
> PFX(interp_4tap_vert_ps_4x2_avx2);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_vsp =
> PFX(interp_4tap_vert_sp_4x2_avx2);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_vss =
> PFX(interp_4tap_vert_ss_4x2_avx2);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].filter_vpp =
> PFX(interp_4tap_vert_pp_4x4_avx2);
> @@ -1988,19 +1991,14 @@
> p.chroma[X265_CSP_I444].pu[LUMA_8x32].filter_vsp =
> PFX(interp_4tap_vert_sp_8x32_avx2);
> p.chroma[X265_CSP_I444].pu[LUMA_8x32].filter_vss =
> PFX(interp_4tap_vert_ss_8x32_avx2);
>
> -
> p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_vss =
> PFX(interp_4tap_vert_ss_6x8_avx2);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_vsp =
> PFX(interp_4tap_vert_sp_6x8_avx2);
> - p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_vps =
> PFX(interp_4tap_vert_ps_6x8_avx2);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_vpp =
> PFX(interp_4tap_vert_pp_6x8_avx2);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_vpp =
> PFX(interp_4tap_vert_pp_12x16_avx2);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_vps =
> PFX(interp_4tap_vert_ps_12x16_avx2);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_vss =
> PFX(interp_4tap_vert_ss_12x16_avx2);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_vsp =
> PFX(interp_4tap_vert_sp_12x16_avx2);
> - p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_vpp =
> PFX(interp_4tap_vert_pp_16x4_avx2);
> - p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].filter_vpp =
> PFX(interp_4tap_vert_pp_16x8_avx2);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].filter_vpp =
> PFX(interp_4tap_vert_pp_16x12_avx2);
> - p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].filter_vpp =
> PFX(interp_4tap_vert_pp_16x16_avx2);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].filter_vpp =
> PFX(interp_4tap_vert_pp_16x32_avx2);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_vps =
> PFX(interp_4tap_vert_ps_16x4_avx2);
> p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].filter_vps =
> PFX(interp_4tap_vert_ps_16x8_avx2);
> @@ -2045,9 +2043,6 @@
> p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].filter_vps =
> PFX(interp_4tap_vert_ps_12x32_avx2);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].filter_vss =
> PFX(interp_4tap_vert_ss_12x32_avx2);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].filter_vsp =
> PFX(interp_4tap_vert_sp_12x32_avx2);
> - p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].filter_vpp =
> PFX(interp_4tap_vert_pp_16x8_avx2);
> - p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].filter_vpp =
> PFX(interp_4tap_vert_pp_16x16_avx2);
> - p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].filter_vpp =
> PFX(interp_4tap_vert_pp_16x24_avx2);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].filter_vpp =
> PFX(interp_4tap_vert_pp_16x32_avx2);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].filter_vpp =
> PFX(interp_4tap_vert_pp_16x64_avx2);
> p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].filter_vps =
> PFX(interp_4tap_vert_ps_16x8_avx2);
> @@ -2089,10 +2084,7 @@
> p.chroma[X265_CSP_I444].pu[LUMA_12x16].filter_vps =
> PFX(interp_4tap_vert_ps_12x16_avx2);
> p.chroma[X265_CSP_I444].pu[LUMA_12x16].filter_vss =
> PFX(interp_4tap_vert_ss_12x16_avx2);
> p.chroma[X265_CSP_I444].pu[LUMA_12x16].filter_vsp =
> PFX(interp_4tap_vert_sp_12x16_avx2);
> - p.chroma[X265_CSP_I444].pu[LUMA_16x4].filter_vpp =
> PFX(interp_4tap_vert_pp_16x4_avx2);
> - p.chroma[X265_CSP_I444].pu[LUMA_16x8].filter_vpp =
> PFX(interp_4tap_vert_pp_16x8_avx2);
> p.chroma[X265_CSP_I444].pu[LUMA_16x12].filter_vpp =
> PFX(interp_4tap_vert_pp_16x12_avx2);
> - p.chroma[X265_CSP_I444].pu[LUMA_16x16].filter_vpp =
> PFX(interp_4tap_vert_pp_16x16_avx2);
> p.chroma[X265_CSP_I444].pu[LUMA_16x32].filter_vpp =
> PFX(interp_4tap_vert_pp_16x32_avx2);
> p.chroma[X265_CSP_I444].pu[LUMA_16x64].filter_vpp =
> PFX(interp_4tap_vert_pp_16x64_avx2);
> p.chroma[X265_CSP_I444].pu[LUMA_16x4].filter_vps =
> PFX(interp_4tap_vert_ps_16x4_avx2);
> @@ -2157,6 +2149,73 @@
> p.chroma[X265_CSP_I444].pu[LUMA_64x32].filter_vsp =
> PFX(interp_4tap_vert_sp_64x32_avx2);
> p.chroma[X265_CSP_I444].pu[LUMA_64x48].filter_vsp =
> PFX(interp_4tap_vert_sp_64x48_avx2);
> p.chroma[X265_CSP_I444].pu[LUMA_64x64].filter_vsp =
> PFX(interp_4tap_vert_sp_64x64_avx2);
> +
> + /* The following primitives have been disabled since performance
> compared to SSE is negligible/negative */
> +#if 0
> + p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_vpp =
> PFX(interp_4tap_vert_pp_16x4_avx2);
> + p.chroma[X265_CSP_I444].pu[LUMA_16x4].filter_vpp =
> PFX(interp_4tap_vert_pp_16x4_avx2);
> + p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].filter_vpp =
> PFX(interp_4tap_vert_pp_16x24_avx2);
> + p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].filter_vpp =
> PFX(interp_4tap_vert_pp_16x8_avx2);
> + p.chroma[X265_CSP_I444].pu[LUMA_16x8].filter_vpp =
> PFX(interp_4tap_vert_pp_16x8_avx2);
> + p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].filter_vpp =
> PFX(interp_4tap_vert_pp_16x8_avx2);
> + p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].filter_vpp =
> PFX(interp_4tap_vert_pp_16x16_avx2);
> + p.chroma[X265_CSP_I444].pu[LUMA_16x16].filter_vpp =
> PFX(interp_4tap_vert_pp_16x16_avx2);
> + p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].filter_vpp =
> PFX(interp_4tap_vert_pp_16x16_avx2);
> +
> + p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_vps =
> PFX(interp_4tap_vert_ps_4x2_avx2);
> + p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_vps =
> PFX(interp_4tap_vert_ps_6x8_avx2);
> +
> + p.pu[LUMA_12x16].addAvg = PFX(addAvg_12x16_avx2);
> + p.pu[LUMA_8x4].addAvg = PFX(addAvg_8x4_avx2);
> + p.pu[LUMA_8x8].addAvg = PFX(addAvg_8x8_avx2);
> + p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].addAvg =
> PFX(addAvg_12x16_avx2);
> + p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].addAvg =
> PFX(addAvg_12x32_avx2);
> + p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].addAvg =
> PFX(addAvg_8x4_avx2);
> + p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].addAvg =
> PFX(addAvg_8x4_avx2);
> + p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].addAvg =
> PFX(addAvg_8x6_avx2);
> + p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].addAvg =
> PFX(addAvg_8x2_avx2);
> +
> + ALL_LUMA_TU_S(cpy1Dto2D_shl, cpy1Dto2D_shl_, avx2);
> + ALL_LUMA_TU_S(cpy1Dto2D_shr, cpy1Dto2D_shr_, avx2);
> + p.cu[BLOCK_4x4].cpy1Dto2D_shl = PFX(cpy1Dto2D_shl_4_avx2);
> + p.cu[BLOCK_4x4].cpy1Dto2D_shr = PFX(cpy1Dto2D_shr_4_avx2);
> +
> + ALL_LUMA_TU(count_nonzero, count_nonzero, avx2);
> + p.cu[BLOCK_4x4].count_nonzero = PFX(count_nonzero_4x4_avx2);
> +
> + p.pu[LUMA_16x4].pixelavg_pp = PFX(pixel_avg_16x4_avx2);
> + p.pu[LUMA_16x8].pixelavg_pp = PFX(pixel_avg_16x8_avx2);
> + p.pu[LUMA_16x12].pixelavg_pp = PFX(pixel_avg_16x12_avx2);
> + p.pu[LUMA_16x16].pixelavg_pp = PFX(pixel_avg_16x16_avx2);
> + p.pu[LUMA_16x32].pixelavg_pp = PFX(pixel_avg_16x32_avx2);
> + p.pu[LUMA_16x64].pixelavg_pp = PFX(pixel_avg_16x64_avx2);
> +
> + p.pu[LUMA_32x8].satd = PFX(pixel_satd_32x8_avx2);
> + p.pu[LUMA_16x4].satd = PFX(pixel_satd_16x4_avx2);
> + p.pu[LUMA_16x8].satd = PFX(pixel_satd_16x8_avx2);
> +
> + p.pu[LUMA_16x8].sad_x4 = PFX(pixel_sad_x4_16x8_avx2);
> + p.pu[LUMA_16x12].sad_x4 = PFX(pixel_sad_x4_16x12_avx2);
> + p.pu[LUMA_16x16].sad_x4 = PFX(pixel_sad_x4_16x16_avx2);
> + p.pu[LUMA_16x32].sad_x4 = PFX(pixel_sad_x4_16x32_avx2);
> + p.pu[LUMA_32x8].sad_x4 = PFX(pixel_sad_x4_32x8_avx2);
> + p.pu[LUMA_32x16].sad_x4 = PFX(pixel_sad_x4_32x16_avx2);
> + p.pu[LUMA_32x24].sad_x4 = PFX(pixel_sad_x4_32x24_avx2);
> + p.pu[LUMA_32x32].sad_x4 = PFX(pixel_sad_x4_32x32_avx2);
> + p.pu[LUMA_32x64].sad_x4 = PFX(pixel_sad_x4_32x64_avx2);
> +
> + p.cu[BLOCK_16x16].sse_pp = PFX(pixel_ssd_16x16_avx2);
> + p.cu[BLOCK_32x32].sse_pp = PFX(pixel_ssd_32x32_avx2);
> +
> + p.dequant_scaling = PFX(dequant_scaling_avx2);
> +
> + ALL_LUMA_TU_S(dct, dct, avx2);
> + p.cu[BLOCK_16x16].dct = PFX(dct16_avx2);
> + p.cu[BLOCK_32x32].dct = PFX(dct32_avx2);
> + ALL_LUMA_TU_S(idct, idct, avx2);
> + p.cu[BLOCK_16x16].idct = PFX(idct16_avx2);
> + p.cu[BLOCK_32x32].idct = PFX(idct32_avx2);
> +#endif
> #endif
>
> p.frameInitLowres = PFX(frame_init_lowres_core_avx2);
> _______________________________________________
> x265-devel mailing list
> x265-devel at videolan.org
> https://mailman.videolan.org/listinfo/x265-devel
>
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://mailman.videolan.org/pipermail/x265-devel/attachments/20150818/526437d8/attachment-0001.html>
More information about the x265-devel
mailing list