[x265] [PATCH] asm: disabled 10bpp AVX & AVX2 primitives having less than 3% speed up over SSE

aasaipriya at multicorewareinc.com aasaipriya at multicorewareinc.com
Mon Aug 17 14:14:05 CEST 2015


# HG changeset patch
# User Aasaipriya Chandran <aasaipriya at multicorewareinc.com>
# Date 1439813601 -19800
#      Mon Aug 17 17:43:21 2015 +0530
# Node ID 458c015656c2f66ffc696484712540e1b8e6588d
# Parent  4a6143fe6658534aec83c9ba3db386d118550196
asm: disabled 10bpp AVX & AVX2 primitives having less than 3% speed up over SSE

These primitives are slower than their SSE counterparts.

diff -r 4a6143fe6658 -r 458c015656c2 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Mon Aug 17 11:56:37 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp	Mon Aug 17 17:43:21 2015 +0530
@@ -1205,10 +1205,6 @@
         p.pu[LUMA_64x16].satd = PFX(pixel_satd_64x16_avx);
         p.pu[LUMA_16x64].satd = PFX(pixel_satd_16x64_avx);
 
-        p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].sa8d = PFX(pixel_sa8d_8x8_avx);
-        p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].sa8d = PFX(pixel_sa8d_16x16_avx);
-        p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].sa8d = PFX(pixel_sa8d_32x32_avx);
-
         p.cu[BLOCK_16x16].var = PFX(pixel_var_16x16_avx);
 
         // copy_pp primitives
@@ -1326,6 +1322,9 @@
         p.cu[BLOCK_64x64].var = PFX(pixel_var_64x64_avx);
 
         ASSIGN_SA8D(avx);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].sa8d = PFX(pixel_sa8d_8x8_avx);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].sa8d = PFX(pixel_sa8d_16x16_avx);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].sa8d = PFX(pixel_sa8d_32x32_avx);
 
         p.ssim_4x4x2_core = PFX(pixel_ssim_4x4x2_core_avx);
         p.ssim_end_4 = PFX(pixel_ssim_end4_avx);
@@ -1427,12 +1426,6 @@
         p.cu[BLOCK_32x32].intra_pred[34]    = PFX(intra_pred_ang32_2_avx2);
 
         p.pu[LUMA_12x16].pixelavg_pp = PFX(pixel_avg_12x16_avx2);
-        p.pu[LUMA_16x4].pixelavg_pp = PFX(pixel_avg_16x4_avx2);
-        p.pu[LUMA_16x8].pixelavg_pp = PFX(pixel_avg_16x8_avx2);
-        p.pu[LUMA_16x12].pixelavg_pp = PFX(pixel_avg_16x12_avx2);
-        p.pu[LUMA_16x16].pixelavg_pp = PFX(pixel_avg_16x16_avx2);
-        p.pu[LUMA_16x32].pixelavg_pp = PFX(pixel_avg_16x32_avx2);
-        p.pu[LUMA_16x64].pixelavg_pp = PFX(pixel_avg_16x64_avx2);
         p.pu[LUMA_24x32].pixelavg_pp = PFX(pixel_avg_24x32_avx2);
         p.pu[LUMA_32x8].pixelavg_pp = PFX(pixel_avg_32x8_avx2);
         p.pu[LUMA_32x16].pixelavg_pp = PFX(pixel_avg_32x16_avx2);
@@ -1445,11 +1438,8 @@
         p.pu[LUMA_64x64].pixelavg_pp = PFX(pixel_avg_64x64_avx2);
         p.pu[LUMA_48x64].pixelavg_pp = PFX(pixel_avg_48x64_avx2);
 
-        p.pu[LUMA_8x4].addAvg   = PFX(addAvg_8x4_avx2);
-        p.pu[LUMA_8x8].addAvg   = PFX(addAvg_8x8_avx2);
         p.pu[LUMA_8x16].addAvg  = PFX(addAvg_8x16_avx2);
         p.pu[LUMA_8x32].addAvg  = PFX(addAvg_8x32_avx2);
-        p.pu[LUMA_12x16].addAvg = PFX(addAvg_12x16_avx2);
         p.pu[LUMA_16x4].addAvg  = PFX(addAvg_16x4_avx2);
         p.pu[LUMA_16x8].addAvg  = PFX(addAvg_16x8_avx2);
         p.pu[LUMA_16x12].addAvg = PFX(addAvg_16x12_avx2);
@@ -1468,13 +1458,9 @@
         p.pu[LUMA_64x48].addAvg = PFX(addAvg_64x48_avx2);
         p.pu[LUMA_64x64].addAvg = PFX(addAvg_64x64_avx2);
 
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].addAvg   = PFX(addAvg_8x2_avx2);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].addAvg   = PFX(addAvg_8x4_avx2);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].addAvg   = PFX(addAvg_8x6_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].addAvg   = PFX(addAvg_8x8_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].addAvg  = PFX(addAvg_8x16_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].addAvg  = PFX(addAvg_8x32_avx2);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].addAvg = PFX(addAvg_12x16_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].addAvg  = PFX(addAvg_16x4_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].addAvg  = PFX(addAvg_16x8_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].addAvg = PFX(addAvg_16x12_avx2);
@@ -1484,7 +1470,6 @@
         p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].addAvg = PFX(addAvg_32x16_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].addAvg = PFX(addAvg_32x24_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].addAvg = PFX(addAvg_32x32_avx2);
-
         p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].addAvg = PFX(addAvg_8x16_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].addAvg = PFX(addAvg_16x32_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].addAvg = PFX(addAvg_32x64_avx2);
@@ -1494,12 +1479,10 @@
         p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].addAvg = PFX(addAvg_32x32_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].addAvg = PFX(addAvg_16x64_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].addAvg = PFX(addAvg_8x12_avx2);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].addAvg = PFX(addAvg_8x4_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].addAvg = PFX(addAvg_16x24_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].addAvg = PFX(addAvg_16x8_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].addAvg = PFX(addAvg_8x64_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].addAvg = PFX(addAvg_24x64_avx2);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].addAvg = PFX(addAvg_12x32_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].addAvg = PFX(addAvg_32x16_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].addAvg = PFX(addAvg_32x48_avx2);
 
@@ -1522,20 +1505,14 @@
         p.cu[BLOCK_32x32].intra_pred[DC_IDX] = PFX(intra_pred_dc32_avx2);
 
         p.pu[LUMA_48x64].satd = PFX(pixel_satd_48x64_avx2);
-
         p.pu[LUMA_64x16].satd = PFX(pixel_satd_64x16_avx2);
         p.pu[LUMA_64x32].satd = PFX(pixel_satd_64x32_avx2);
         p.pu[LUMA_64x48].satd = PFX(pixel_satd_64x48_avx2);
         p.pu[LUMA_64x64].satd = PFX(pixel_satd_64x64_avx2);
-
-        p.pu[LUMA_32x8].satd = PFX(pixel_satd_32x8_avx2);
         p.pu[LUMA_32x16].satd = PFX(pixel_satd_32x16_avx2);
         p.pu[LUMA_32x24].satd = PFX(pixel_satd_32x24_avx2);
         p.pu[LUMA_32x32].satd = PFX(pixel_satd_32x32_avx2);
         p.pu[LUMA_32x64].satd = PFX(pixel_satd_32x64_avx2);
-
-        p.pu[LUMA_16x4].satd = PFX(pixel_satd_16x4_avx2);
-        p.pu[LUMA_16x8].satd = PFX(pixel_satd_16x8_avx2);
         p.pu[LUMA_16x12].satd = PFX(pixel_satd_16x12_avx2);
         p.pu[LUMA_16x16].satd = PFX(pixel_satd_16x16_avx2);
         p.pu[LUMA_16x32].satd = PFX(pixel_satd_16x32_avx2);
@@ -1565,8 +1542,6 @@
         p.cu[BLOCK_32x32].sse_ss = PFX(pixel_ssd_ss_32x32_avx2);
         p.cu[BLOCK_64x64].sse_ss = PFX(pixel_ssd_ss_64x64_avx2);
 
-        p.cu[BLOCK_16x16].sse_pp = PFX(pixel_ssd_16x16_avx2);
-        p.cu[BLOCK_32x32].sse_pp = PFX(pixel_ssd_32x32_avx2);
         p.cu[BLOCK_64x64].sse_pp = PFX(pixel_ssd_64x64_avx2);
         p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].sse_pp = PFX(pixel_ssd_16x16_avx2);
         p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].sse_pp = PFX(pixel_ssd_32x32_avx2);
@@ -1577,7 +1552,6 @@
         p.quant = PFX(quant_avx2);
         p.nquant = PFX(nquant_avx2);
         p.dequant_normal  = PFX(dequant_normal_avx2);
-        p.dequant_scaling = PFX(dequant_scaling_avx2);
         p.dst4x4 = PFX(dst4_avx2);
         p.idst4x4 = PFX(idst4_avx2);
         p.denoiseDct = PFX(denoise_dct_avx2);
@@ -1596,9 +1570,17 @@
         p.cu[BLOCK_16x16].blockfill_s = PFX(blockfill_s_16x16_avx2);
         p.cu[BLOCK_32x32].blockfill_s = PFX(blockfill_s_32x32_avx2);
 
-        ALL_LUMA_TU(count_nonzero, count_nonzero, avx2);
-        ALL_LUMA_TU_S(cpy1Dto2D_shl, cpy1Dto2D_shl_, avx2);
-        ALL_LUMA_TU_S(cpy1Dto2D_shr, cpy1Dto2D_shr_, avx2);
+        p.cu[BLOCK_8x8].count_nonzero = PFX(count_nonzero_8x8_avx2);
+        p.cu[BLOCK_16x16].count_nonzero = PFX(count_nonzero_16x16_avx2);
+        p.cu[BLOCK_32x32].count_nonzero = PFX(count_nonzero_32x32_avx2);
+
+        p.cu[BLOCK_8x8].cpy1Dto2D_shl = PFX(cpy1Dto2D_shl_8_avx2);
+        p.cu[BLOCK_16x16].cpy1Dto2D_shl = PFX(cpy1Dto2D_shl_16_avx2);
+        p.cu[BLOCK_32x32].cpy1Dto2D_shl = PFX(cpy1Dto2D_shl_32_avx2);
+
+        p.cu[BLOCK_8x8].cpy1Dto2D_shr = PFX(cpy1Dto2D_shr_8_avx2);
+        p.cu[BLOCK_16x16].cpy1Dto2D_shr = PFX(cpy1Dto2D_shr_16_avx2);
+        p.cu[BLOCK_32x32].cpy1Dto2D_shr = PFX(cpy1Dto2D_shr_32_avx2);
 
         p.cu[BLOCK_8x8].copy_cnt = PFX(copy_cnt_8_avx2);
         p.cu[BLOCK_16x16].copy_cnt = PFX(copy_cnt_16_avx2);
@@ -1612,10 +1594,10 @@
         p.cu[BLOCK_16x16].cpy2Dto1D_shr = PFX(cpy2Dto1D_shr_16_avx2);
         p.cu[BLOCK_32x32].cpy2Dto1D_shr = PFX(cpy2Dto1D_shr_32_avx2);
 
-#if X265_DEPTH <= 10
-        ALL_LUMA_TU_S(dct, dct, avx2);
-        ALL_LUMA_TU_S(idct, idct, avx2);
-#endif
+        p.cu[BLOCK_4x4].dct = PFX(dct4_avx2);
+        p.cu[BLOCK_8x8].dct = PFX(dct8_avx2);
+        p.cu[BLOCK_4x4].idct = PFX(idct4_avx2);
+        p.cu[BLOCK_8x8].idct = PFX(idct8_avx2);
         ALL_LUMA_CU_S(transpose, transpose, avx2);
 
         ALL_LUMA_PU(luma_vpp, interp_8tap_vert_pp, avx2);
@@ -1681,16 +1663,7 @@
         p.pu[LUMA_64x64].sad_x3 = PFX(pixel_sad_x3_64x64_avx2);
 
         p.pu[LUMA_16x4].sad_x4 = PFX(pixel_sad_x4_16x4_avx2);
-        p.pu[LUMA_16x8].sad_x4 = PFX(pixel_sad_x4_16x8_avx2);
-        p.pu[LUMA_16x12].sad_x4 = PFX(pixel_sad_x4_16x12_avx2);
-        p.pu[LUMA_16x16].sad_x4 = PFX(pixel_sad_x4_16x16_avx2);
-        p.pu[LUMA_16x32].sad_x4 = PFX(pixel_sad_x4_16x32_avx2);
         p.pu[LUMA_16x64].sad_x4 = PFX(pixel_sad_x4_16x64_avx2);
-        p.pu[LUMA_32x8].sad_x4 = PFX(pixel_sad_x4_32x8_avx2);
-        p.pu[LUMA_32x16].sad_x4 = PFX(pixel_sad_x4_32x16_avx2);
-        p.pu[LUMA_32x24].sad_x4 = PFX(pixel_sad_x4_32x24_avx2);
-        p.pu[LUMA_32x32].sad_x4 = PFX(pixel_sad_x4_32x32_avx2);
-        p.pu[LUMA_32x64].sad_x4 = PFX(pixel_sad_x4_32x64_avx2);
         p.pu[LUMA_48x64].sad_x4 = PFX(pixel_sad_x4_48x64_avx2);
         p.pu[LUMA_64x16].sad_x4 = PFX(pixel_sad_x4_64x16_avx2);
         p.pu[LUMA_64x32].sad_x4 = PFX(pixel_sad_x4_64x32_avx2);
@@ -1910,7 +1883,6 @@
         p.chroma[X265_CSP_I444].pu[LUMA_48x64].filter_hpp = PFX(interp_4tap_horiz_pp_48x64_avx2);
 
         p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_vpp = PFX(interp_4tap_vert_pp_4x2_avx2);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_vps = PFX(interp_4tap_vert_ps_4x2_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_vsp = PFX(interp_4tap_vert_sp_4x2_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_vss = PFX(interp_4tap_vert_ss_4x2_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].filter_vpp = PFX(interp_4tap_vert_pp_4x4_avx2);
@@ -2018,20 +1990,14 @@
         p.chroma[X265_CSP_I444].pu[LUMA_8x32].filter_vps = PFX(interp_4tap_vert_ps_8x32_avx2);
         p.chroma[X265_CSP_I444].pu[LUMA_8x32].filter_vsp = PFX(interp_4tap_vert_sp_8x32_avx2);
         p.chroma[X265_CSP_I444].pu[LUMA_8x32].filter_vss = PFX(interp_4tap_vert_ss_8x32_avx2);
-
-
         p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_vss = PFX(interp_4tap_vert_ss_6x8_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_vsp = PFX(interp_4tap_vert_sp_6x8_avx2);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_vps = PFX(interp_4tap_vert_ps_6x8_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_vpp = PFX(interp_4tap_vert_pp_6x8_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_vpp = PFX(interp_4tap_vert_pp_12x16_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_vps = PFX(interp_4tap_vert_ps_12x16_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_vss = PFX(interp_4tap_vert_ss_12x16_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_vsp = PFX(interp_4tap_vert_sp_12x16_avx2);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_vpp = PFX(interp_4tap_vert_pp_16x4_avx2);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].filter_vpp = PFX(interp_4tap_vert_pp_16x8_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].filter_vpp = PFX(interp_4tap_vert_pp_16x12_avx2);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].filter_vpp = PFX(interp_4tap_vert_pp_16x16_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].filter_vpp = PFX(interp_4tap_vert_pp_16x32_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_vps = PFX(interp_4tap_vert_ps_16x4_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].filter_vps = PFX(interp_4tap_vert_ps_16x8_avx2);
@@ -2076,9 +2042,6 @@
         p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].filter_vps = PFX(interp_4tap_vert_ps_12x32_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].filter_vss = PFX(interp_4tap_vert_ss_12x32_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].filter_vsp = PFX(interp_4tap_vert_sp_12x32_avx2);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].filter_vpp = PFX(interp_4tap_vert_pp_16x8_avx2);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].filter_vpp = PFX(interp_4tap_vert_pp_16x16_avx2);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].filter_vpp = PFX(interp_4tap_vert_pp_16x24_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].filter_vpp = PFX(interp_4tap_vert_pp_16x32_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].filter_vpp = PFX(interp_4tap_vert_pp_16x64_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].filter_vps = PFX(interp_4tap_vert_ps_16x8_avx2);
@@ -2120,10 +2083,7 @@
         p.chroma[X265_CSP_I444].pu[LUMA_12x16].filter_vps = PFX(interp_4tap_vert_ps_12x16_avx2);
         p.chroma[X265_CSP_I444].pu[LUMA_12x16].filter_vss = PFX(interp_4tap_vert_ss_12x16_avx2);
         p.chroma[X265_CSP_I444].pu[LUMA_12x16].filter_vsp = PFX(interp_4tap_vert_sp_12x16_avx2);
-        p.chroma[X265_CSP_I444].pu[LUMA_16x4].filter_vpp = PFX(interp_4tap_vert_pp_16x4_avx2);
-        p.chroma[X265_CSP_I444].pu[LUMA_16x8].filter_vpp = PFX(interp_4tap_vert_pp_16x8_avx2);
         p.chroma[X265_CSP_I444].pu[LUMA_16x12].filter_vpp = PFX(interp_4tap_vert_pp_16x12_avx2);
-        p.chroma[X265_CSP_I444].pu[LUMA_16x16].filter_vpp = PFX(interp_4tap_vert_pp_16x16_avx2);
         p.chroma[X265_CSP_I444].pu[LUMA_16x32].filter_vpp = PFX(interp_4tap_vert_pp_16x32_avx2);
         p.chroma[X265_CSP_I444].pu[LUMA_16x64].filter_vpp = PFX(interp_4tap_vert_pp_16x64_avx2);
         p.chroma[X265_CSP_I444].pu[LUMA_16x4].filter_vps = PFX(interp_4tap_vert_ps_16x4_avx2);
@@ -2188,6 +2148,74 @@
         p.chroma[X265_CSP_I444].pu[LUMA_64x32].filter_vsp = PFX(interp_4tap_vert_sp_64x32_avx2);
         p.chroma[X265_CSP_I444].pu[LUMA_64x48].filter_vsp = PFX(interp_4tap_vert_sp_64x48_avx2);
         p.chroma[X265_CSP_I444].pu[LUMA_64x64].filter_vsp = PFX(interp_4tap_vert_sp_64x64_avx2);
+
+        /* The following primitives have been disabled since performance compared to SSE is negligible/negative */
+#if 0
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_vpp = PFX(interp_4tap_vert_pp_16x4_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x4].filter_vpp = PFX(interp_4tap_vert_pp_16x4_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].filter_vpp = PFX(interp_4tap_vert_pp_16x24_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].filter_vpp = PFX(interp_4tap_vert_pp_16x8_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x8].filter_vpp = PFX(interp_4tap_vert_pp_16x8_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].filter_vpp = PFX(interp_4tap_vert_pp_16x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].filter_vpp = PFX(interp_4tap_vert_pp_16x16_avx2);
+        p.chroma[X265_CSP_I444].pu[LUMA_16x16].filter_vpp = PFX(interp_4tap_vert_pp_16x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].filter_vpp = PFX(interp_4tap_vert_pp_16x16_avx2);
+
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_vps = PFX(interp_4tap_vert_ps_4x2_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].filter_vps = PFX(interp_4tap_vert_ps_6x8_avx2);
+
+        p.pu[LUMA_12x16].addAvg = PFX(addAvg_12x16_avx2);
+        p.pu[LUMA_8x4].addAvg   = PFX(addAvg_8x4_avx2);
+        p.pu[LUMA_8x8].addAvg   = PFX(addAvg_8x8_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].addAvg = PFX(addAvg_12x16_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].addAvg = PFX(addAvg_12x32_avx2);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].addAvg = PFX(addAvg_8x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].addAvg = PFX(addAvg_8x4_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].addAvg   = PFX(addAvg_8x6_avx2);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].addAvg   = PFX(addAvg_8x2_avx2);
+
+        ALL_LUMA_TU_S(cpy1Dto2D_shl, cpy1Dto2D_shl_, avx2);
+        ALL_LUMA_TU_S(cpy1Dto2D_shr, cpy1Dto2D_shr_, avx2);
+        p.cu[BLOCK_4x4].cpy1Dto2D_shl = PFX(cpy1Dto2D_shl_4_avx2);
+        p.cu[BLOCK_4x4].cpy1Dto2D_shr = PFX(cpy1Dto2D_shr_4_avx2);
+
+        ALL_LUMA_TU(count_nonzero, count_nonzero, avx2);
+        p.cu[BLOCK_4x4].count_nonzero = PFX(count_nonzero_4x4_avx2);
+
+        p.pu[LUMA_16x4].pixelavg_pp = PFX(pixel_avg_16x4_avx2);
+        p.pu[LUMA_16x8].pixelavg_pp = PFX(pixel_avg_16x8_avx2);
+        p.pu[LUMA_16x12].pixelavg_pp = PFX(pixel_avg_16x12_avx2);
+        p.pu[LUMA_16x16].pixelavg_pp = PFX(pixel_avg_16x16_avx2);
+        p.pu[LUMA_16x32].pixelavg_pp = PFX(pixel_avg_16x32_avx2);
+        p.pu[LUMA_16x64].pixelavg_pp = PFX(pixel_avg_16x64_avx2);
+
+        p.pu[LUMA_32x8].satd = PFX(pixel_satd_32x8_avx2);
+        p.pu[LUMA_16x4].satd = PFX(pixel_satd_16x4_avx2);
+        p.pu[LUMA_16x8].satd = PFX(pixel_satd_16x8_avx2);
+
+        p.pu[LUMA_16x8].sad_x4 = PFX(pixel_sad_x4_16x8_avx2);
+        p.pu[LUMA_16x12].sad_x4 = PFX(pixel_sad_x4_16x12_avx2);
+        p.pu[LUMA_16x16].sad_x4 = PFX(pixel_sad_x4_16x16_avx2);
+        p.pu[LUMA_16x32].sad_x4 = PFX(pixel_sad_x4_16x32_avx2);
+        p.pu[LUMA_32x8].sad_x4 = PFX(pixel_sad_x4_32x8_avx2);
+        p.pu[LUMA_32x16].sad_x4 = PFX(pixel_sad_x4_32x16_avx2);
+        p.pu[LUMA_32x24].sad_x4 = PFX(pixel_sad_x4_32x24_avx2);
+        p.pu[LUMA_32x32].sad_x4 = PFX(pixel_sad_x4_32x32_avx2);
+        p.pu[LUMA_32x64].sad_x4 = PFX(pixel_sad_x4_32x64_avx2);
+
+        p.cu[BLOCK_16x16].sse_pp = PFX(pixel_ssd_16x16_avx2);
+        p.cu[BLOCK_32x32].sse_pp = PFX(pixel_ssd_32x32_avx2);
+
+        p.dequant_scaling = PFX(dequant_scaling_avx2);
+
+        ALL_LUMA_TU_S(dct, dct, avx2);
+        p.cu[BLOCK_16x16].dct = PFX(dct16_avx2);
+        p.cu[BLOCK_32x32].dct = PFX(dct32_avx2);
+        ALL_LUMA_TU_S(idct, idct, avx2);
+        p.cu[BLOCK_16x16].idct = PFX(idct16_avx2);
+        p.cu[BLOCK_32x32].idct = PFX(idct32_avx2);
+#endif
+
 #endif
 
         p.frameInitLowres = PFX(frame_init_lowres_core_avx2);


More information about the x265-devel mailing list