[x265-commits] [x265] asm: disabled AVX & AVX2 primitives having less than 3% s...

Dnyaneshwar G dnyaneshwar at multicorewareinc.com
Sun Aug 9 20:09:45 CEST 2015


details:   http://hg.videolan.org/x265/rev/3eb2ec5922be
branches:  
changeset: 10868:3eb2ec5922be
user:      Dnyaneshwar G <dnyaneshwar at multicorewareinc.com>
date:      Wed Aug 05 12:20:01 2015 +0530
description:
asm: disabled AVX & AVX2 primitives having less than 3% speed up over SSE

these primitives are slower than SSE primitives
Subject: [x265] asm: updated avx2 algorithm for copy_ps 32xN & 64xN, improved by over 45% compared to SSE asm

details:   http://hg.videolan.org/x265/rev/377a996a8d74
branches:  
changeset: 10869:377a996a8d74
user:      Dnyaneshwar G <dnyaneshwar at multicorewareinc.com>
date:      Wed Aug 05 15:09:14 2015 +0530
description:
asm: updated avx2 algorithm for copy_ps 32xN & 64xN, improved by over 45% compared to SSE asm
Subject: [x265] stats: use input source pictures to calculate average and max luma level

details:   http://hg.videolan.org/x265/rev/9982ac741d18
branches:  
changeset: 10870:9982ac741d18
user:      Kavitha Sampath <kavitha at multicorewareinc.com>
date:      Thu Aug 06 10:29:06 2015 +0530
description:
stats: use input source pictures to calculate average and max luma level
Subject: [x265] asm: new algorithm for intra_ang_16 modes 3 & 33, improved by over 23%

details:   http://hg.videolan.org/x265/rev/e9fb68e4bf1e
branches:  
changeset: 10871:e9fb68e4bf1e
user:      Rajesh Paulraj <rajesh at multicorewareinc.com>
date:      Thu Aug 06 13:02:22 2015 +0530
description:
asm: new algorithm for intra_ang_16 modes 3 & 33, improved by over 23%
compared to the previous avx2 code
Subject: [x265] asm: new algorithm for intra_ang_16 modes 4 & 32, improved by over 15%

details:   http://hg.videolan.org/x265/rev/27773f08c57b
branches:  
changeset: 10872:27773f08c57b
user:      Rajesh Paulraj <rajesh at multicorewareinc.com>
date:      Thu Aug 06 13:08:58 2015 +0530
description:
asm: new algorithm for intra_ang_16 modes 4 & 32, improved by over 15%
compared to the previous avx2 code
Subject: [x265] asm: new algorithm for intra_ang_16 mode 5, improved by over 16% compared to the previous avx2 code

details:   http://hg.videolan.org/x265/rev/935a2c843ec0
branches:  
changeset: 10873:935a2c843ec0
user:      Rajesh Paulraj <rajesh at multicorewareinc.com>
date:      Thu Aug 06 13:14:07 2015 +0530
description:
asm: new algorithm for intra_ang_16 mode 5, improved by over 16% compared to the previous avx2 code
Subject: [x265] asm: avx2 code for intra_ang_16 mode 6, replace old avx2 code

details:   http://hg.videolan.org/x265/rev/0d0f17e1f833
branches:  
changeset: 10874:0d0f17e1f833
user:      Rajesh Paulraj <rajesh at multicorewareinc.com>
date:      Thu Aug 06 14:12:23 2015 +0530
description:
asm: avx2 code for intra_ang_16 mode 6, replace old avx2 code
with less constant table and 7% performance improvement
Subject: [x265] asm: avx2 code for intra_ang_16 mode 7, replace old avx2 code

details:   http://hg.videolan.org/x265/rev/609a81351f09
branches:  
changeset: 10875:609a81351f09
user:      Rajesh Paulraj <rajesh at multicorewareinc.com>
date:      Thu Aug 06 14:20:08 2015 +0530
description:
asm: avx2 code for intra_ang_16 mode 7, replace old avx2 code
with less constant table and same performance
Subject: [x265] asm: avx2 code for intra_ang_16 mode 8, replace old avx2 code

details:   http://hg.videolan.org/x265/rev/cbdefdfca877
branches:  
changeset: 10876:cbdefdfca877
user:      Rajesh Paulraj <rajesh at multicorewareinc.com>
date:      Thu Aug 06 14:23:43 2015 +0530
description:
asm: avx2 code for intra_ang_16 mode 8, replace old avx2 code
with less constant table and same performance
Subject: [x265] Merge with default; prepare for 1.8

details:   http://hg.videolan.org/x265/rev/4781e6cef251
branches:  stable
changeset: 10877:4781e6cef251
user:      Deepthi Nandakumar <deepthi at multicorewareinc.com>
date:      Fri Aug 07 12:29:40 2015 +0530
description:
Merge with default; prepare for 1.8

diffstat:

 source/common/x86/asm-primitives.cpp |   179 ++--
 source/common/x86/blockcopy8.asm     |   150 +--
 source/common/x86/intrapred8.asm     |  1412 ++++++++++++++++-----------------
 source/encoder/frameencoder.cpp      |    17 +-
 4 files changed, 827 insertions(+), 931 deletions(-)

diffs (truncated from 2103 to 300 lines):

diff -r 3fa7f6838098 -r 4781e6cef251 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Mon Aug 03 14:56:21 2015 -0500
+++ b/source/common/x86/asm-primitives.cpp	Fri Aug 07 12:29:40 2015 +0530
@@ -2568,7 +2568,6 @@ void setupAssemblyPrimitives(EncoderPrim
         p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].satd = PFX(pixel_satd_16x16_avx);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].satd = PFX(pixel_satd_32x32_avx);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].satd = PFX(pixel_satd_16x64_avx);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].satd = PFX(pixel_satd_16x8_avx);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].satd = PFX(pixel_satd_32x16_avx);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].satd = PFX(pixel_satd_8x16_avx);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].satd = PFX(pixel_satd_8x8_avx);
@@ -2578,7 +2577,6 @@ void setupAssemblyPrimitives(EncoderPrim
         p.pu[LUMA_16x16].satd = PFX(pixel_satd_16x16_avx);
         p.pu[LUMA_32x32].satd = PFX(pixel_satd_32x32_avx);
         p.pu[LUMA_64x64].satd = PFX(pixel_satd_64x64_avx);
-        p.pu[LUMA_16x8].satd  = PFX(pixel_satd_16x8_avx);
         p.pu[LUMA_8x16].satd  = PFX(pixel_satd_8x16_avx);
         p.pu[LUMA_16x32].satd = PFX(pixel_satd_16x32_avx);
         p.pu[LUMA_32x16].satd = PFX(pixel_satd_32x16_avx);
@@ -2586,10 +2584,8 @@ void setupAssemblyPrimitives(EncoderPrim
         p.pu[LUMA_32x64].satd = PFX(pixel_satd_32x64_avx);
         p.pu[LUMA_16x12].satd = PFX(pixel_satd_16x12_avx);
 
-        p.pu[LUMA_16x4].satd  = PFX(pixel_satd_16x4_avx);
         p.pu[LUMA_32x24].satd = PFX(pixel_satd_32x24_avx);
         p.pu[LUMA_24x32].satd = PFX(pixel_satd_24x32_avx);
-        p.pu[LUMA_32x8].satd  = PFX(pixel_satd_32x8_avx);
         p.pu[LUMA_8x32].satd  = PFX(pixel_satd_8x32_avx);
         p.pu[LUMA_64x48].satd = PFX(pixel_satd_64x48_avx);
         p.pu[LUMA_48x64].satd = PFX(pixel_satd_48x64_avx);
@@ -2599,38 +2595,28 @@ void setupAssemblyPrimitives(EncoderPrim
         p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].satd = PFX(pixel_satd_8x8_avx);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].satd = PFX(pixel_satd_16x16_avx);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].satd = PFX(pixel_satd_32x32_avx);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].satd = PFX(pixel_satd_16x8_avx);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].satd = PFX(pixel_satd_8x16_avx);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].satd = PFX(pixel_satd_32x16_avx);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].satd = PFX(pixel_satd_16x32_avx);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].satd = PFX(pixel_satd_16x12_avx);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].satd = PFX(pixel_satd_16x4_avx);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].satd = PFX(pixel_satd_32x24_avx);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].satd = PFX(pixel_satd_24x32_avx);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].satd = PFX(pixel_satd_32x8_avx);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].satd = PFX(pixel_satd_8x32_avx);
-        ASSIGN_SA8D(avx);
+
+        p.cu[BLOCK_64x64].sa8d = PFX(pixel_sa8d_64x64_avx);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_8x16].sa8d = PFX(pixel_sa8d_8x16_avx);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].sa8d = PFX(pixel_sa8d_16x32_avx);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].sa8d = PFX(pixel_sa8d_32x64_avx);
+
         p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].sa8d = PFX(pixel_sa8d_32x32_avx);
         p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].sa8d = PFX(pixel_sa8d_16x16_avx);
         p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].sa8d = PFX(pixel_sa8d_8x8_avx);
 
-        p.cu[BLOCK_16x16].sse_pp = PFX(pixel_ssd_16x16_avx);
-        p.cu[BLOCK_32x32].sse_pp = PFX(pixel_ssd_32x32_avx);
-
         p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].sse_pp = PFX(pixel_ssd_8x8_avx);
 
         p.cu[BLOCK_16x16].var = PFX(pixel_var_16x16_avx);
 
         p.pu[LUMA_16x4].sad_x4  = PFX(pixel_sad_x4_16x4_avx);
-        p.pu[LUMA_16x8].sad_x4  = PFX(pixel_sad_x4_16x8_avx);
-        p.pu[LUMA_16x12].sad_x4 = PFX(pixel_sad_x4_16x12_avx);
-        p.pu[LUMA_16x16].sad_x4 = PFX(pixel_sad_x4_16x16_avx);
-        p.pu[LUMA_16x32].sad_x4 = PFX(pixel_sad_x4_16x32_avx);
-        p.pu[LUMA_32x8].sad_x4  = PFX(pixel_sad_x4_32x8_avx);
-        p.pu[LUMA_32x16].sad_x4 = PFX(pixel_sad_x4_32x16_avx);
-        p.pu[LUMA_32x24].sad_x4 = PFX(pixel_sad_x4_32x24_avx);
-        p.pu[LUMA_32x32].sad_x4 = PFX(pixel_sad_x4_32x32_avx);
-        p.pu[LUMA_32x64].sad_x4 = PFX(pixel_sad_x4_32x64_avx);
 
         p.cu[BLOCK_16x16].copy_ss = PFX(blockcopy_ss_16x16_avx);
         p.cu[BLOCK_32x32].copy_ss = PFX(blockcopy_ss_32x32_avx);
@@ -2666,10 +2652,18 @@ void setupAssemblyPrimitives(EncoderPrim
 
         p.pu[LUMA_48x64].copy_pp = PFX(blockcopy_pp_48x64_avx);
 
-        p.frameInitLowres = PFX(frame_init_lowres_core_avx);
-
         /* The following primitives have been disabled since performance compared to SSE4.2 is negligible/negative */
 #if 0
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].satd = PFX(pixel_satd_16x4_avx);
+        p.pu[LUMA_16x4].satd  = PFX(pixel_satd_16x4_avx);
+        p.pu[LUMA_16x8].satd  = PFX(pixel_satd_16x8_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].satd = PFX(pixel_satd_16x8_avx);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].satd = PFX(pixel_satd_16x8_avx);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].satd = PFX(pixel_satd_32x8_avx);
+        p.pu[LUMA_32x8].satd  = PFX(pixel_satd_32x8_avx);
+        p.cu[BLOCK_8x8].sa8d   = PFX(pixel_sa8d_8x8_avx);
+        p.cu[BLOCK_16x16].sa8d = PFX(pixel_sa8d_16x16_avx);
+        p.cu[BLOCK_32x32].sa8d = PFX(pixel_sa8d_32x32_avx);
         p.pu[LUMA_4x4].satd = p.cu[BLOCK_4x4].sa8d = PFX(pixel_satd_4x4_avx);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].satd = PFX(pixel_satd_8x4_avx);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].satd = PFX(pixel_satd_4x8_avx);
@@ -2686,6 +2680,8 @@ void setupAssemblyPrimitives(EncoderPrim
         p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].satd = PFX(pixel_satd_4x16_avx);
         p.chroma[X265_CSP_I420].cu[BLOCK_420_4x4].sa8d = PFX(pixel_satd_4x4_avx);
 
+        p.cu[BLOCK_16x16].sse_pp = PFX(pixel_ssd_16x16_avx);
+        p.cu[BLOCK_32x32].sse_pp = PFX(pixel_ssd_32x32_avx);
         p.cu[BLOCK_8x8].sse_pp   = PFX(pixel_ssd_8x8_avx);
         p.chroma[X265_CSP_I422].cu[BLOCK_422_8x16].sse_pp = PFX(pixel_ssd_8x16_avx);
         p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].sse_pp = PFX(pixel_ssd_16x32_avx);
@@ -2699,6 +2695,15 @@ void setupAssemblyPrimitives(EncoderPrim
         p.pu[LUMA_16x4].sad_x3  = PFX(pixel_sad_x3_16x4_avx);
         HEVC_SAD_X3(avx);
 
+        p.pu[LUMA_32x8].sad_x4  = PFX(pixel_sad_x4_32x8_avx);
+        p.pu[LUMA_32x16].sad_x4 = PFX(pixel_sad_x4_32x16_avx);
+        p.pu[LUMA_32x24].sad_x4 = PFX(pixel_sad_x4_32x24_avx);
+        p.pu[LUMA_32x32].sad_x4 = PFX(pixel_sad_x4_32x32_avx);
+        p.pu[LUMA_32x64].sad_x4 = PFX(pixel_sad_x4_32x64_avx);
+        p.pu[LUMA_16x8].sad_x4  = PFX(pixel_sad_x4_16x8_avx);
+        p.pu[LUMA_16x16].sad_x4 = PFX(pixel_sad_x4_16x16_avx);
+        p.pu[LUMA_16x32].sad_x4 = PFX(pixel_sad_x4_16x32_avx);
+        p.pu[LUMA_16x12].sad_x4 = PFX(pixel_sad_x4_16x12_avx);
         p.pu[LUMA_12x16].sad_x4 = PFX(pixel_sad_x4_12x16_avx);
         p.pu[LUMA_16x64].sad_x4 = PFX(pixel_sad_x4_16x64_avx);
         p.pu[LUMA_24x32].sad_x4 = PFX(pixel_sad_x4_24x32_avx);
@@ -2710,6 +2715,7 @@ void setupAssemblyPrimitives(EncoderPrim
 
         p.ssim_4x4x2_core = PFX(pixel_ssim_4x4x2_core_avx);
         p.ssim_end_4 = PFX(pixel_ssim_end4_avx);
+        p.frameInitLowres = PFX(frame_init_lowres_core_avx);
 #endif
     }
     if (cpuMask & X265_CPU_XOP)
@@ -2759,13 +2765,8 @@ void setupAssemblyPrimitives(EncoderPrim
         p.cu[BLOCK_32x32].psy_cost_pp = PFX(psyCost_pp_32x32_avx2);
         p.cu[BLOCK_64x64].psy_cost_pp = PFX(psyCost_pp_64x64_avx2);
 
-        p.pu[LUMA_8x4].addAvg = PFX(addAvg_8x4_avx2);
-        p.pu[LUMA_8x8].addAvg = PFX(addAvg_8x8_avx2);
         p.pu[LUMA_8x16].addAvg = PFX(addAvg_8x16_avx2);
         p.pu[LUMA_8x32].addAvg = PFX(addAvg_8x32_avx2);
-
-        p.pu[LUMA_12x16].addAvg = PFX(addAvg_12x16_avx2);
-
         p.pu[LUMA_16x4].addAvg = PFX(addAvg_16x4_avx2);
         p.pu[LUMA_16x8].addAvg = PFX(addAvg_16x8_avx2);
         p.pu[LUMA_16x12].addAvg = PFX(addAvg_16x12_avx2);
@@ -2788,13 +2789,9 @@ void setupAssemblyPrimitives(EncoderPrim
         p.pu[LUMA_64x48].addAvg = PFX(addAvg_64x48_avx2);
         p.pu[LUMA_64x64].addAvg = PFX(addAvg_64x64_avx2);
 
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].addAvg = PFX(addAvg_8x2_avx2);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].addAvg = PFX(addAvg_8x4_avx2);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].addAvg = PFX(addAvg_8x6_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].addAvg = PFX(addAvg_8x8_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].addAvg = PFX(addAvg_8x16_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].addAvg = PFX(addAvg_8x32_avx2);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].addAvg = PFX(addAvg_12x16_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].addAvg = PFX(addAvg_16x4_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].addAvg = PFX(addAvg_16x8_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].addAvg = PFX(addAvg_16x12_avx2);
@@ -2805,13 +2802,11 @@ void setupAssemblyPrimitives(EncoderPrim
         p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].addAvg = PFX(addAvg_32x24_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].addAvg = PFX(addAvg_32x32_avx2);
 
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].addAvg = PFX(addAvg_8x4_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].addAvg = PFX(addAvg_8x8_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].addAvg = PFX(addAvg_8x12_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].addAvg = PFX(addAvg_8x16_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].addAvg = PFX(addAvg_8x32_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].addAvg = PFX(addAvg_8x64_avx2);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].addAvg = PFX(addAvg_12x32_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].addAvg = PFX(addAvg_16x8_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].addAvg = PFX(addAvg_16x16_avx2);
         p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].addAvg = PFX(addAvg_16x24_avx2);
@@ -2846,13 +2841,6 @@ void setupAssemblyPrimitives(EncoderPrim
         p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].sub_ps = PFX(pixel_sub_ps_16x32_avx2);
         p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].sub_ps = PFX(pixel_sub_ps_32x64_avx2);
 
-        p.pu[LUMA_16x4].pixelavg_pp = PFX(pixel_avg_16x4_avx2);
-        p.pu[LUMA_16x8].pixelavg_pp = PFX(pixel_avg_16x8_avx2);
-        p.pu[LUMA_16x12].pixelavg_pp = PFX(pixel_avg_16x12_avx2);
-        p.pu[LUMA_16x16].pixelavg_pp = PFX(pixel_avg_16x16_avx2);
-        p.pu[LUMA_16x32].pixelavg_pp = PFX(pixel_avg_16x32_avx2);
-        p.pu[LUMA_16x64].pixelavg_pp = PFX(pixel_avg_16x64_avx2);
-
         p.pu[LUMA_32x64].pixelavg_pp = PFX(pixel_avg_32x64_avx2);
         p.pu[LUMA_32x32].pixelavg_pp = PFX(pixel_avg_32x32_avx2);
         p.pu[LUMA_32x24].pixelavg_pp = PFX(pixel_avg_32x24_avx2);
@@ -2896,13 +2884,8 @@ void setupAssemblyPrimitives(EncoderPrim
         p.pu[LUMA_64x48].sad = PFX(pixel_sad_64x48_avx2);
         p.pu[LUMA_64x64].sad = PFX(pixel_sad_64x64_avx2);
 
-        p.pu[LUMA_8x4].sad_x3 = PFX(pixel_sad_x3_8x4_avx2);
         p.pu[LUMA_8x8].sad_x3 = PFX(pixel_sad_x3_8x8_avx2);
-        p.pu[LUMA_8x16].sad_x3 = PFX(pixel_sad_x3_8x16_avx2);
-
-        p.pu[LUMA_8x8].sad_x4 = PFX(pixel_sad_x4_8x8_avx2);
         p.pu[LUMA_16x8].sad_x4  = PFX(pixel_sad_x4_16x8_avx2);
-        p.pu[LUMA_16x12].sad_x4 = PFX(pixel_sad_x4_16x12_avx2);
         p.pu[LUMA_16x16].sad_x4 = PFX(pixel_sad_x4_16x16_avx2);
         p.pu[LUMA_16x32].sad_x4 = PFX(pixel_sad_x4_16x32_avx2);
         p.pu[LUMA_32x32].sad_x4 = PFX(pixel_sad_x4_32x32_avx2);
@@ -2927,8 +2910,13 @@ void setupAssemblyPrimitives(EncoderPrim
         p.cu[BLOCK_16x16].blockfill_s = PFX(blockfill_s_16x16_avx2);
         p.cu[BLOCK_32x32].blockfill_s = PFX(blockfill_s_32x32_avx2);
 
-        ALL_LUMA_TU_S(cpy1Dto2D_shl, cpy1Dto2D_shl_, avx2);
-        ALL_LUMA_TU_S(cpy1Dto2D_shr, cpy1Dto2D_shr_, avx2);
+        p.cu[BLOCK_8x8].cpy1Dto2D_shl = PFX(cpy1Dto2D_shl_8_avx2);
+        p.cu[BLOCK_16x16].cpy1Dto2D_shl = PFX(cpy1Dto2D_shl_16_avx2);
+        p.cu[BLOCK_32x32].cpy1Dto2D_shl = PFX(cpy1Dto2D_shl_32_avx2);
+
+        p.cu[BLOCK_8x8].cpy1Dto2D_shr = PFX(cpy1Dto2D_shr_8_avx2);
+        p.cu[BLOCK_16x16].cpy1Dto2D_shr = PFX(cpy1Dto2D_shr_16_avx2);
+        p.cu[BLOCK_32x32].cpy1Dto2D_shr = PFX(cpy1Dto2D_shr_32_avx2);
 
         p.cu[BLOCK_8x8].cpy2Dto1D_shl = PFX(cpy2Dto1D_shl_8_avx2);
         p.cu[BLOCK_16x16].cpy2Dto1D_shl = PFX(cpy2Dto1D_shl_16_avx2);
@@ -2938,7 +2926,10 @@ void setupAssemblyPrimitives(EncoderPrim
         p.cu[BLOCK_16x16].cpy2Dto1D_shr = PFX(cpy2Dto1D_shr_16_avx2);
         p.cu[BLOCK_32x32].cpy2Dto1D_shr = PFX(cpy2Dto1D_shr_32_avx2);
 
-        ALL_LUMA_TU(count_nonzero, count_nonzero, avx2);
+        p.cu[BLOCK_8x8].count_nonzero = PFX(count_nonzero_8x8_avx2);
+        p.cu[BLOCK_16x16].count_nonzero = PFX(count_nonzero_16x16_avx2);
+        p.cu[BLOCK_32x32].count_nonzero = PFX(count_nonzero_32x32_avx2);
+
         p.denoiseDct = PFX(denoise_dct_avx2);
         p.quant = PFX(quant_avx2);
         p.nquant = PFX(nquant_avx2);
@@ -3000,13 +2991,11 @@ void setupAssemblyPrimitives(EncoderPrim
         p.cu[BLOCK_8x8].intra_pred[24] = PFX(intra_pred_ang8_24_avx2);
         p.cu[BLOCK_8x8].intra_pred[11] = PFX(intra_pred_ang8_11_avx2);
         p.cu[BLOCK_8x8].intra_pred[13] = PFX(intra_pred_ang8_13_avx2);
-        p.cu[BLOCK_8x8].intra_pred[20] = PFX(intra_pred_ang8_20_avx2);
         p.cu[BLOCK_8x8].intra_pred[21] = PFX(intra_pred_ang8_21_avx2);
         p.cu[BLOCK_8x8].intra_pred[22] = PFX(intra_pred_ang8_22_avx2);
         p.cu[BLOCK_8x8].intra_pred[23] = PFX(intra_pred_ang8_23_avx2);
         p.cu[BLOCK_8x8].intra_pred[14] = PFX(intra_pred_ang8_14_avx2);
         p.cu[BLOCK_8x8].intra_pred[15] = PFX(intra_pred_ang8_15_avx2);
-        p.cu[BLOCK_8x8].intra_pred[16] = PFX(intra_pred_ang8_16_avx2);
         p.cu[BLOCK_16x16].intra_pred[3] = PFX(intra_pred_ang16_3_avx2);
         p.cu[BLOCK_16x16].intra_pred[4] = PFX(intra_pred_ang16_4_avx2);
         p.cu[BLOCK_16x16].intra_pred[5] = PFX(intra_pred_ang16_5_avx2);
@@ -3016,7 +3005,6 @@ void setupAssemblyPrimitives(EncoderPrim
         p.cu[BLOCK_16x16].intra_pred[9] = PFX(intra_pred_ang16_9_avx2);
         p.cu[BLOCK_16x16].intra_pred[12] = PFX(intra_pred_ang16_12_avx2);
         p.cu[BLOCK_16x16].intra_pred[11] = PFX(intra_pred_ang16_11_avx2);
-        p.cu[BLOCK_16x16].intra_pred[13] = PFX(intra_pred_ang16_13_avx2);
         p.cu[BLOCK_16x16].intra_pred[25] = PFX(intra_pred_ang16_25_avx2);
         p.cu[BLOCK_16x16].intra_pred[28] = PFX(intra_pred_ang16_28_avx2);
         p.cu[BLOCK_16x16].intra_pred[27] = PFX(intra_pred_ang16_27_avx2);
@@ -3049,11 +3037,6 @@ void setupAssemblyPrimitives(EncoderPrim
         // all_angs primitives
         p.cu[BLOCK_4x4].intra_pred_allangs = PFX(all_angs_pred_4x4_avx2);
 
-        // copy_sp primitives
-        p.cu[BLOCK_16x16].copy_sp = PFX(blockcopy_sp_16x16_avx2);
-        p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].copy_sp = PFX(blockcopy_sp_16x16_avx2);
-        p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].copy_sp = PFX(blockcopy_sp_16x32_avx2);
-
         p.cu[BLOCK_32x32].copy_sp = PFX(blockcopy_sp_32x32_avx2);
         p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].copy_sp = PFX(blockcopy_sp_32x32_avx2);
         p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].copy_sp = PFX(blockcopy_sp_32x64_avx2);
@@ -3065,12 +3048,6 @@ void setupAssemblyPrimitives(EncoderPrim
         p.chroma[X265_CSP_I420].cu[CHROMA_420_16x16].copy_ps = PFX(blockcopy_ps_16x16_avx2);
         p.chroma[X265_CSP_I422].cu[CHROMA_422_16x32].copy_ps = PFX(blockcopy_ps_16x32_avx2);
 
-        p.cu[BLOCK_32x32].copy_ps = PFX(blockcopy_ps_32x32_avx2);
-        p.chroma[X265_CSP_I420].cu[CHROMA_420_32x32].copy_ps = PFX(blockcopy_ps_32x32_avx2);
-        p.chroma[X265_CSP_I422].cu[CHROMA_422_32x64].copy_ps = PFX(blockcopy_ps_32x64_avx2);
-
-        p.cu[BLOCK_64x64].copy_ps = PFX(blockcopy_ps_64x64_avx2);
-
         ALL_LUMA_TU_S(dct, dct, avx2);
         ALL_LUMA_TU_S(idct, idct, avx2);
         ALL_LUMA_CU_S(transpose, transpose, avx2);
@@ -3144,9 +3121,6 @@ void setupAssemblyPrimitives(EncoderPrim
         p.chroma[X265_CSP_I420].pu[CHROMA_420_2x8].filter_hpp = PFX(interp_4tap_horiz_pp_2x8_avx2);
 
         p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_hpp = PFX(interp_4tap_horiz_pp_4x2_avx2);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].filter_hpp = PFX(interp_4tap_horiz_pp_4x8_avx2);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].filter_hpp = PFX(interp_4tap_horiz_pp_4x16_avx2);
-
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_hpp = PFX(interp_4tap_horiz_pp_16x4_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].filter_hpp = PFX(interp_4tap_horiz_pp_16x8_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].filter_hpp = PFX(interp_4tap_horiz_pp_16x12_avx2);
@@ -3212,10 +3186,7 @@ void setupAssemblyPrimitives(EncoderPrim
         p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].filter_vpp = PFX(interp_4tap_vert_pp_8x16_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].filter_vpp = PFX(interp_4tap_vert_pp_8x32_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].filter_vpp = PFX(interp_4tap_vert_pp_12x16_avx2);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].filter_vpp = PFX(interp_4tap_vert_pp_16x4_avx2);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].filter_vpp = PFX(interp_4tap_vert_pp_16x8_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].filter_vpp = PFX(interp_4tap_vert_pp_16x12_avx2);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].filter_vpp = PFX(interp_4tap_vert_pp_16x16_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].filter_vpp = PFX(interp_4tap_vert_pp_16x32_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].filter_vpp = PFX(interp_4tap_vert_pp_24x32_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].filter_vpp = PFX(interp_4tap_vert_pp_32x8_avx2);
@@ -3225,10 +3196,8 @@ void setupAssemblyPrimitives(EncoderPrim
 
         p.chroma[X265_CSP_I420].pu[CHROMA_420_2x4].filter_vps = PFX(interp_4tap_vert_ps_2x4_avx2);
         p.chroma[X265_CSP_I420].pu[CHROMA_420_2x8].filter_vps = PFX(interp_4tap_vert_ps_2x8_avx2);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].filter_vps = PFX(interp_4tap_vert_ps_4x2_avx2);


More information about the x265-commits mailing list