[x265] [PATCH] SEA Motion Search: AVX2 framework of integral functions

jayashri at multicorewareinc.com jayashri at multicorewareinc.com
Thu May 4 15:37:04 CEST 2017


# HG changeset patch
# User Vignesh Vijayakumar
# Date 1493790986 -19800
#      Wed May 03 11:26:26 2017 +0530
# Node ID d666bdd4a1e1bb5818b9b8a7b33af0b7a2333843
# Parent  fd01abfc7898283922b34f7fcd9192ddf9c7818a
SEA Motion Search: AVX2 framework of integral functions
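
For context, the C fallbacks this patch registers in framefilter.cpp work as a pair: an N-wide horizontal pass keeps a sliding window sum of the pixel row and accumulates it with the sum row above, and an M-tall vertical pass then differences sum rows M apart, leaving N-wide by M-tall block sums in place (the encoder mixes widths and heights, e.g. 32x24). A minimal standalone sketch of the 4-wide/4-tall pair built from those C routines follows; the 16x16 plane, the zero border row and main() are illustrative scaffolding only, and pixel is assumed to be 8-bit (!HIGH_BIT_DEPTH):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    typedef uint8_t pixel;                    /* assumes a !HIGH_BIT_DEPTH build */

    /* same arithmetic as integral_init4h_c() in framefilter.cpp below */
    static void integral_init4h_c(uint32_t *sum, pixel *pix, intptr_t stride)
    {
        int32_t v = pix[0] + pix[1] + pix[2] + pix[3];
        for (int16_t x = 0; x < stride - 4; x++)
        {
            sum[x] = v + sum[x - stride];     /* window sum + row above */
            v += pix[x + 4] - pix[x];         /* slide the 4-wide window */
        }
    }

    /* same arithmetic as integral_init4v_c() in framefilter.cpp below */
    static void integral_init4v_c(uint32_t *sum4, intptr_t stride)
    {
        for (int x = 0; x < stride; x++)
            sum4[x] = sum4[x + 4 * stride] - sum4[x];
    }

    int main(void)
    {
        enum { W = 16, H = 16 };
        pixel    pix[H * W];
        uint32_t sum[(H + 1) * W];            /* first W entries are a zero border row */

        memset(sum, 0, sizeof(sum));
        for (int y = 0; y < H; y++)
            for (int x = 0; x < W; x++)
                pix[y * W + x] = (pixel)(x + y);

        /* horizontal pass: sum row r accumulates 4-wide window sums of pixel
           rows 0..r-1, so the sums grow cumulatively down the plane */
        for (int y = 0; y < H; y++)
            integral_init4h_c(sum + (y + 1) * W, pix + y * W, W);

        /* vertical pass: differencing sum rows 4 apart leaves, in sum row r,
           the 4x4 block sums covering pixel rows r..r+3 */
        for (int y = 0; y + 5 <= H; y++)
            integral_init4v_c(sum + (y + 1) * W, W);

        /* block covering pixel rows 1..4, cols 0..3 (64 with this fill) */
        printf("4x4 block sum: %u\n", sum[W]);
        return 0;
    }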

diff -r fd01abfc7898 -r d666bdd4a1e1 source/common/CMakeLists.txt
--- a/source/common/CMakeLists.txt	Tue May 02 14:09:48 2017 -0700
+++ b/source/common/CMakeLists.txt	Wed May 03 11:26:26 2017 +0530
@@ -57,10 +57,10 @@
     set(VEC_PRIMITIVES vec/vec-primitives.cpp ${PRIMITIVES})
     source_group(Intrinsics FILES ${VEC_PRIMITIVES})
 
-    set(C_SRCS asm-primitives.cpp pixel.h mc.h ipfilter8.h blockcopy8.h dct8.h loopfilter.h)
+    set(C_SRCS asm-primitives.cpp pixel.h mc.h ipfilter8.h blockcopy8.h dct8.h loopfilter.h seaintegral.h)
     set(A_SRCS pixel-a.asm const-a.asm cpu-a.asm ssd-a.asm mc-a.asm
                mc-a2.asm pixel-util8.asm blockcopy8.asm
-               pixeladd8.asm dct8.asm)
+               pixeladd8.asm dct8.asm seaintegral.asm)
     if(HIGH_BIT_DEPTH)
         set(A_SRCS ${A_SRCS} sad16-a.asm intrapred16.asm ipfilter16.asm loopfilter.asm)
     else()
diff -r fd01abfc7898 -r d666bdd4a1e1 source/common/primitives.cpp
--- a/source/common/primitives.cpp	Tue May 02 14:09:48 2017 -0700
+++ b/source/common/primitives.cpp	Wed May 03 11:26:26 2017 +0530
@@ -57,6 +57,7 @@
 void setupIntraPrimitives_c(EncoderPrimitives &p);
 void setupLoopFilterPrimitives_c(EncoderPrimitives &p);
 void setupSaoPrimitives_c(EncoderPrimitives &p);
+void setupSeaIntegralPrimitives_c(EncoderPrimitives &p);
 
 void setupCPrimitives(EncoderPrimitives &p)
 {
@@ -66,6 +67,7 @@
     setupIntraPrimitives_c(p);      // intrapred.cpp
     setupLoopFilterPrimitives_c(p); // loopfilter.cpp
     setupSaoPrimitives_c(p);        // sao.cpp
+    setupSeaIntegralPrimitives_c(p);  // framefilter.cpp
 }
 
 void setupAliasPrimitives(EncoderPrimitives &p)
diff -r fd01abfc7898 -r d666bdd4a1e1 source/common/primitives.h
--- a/source/common/primitives.h	Tue May 02 14:09:48 2017 -0700
+++ b/source/common/primitives.h	Wed May 03 11:26:26 2017 +0530
@@ -110,6 +110,17 @@
     BLOCK_422_32x64
 };
 
+enum IntegralSize
+{
+    INTEGRAL_4,
+    INTEGRAL_8,
+    INTEGRAL_12,
+    INTEGRAL_16,
+    INTEGRAL_24,
+    INTEGRAL_32,
+    NUM_INTEGRAL_SIZE
+};
+
 typedef int  (*pixelcmp_t)(const pixel* fenc, intptr_t fencstride, const pixel* fref, intptr_t frefstride); // fenc is aligned
 typedef int  (*pixelcmp_ss_t)(const int16_t* fenc, intptr_t fencstride, const int16_t* fref, intptr_t frefstride);
 typedef sse_t (*pixel_sse_t)(const pixel* fenc, intptr_t fencstride, const pixel* fref, intptr_t frefstride); // fenc is aligned
@@ -203,6 +214,9 @@
 typedef void (*pelFilterLumaStrong_t)(pixel* src, intptr_t srcStep, intptr_t offset, int32_t tcP, int32_t tcQ);
 typedef void (*pelFilterChroma_t)(pixel* src, intptr_t srcStep, intptr_t offset, int32_t tc, int32_t maskP, int32_t maskQ);
 
+typedef void (*integralv_t)(uint32_t *sum, intptr_t stride);
+typedef void (*integralh_t)(uint32_t *sum, pixel *pix, intptr_t stride);
+
 /* Function pointers to optimized encoder primitives. Each pointer can reference
  * either an assembly routine, a SIMD intrinsic primitive, or a C function */
 struct EncoderPrimitives
@@ -342,6 +356,9 @@
     pelFilterLumaStrong_t pelFilterLumaStrong[2]; // EDGE_VER = 0, EDGE_HOR = 1
     pelFilterChroma_t     pelFilterChroma[2];     // EDGE_VER = 0, EDGE_HOR = 1
 
+    integralv_t            integral_initv[NUM_INTEGRAL_SIZE];
+    integralh_t            integral_inith[NUM_INTEGRAL_SIZE];
+
     /* There is one set of chroma primitives per color space. An encoder will
      * have just a single color space and thus it will only ever use one entry
      * in this array. However we always fill all entries in the array in case
diff -r fd01abfc7898 -r d666bdd4a1e1 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Tue May 02 14:09:48 2017 -0700
+++ b/source/common/x86/asm-primitives.cpp	Wed May 03 11:26:26 2017 +0530
@@ -114,6 +114,7 @@
 #include "blockcopy8.h"
 #include "intrapred.h"
 #include "dct8.h"
+#include "seaintegral.h"
 }
 
 #define ALL_LUMA_CU_TYPED(prim, fncdef, fname, cpu) \
@@ -2157,6 +2158,19 @@
         p.fix8Unpack = PFX(cutree_fix8_unpack_avx2);
         p.fix8Pack = PFX(cutree_fix8_pack_avx2);
 
+        p.integral_initv[INTEGRAL_4] = PFX(integral4v_avx2);
+        p.integral_initv[INTEGRAL_8] = PFX(integral8v_avx2);
+        p.integral_initv[INTEGRAL_12] = PFX(integral12v_avx2);
+        p.integral_initv[INTEGRAL_16] = PFX(integral16v_avx2);
+        p.integral_initv[INTEGRAL_24] = PFX(integral24v_avx2);
+        p.integral_initv[INTEGRAL_32] = PFX(integral32v_avx2);
+        p.integral_inith[INTEGRAL_4] = PFX(integral4h_avx2);
+        p.integral_inith[INTEGRAL_8] = PFX(integral8h_avx2);
+        p.integral_inith[INTEGRAL_12] = PFX(integral12h_avx2);
+        p.integral_inith[INTEGRAL_16] = PFX(integral16h_avx2);
+        p.integral_inith[INTEGRAL_24] = PFX(integral24h_avx2);
+        p.integral_inith[INTEGRAL_32] = PFX(integral32h_avx2);
+
         /* TODO: This kernel needs to be modified to work with HIGH_BIT_DEPTH only 
         p.planeClipAndMax = PFX(planeClipAndMax_avx2); */
 
@@ -3695,6 +3709,18 @@
         p.fix8Unpack = PFX(cutree_fix8_unpack_avx2);
         p.fix8Pack = PFX(cutree_fix8_pack_avx2);
 
+        p.integral_initv[INTEGRAL_4] = PFX(integral4v_avx2);
+        p.integral_initv[INTEGRAL_8] = PFX(integral8v_avx2);
+        p.integral_initv[INTEGRAL_12] = PFX(integral12v_avx2);
+        p.integral_initv[INTEGRAL_16] = PFX(integral16v_avx2);
+        p.integral_initv[INTEGRAL_24] = PFX(integral24v_avx2);
+        p.integral_initv[INTEGRAL_32] = PFX(integral32v_avx2);
+        p.integral_inith[INTEGRAL_4] = PFX(integral4h_avx2);
+        p.integral_inith[INTEGRAL_8] = PFX(integral8h_avx2);
+        p.integral_inith[INTEGRAL_12] = PFX(integral12h_avx2);
+        p.integral_inith[INTEGRAL_16] = PFX(integral16h_avx2);
+        p.integral_inith[INTEGRAL_24] = PFX(integral24h_avx2);
+        p.integral_inith[INTEGRAL_32] = PFX(integral32h_avx2);
     }
 #endif
 }
diff -r fd01abfc7898 -r d666bdd4a1e1 source/common/x86/seaintegral.asm
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/source/common/x86/seaintegral.asm	Wed May 03 11:26:26 2017 +0530
@@ -0,0 +1,125 @@
+;*****************************************************************************
+;* Copyright (C) 2013-2017 MulticoreWare, Inc
+;*
+;* Authors: Jayashri Murugan <jayashri at multicorewareinc.com>
+;*          Vignesh V Menon <vignesh at multicorewareinc.com>
+;*          Praveen Tiwari <praveen at multicorewareinc.com>
+;*
+;* This program is free software; you can redistribute it and/or modify
+;* it under the terms of the GNU General Public License as published by
+;* the Free Software Foundation; either version 2 of the License, or
+;* (at your option) any later version.
+;*
+;* This program is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;* GNU General Public License for more details.
+;*
+;* You should have received a copy of the GNU General Public License
+;* along with this program; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+;*
+;* This program is also available under a commercial proprietary license.
+;* For more information, contact us at license @ x265.com.
+;*****************************************************************************/
+
+%include "x86inc.asm"
+%include "x86util.asm"
+
+SECTION .text 
+
+;-----------------------------------------------------------------------------
+;void integral_init4v_c(uint32_t *sum4, intptr_t stride)
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal integral4v, 2, 2, 0
+ 
+    RET
+
+;-----------------------------------------------------------------------------
+;void integral_init8v_c(uint32_t *sum8, intptr_t stride)
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal integral8v, 2, 2, 0
+ 
+    RET
+
+;-----------------------------------------------------------------------------
+;void integral_init12v_c(uint32_t *sum12, intptr_t stride)
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal integral12v, 2, 2, 0
+ 
+    RET
+
+;-----------------------------------------------------------------------------
+;void integral_init16v_c(uint32_t *sum16, intptr_t stride)
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal integral16v, 2, 2, 0
+ 
+    RET
+
+;-----------------------------------------------------------------------------
+;void integral_init24v_c(uint32_t *sum24, intptr_t stride)
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal integral24v, 2, 2, 0
+ 
+    RET
+
+;-----------------------------------------------------------------------------
+;void integral_init32v_c(uint32_t *sum32, intptr_t stride)
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal integral32v, 2, 2, 0
+ 
+    RET
+
+;-----------------------------------------------------------------------------
+;static void integral_init4h_c(uint32_t *sum, pixel *pix, intptr_t stride)
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal integral4h, 3, 3, 0
+ 
+    RET
+
+;-----------------------------------------------------------------------------
+;static void integral_init8h_c(uint32_t *sum, pixel *pix, intptr_t stride)
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal integral8h, 3, 3, 0
+ 
+    RET
+
+;-----------------------------------------------------------------------------
+;static void integral_init12h_c(uint32_t *sum, pixel *pix, intptr_t stride)
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal integral12h, 3, 3, 0
+ 
+    RET
+
+;-----------------------------------------------------------------------------
+;static void integral_init16h_c(uint32_t *sum, pixel *pix, intptr_t stride)
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal integral16h, 3, 3, 0
+ 
+    RET
+
+;-----------------------------------------------------------------------------
+;static void integral_init24h_c(uint32_t *sum, pixel *pix, intptr_t stride)
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal integral24h, 3, 3, 0
+ 
+    RET
+
+;-----------------------------------------------------------------------------
+;static void integral_init32h_c(uint32_t *sum, pixel *pix, intptr_t stride)
+;-----------------------------------------------------------------------------
+INIT_YMM avx2
+cglobal integral32h, 3, 3, 0
+ 
+    RET
diff -r fd01abfc7898 -r d666bdd4a1e1 source/common/x86/seaintegral.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/source/common/x86/seaintegral.h	Wed May 03 11:26:26 2017 +0530
@@ -0,0 +1,42 @@
+/*****************************************************************************
+* Copyright (C) 2013-2017 MulticoreWare, Inc
+*
+* Authors: Vignesh V Menon <vignesh at multicorewareinc.com>
+*          Jayashri Murugan <jayashri at multicorewareinc.com>
+*          Praveen Tiwari <praveen at multicorewareinc.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+*
+* This program is also available under a commercial proprietary license.
+* For more information, contact us at license @ x265.com.
+*****************************************************************************/
+
+#ifndef X265_SEAINTEGRAL_H
+#define X265_SEAINTEGRAL_H
+
+void PFX(integral4v_avx2)(uint32_t *sum, intptr_t stride);
+void PFX(integral8v_avx2)(uint32_t *sum, intptr_t stride);
+void PFX(integral12v_avx2)(uint32_t *sum, intptr_t stride);
+void PFX(integral16v_avx2)(uint32_t *sum, intptr_t stride);
+void PFX(integral24v_avx2)(uint32_t *sum, intptr_t stride);
+void PFX(integral32v_avx2)(uint32_t *sum, intptr_t stride);
+void PFX(integral4h_avx2)(uint32_t *sum, pixel *pix, intptr_t stride);
+void PFX(integral8h_avx2)(uint32_t *sum, pixel *pix, intptr_t stride);
+void PFX(integral12h_avx2)(uint32_t *sum, pixel *pix, intptr_t stride);
+void PFX(integral16h_avx2)(uint32_t *sum, pixel *pix, intptr_t stride);
+void PFX(integral24h_avx2)(uint32_t *sum, pixel *pix, intptr_t stride);
+void PFX(integral32h_avx2)(uint32_t *sum, pixel *pix, intptr_t stride);
+
+#endif //X265_SEAINTEGRAL_H
diff -r fd01abfc7898 -r d666bdd4a1e1 source/encoder/framefilter.cpp
--- a/source/encoder/framefilter.cpp	Tue May 02 14:09:48 2017 -0700
+++ b/source/encoder/framefilter.cpp	Wed May 03 11:26:26 2017 +0530
@@ -35,107 +35,126 @@
 static uint64_t computeSSD(pixel *fenc, pixel *rec, intptr_t stride, uint32_t width, uint32_t height);
 static float calculateSSIM(pixel *pix1, intptr_t stride1, pixel *pix2, intptr_t stride2, uint32_t width, uint32_t height, void *buf, uint32_t& cnt);
 
-static void integral_init4h(uint32_t *sum, pixel *pix, intptr_t stride)
-{
-    int32_t v = pix[0] + pix[1] + pix[2] + pix[3];
-    for (int16_t x = 0; x < stride - 4; x++)
-    {
-        sum[x] = v + sum[x - stride];
-        v += pix[x + 4] - pix[x];
-    }
-}
-
-static void integral_init8h(uint32_t *sum, pixel *pix, intptr_t stride)
-{
-    int32_t v = pix[0] + pix[1] + pix[2] + pix[3] + pix[4] + pix[5] + pix[6] + pix[7];
-    for (int16_t x = 0; x < stride - 8; x++)
-    {
-        sum[x] = v + sum[x - stride];
-        v += pix[x + 8] - pix[x];
-    }
-}
-
-static void integral_init12h(uint32_t *sum, pixel *pix, intptr_t stride)
+namespace X265_NS
 {
-    int32_t v = pix[0] + pix[1] + pix[2] + pix[3] + pix[4] + pix[5] + pix[6] + pix[7] +
-        pix[8] + pix[9] + pix[10] + pix[11];
-    for (int16_t x = 0; x < stride - 12; x++)
-    {
-        sum[x] = v + sum[x - stride];
-        v += pix[x + 12] - pix[x];
-    }
-}
-
-static void integral_init16h(uint32_t *sum, pixel *pix, intptr_t stride)
-{
-    int32_t v = pix[0] + pix[1] + pix[2] + pix[3] + pix[4] + pix[5] + pix[6] + pix[7] +
-        pix[8] + pix[9] + pix[10] + pix[11] + pix[12] + pix[13] + pix[14] + pix[15];
-    for (int16_t x = 0; x < stride - 16; x++)
-    {
-        sum[x] = v + sum[x - stride];
-        v += pix[x + 16] - pix[x];
-    }
-}
-
-static void integral_init24h(uint32_t *sum, pixel *pix, intptr_t stride)
-{
-    int32_t v = pix[0] + pix[1] + pix[2] + pix[3] + pix[4] + pix[5] + pix[6] + pix[7] +
-        pix[8] + pix[9] + pix[10] + pix[11] + pix[12] + pix[13] + pix[14] + pix[15] +
-        pix[16] + pix[17] + pix[18] + pix[19] + pix[20] + pix[21] + pix[22] + pix[23];
-    for (int16_t x = 0; x < stride - 24; x++)
+    static void integral_init4h_c(uint32_t *sum, pixel *pix, intptr_t stride)
     {
-        sum[x] = v + sum[x - stride];
-        v += pix[x + 24] - pix[x];
+        int32_t v = pix[0] + pix[1] + pix[2] + pix[3];
+        for (int16_t x = 0; x < stride - 4; x++)
+        {
+            sum[x] = v + sum[x - stride];
+            v += pix[x + 4] - pix[x];
+        }
     }
-}
-
-static void integral_init32h(uint32_t *sum, pixel *pix, intptr_t stride)
-{
-    int32_t v = pix[0] + pix[1] + pix[2] + pix[3] + pix[4] + pix[5] + pix[6] + pix[7] +
-        pix[8] + pix[9] + pix[10] + pix[11] + pix[12] + pix[13] + pix[14] + pix[15] +
-        pix[16] + pix[17] + pix[18] + pix[19] + pix[20] + pix[21] + pix[22] + pix[23] +
-        pix[24] + pix[25] + pix[26] + pix[27] + pix[28] + pix[29] + pix[30] + pix[31];
-    for (int16_t x = 0; x < stride - 32; x++)
-    {
-        sum[x] = v + sum[x - stride];
-        v += pix[x + 32] - pix[x];
-    }
-}
-
-static void integral_init4v(uint32_t *sum4, intptr_t stride)
-{
-    for (int x = 0; x < stride; x++)
-        sum4[x] = sum4[x + 4 * stride] - sum4[x];
-}
 
-static void integral_init8v(uint32_t *sum8, intptr_t stride)
-{
-    for (int x = 0; x < stride; x++)
-        sum8[x] = sum8[x + 8 * stride] - sum8[x];
-}
-
-static void integral_init12v(uint32_t *sum12, intptr_t stride)
-{
-    for (int x = 0; x < stride; x++)
-        sum12[x] = sum12[x + 12 * stride] - sum12[x];
-}
+    static void integral_init8h_c(uint32_t *sum, pixel *pix, intptr_t stride)
+    {
+        int32_t v = pix[0] + pix[1] + pix[2] + pix[3] + pix[4] + pix[5] + pix[6] + pix[7];
+        for (int16_t x = 0; x < stride - 8; x++)
+        {
+            sum[x] = v + sum[x - stride];
+            v += pix[x + 8] - pix[x];
+        }
+    }
 
-static void integral_init16v(uint32_t *sum16, intptr_t stride)
-{
-    for (int x = 0; x < stride; x++)
-        sum16[x] = sum16[x + 16 * stride] - sum16[x];
-}
+    static void integral_init12h_c(uint32_t *sum, pixel *pix, intptr_t stride)
+    {
+        int32_t v = pix[0] + pix[1] + pix[2] + pix[3] + pix[4] + pix[5] + pix[6] + pix[7] +
+            pix[8] + pix[9] + pix[10] + pix[11];
+        for (int16_t x = 0; x < stride - 12; x++)
+        {
+            sum[x] = v + sum[x - stride];
+            v += pix[x + 12] - pix[x];
+        }
+    }
 
-static void integral_init24v(uint32_t *sum24, intptr_t stride)
-{
-    for (int x = 0; x < stride; x++)
-        sum24[x] = sum24[x + 24 * stride] - sum24[x];
-}
+    static void integral_init16h_c(uint32_t *sum, pixel *pix, intptr_t stride)
+    {
+        int32_t v = pix[0] + pix[1] + pix[2] + pix[3] + pix[4] + pix[5] + pix[6] + pix[7] +
+            pix[8] + pix[9] + pix[10] + pix[11] + pix[12] + pix[13] + pix[14] + pix[15];
+        for (int16_t x = 0; x < stride - 16; x++)
+        {
+            sum[x] = v + sum[x - stride];
+            v += pix[x + 16] - pix[x];
+        }
+    }
 
-static void integral_init32v(uint32_t *sum32, intptr_t stride)
-{
-    for (int x = 0; x < stride; x++)
-        sum32[x] = sum32[x + 32 * stride] - sum32[x];
+    static void integral_init24h_c(uint32_t *sum, pixel *pix, intptr_t stride)
+    {
+        int32_t v = pix[0] + pix[1] + pix[2] + pix[3] + pix[4] + pix[5] + pix[6] + pix[7] +
+            pix[8] + pix[9] + pix[10] + pix[11] + pix[12] + pix[13] + pix[14] + pix[15] +
+            pix[16] + pix[17] + pix[18] + pix[19] + pix[20] + pix[21] + pix[22] + pix[23];
+        for (int16_t x = 0; x < stride - 24; x++)
+        {
+            sum[x] = v + sum[x - stride];
+            v += pix[x + 24] - pix[x];
+        }
+    }
+
+    static void integral_init32h_c(uint32_t *sum, pixel *pix, intptr_t stride)
+    {
+        int32_t v = pix[0] + pix[1] + pix[2] + pix[3] + pix[4] + pix[5] + pix[6] + pix[7] +
+            pix[8] + pix[9] + pix[10] + pix[11] + pix[12] + pix[13] + pix[14] + pix[15] +
+            pix[16] + pix[17] + pix[18] + pix[19] + pix[20] + pix[21] + pix[22] + pix[23] +
+            pix[24] + pix[25] + pix[26] + pix[27] + pix[28] + pix[29] + pix[30] + pix[31];
+        for (int16_t x = 0; x < stride - 32; x++)
+        {
+            sum[x] = v + sum[x - stride];
+            v += pix[x + 32] - pix[x];
+        }
+    }
+
+    static void integral_init4v_c(uint32_t *sum4, intptr_t stride)
+    {
+        for (int x = 0; x < stride; x++)
+            sum4[x] = sum4[x + 4 * stride] - sum4[x];
+    }
+
+    static void integral_init8v_c(uint32_t *sum8, intptr_t stride)
+    {
+        for (int x = 0; x < stride; x++)
+            sum8[x] = sum8[x + 8 * stride] - sum8[x];
+    }
+
+    static void integral_init12v_c(uint32_t *sum12, intptr_t stride)
+    {
+        for (int x = 0; x < stride; x++)
+            sum12[x] = sum12[x + 12 * stride] - sum12[x];
+    }
+
+    static void integral_init16v_c(uint32_t *sum16, intptr_t stride)
+    {
+        for (int x = 0; x < stride; x++)
+            sum16[x] = sum16[x + 16 * stride] - sum16[x];
+    }
+
+    static void integral_init24v_c(uint32_t *sum24, intptr_t stride)
+    {
+        for (int x = 0; x < stride; x++)
+            sum24[x] = sum24[x + 24 * stride] - sum24[x];
+    }
+
+    static void integral_init32v_c(uint32_t *sum32, intptr_t stride)
+    {
+        for (int x = 0; x < stride; x++)
+            sum32[x] = sum32[x + 32 * stride] - sum32[x];
+    }
+
+    void setupSeaIntegralPrimitives_c(EncoderPrimitives &p)
+    {
+        p.integral_initv[INTEGRAL_4] = integral_init4v_c;
+        p.integral_initv[INTEGRAL_8] = integral_init8v_c;
+        p.integral_initv[INTEGRAL_12] = integral_init12v_c;
+        p.integral_initv[INTEGRAL_16] = integral_init16v_c;
+        p.integral_initv[INTEGRAL_24] = integral_init24v_c;
+        p.integral_initv[INTEGRAL_32] = integral_init32v_c;
+        p.integral_inith[INTEGRAL_4] = integral_init4h_c;
+        p.integral_inith[INTEGRAL_8] = integral_init8h_c;
+        p.integral_inith[INTEGRAL_12] = integral_init12h_c;
+        p.integral_inith[INTEGRAL_16] = integral_init16h_c;
+        p.integral_inith[INTEGRAL_24] = integral_init24h_c;
+        p.integral_inith[INTEGRAL_32] = integral_init32h_c;
+    }
 }
 
 void FrameFilter::destroy()
@@ -833,47 +852,47 @@
             uint32_t *sum4x4 = m_frame->m_encData->m_meIntegral[11] + (y + 1) * stride - padX;
 
             /*For width = 32 */
-            integral_init32h(sum32x32, pix, stride);
+            integral_init32h_c(sum32x32, pix, stride);
             if (y >= 32 - padY)
-                integral_init32v(sum32x32 - 32 * stride, stride);
-            integral_init32h(sum32x24, pix, stride);
+                integral_init32v_c(sum32x32 - 32 * stride, stride);
+            integral_init32h_c(sum32x24, pix, stride);
             if (y >= 24 - padY)
-                integral_init24v(sum32x24 - 24 * stride, stride);
-            integral_init32h(sum32x8, pix, stride);
+                integral_init24v_c(sum32x24 - 24 * stride, stride);
+            integral_init32h_c(sum32x8, pix, stride);
             if (y >= 8 - padY)
-                integral_init8v(sum32x8 - 8 * stride, stride);
+                integral_init8v_c(sum32x8 - 8 * stride, stride);
             /*For width = 24 */
-            integral_init24h(sum24x32, pix, stride);
+            integral_init24h_c(sum24x32, pix, stride);
             if (y >= 32 - padY)
-                integral_init32v(sum24x32 - 32 * stride, stride);
+                integral_init32v_c(sum24x32 - 32 * stride, stride);
             /*For width = 16 */
-            integral_init16h(sum16x16, pix, stride);
+            integral_init16h_c(sum16x16, pix, stride);
             if (y >= 16 - padY)
-                integral_init16v(sum16x16 - 16 * stride, stride);
-            integral_init16h(sum16x12, pix, stride);
+                integral_init16v_c(sum16x16 - 16 * stride, stride);
+            integral_init16h_c(sum16x12, pix, stride);
             if (y >= 12 - padY)
-                integral_init12v(sum16x12 - 12 * stride, stride);
-            integral_init16h(sum16x4, pix, stride);
+                integral_init12v_c(sum16x12 - 12 * stride, stride);
+            integral_init16h_c(sum16x4, pix, stride);
             if (y >= 4 - padY)
-                integral_init4v(sum16x4 - 4 * stride, stride);
+                integral_init4v_c(sum16x4 - 4 * stride, stride);
             /*For width = 12 */
-            integral_init12h(sum12x16, pix, stride);
+            integral_init12h_c(sum12x16, pix, stride);
             if (y >= 16 - padY)
-                integral_init16v(sum12x16 - 16 * stride, stride);
+                integral_init16v_c(sum12x16 - 16 * stride, stride);
             /*For width = 8 */
-            integral_init8h(sum8x32, pix, stride);
+            integral_init8h_c(sum8x32, pix, stride);
             if (y >= 32 - padY)
-                integral_init32v(sum8x32 - 32 * stride, stride);
-            integral_init8h(sum8x8, pix, stride);
+                integral_init32v_c(sum8x32 - 32 * stride, stride);
+            integral_init8h_c(sum8x8, pix, stride);
             if (y >= 8 - padY)
-                integral_init8v(sum8x8 - 8 * stride, stride);
+                integral_init8v_c(sum8x8 - 8 * stride, stride);
             /*For width = 4 */
-            integral_init4h(sum4x16, pix, stride);
+            integral_init4h_c(sum4x16, pix, stride);
             if (y >= 16 - padY)
-                integral_init16v(sum4x16 - 16 * stride, stride);
-            integral_init4h(sum4x4, pix, stride);
+                integral_init16v_c(sum4x16 - 16 * stride, stride);
+            integral_init4h_c(sum4x4, pix, stride);
             if (y >= 4 - padY)
-                integral_init4v(sum4x4 - 4 * stride, stride);
+                integral_init4v_c(sum4x4 - 4 * stride, stride);
         }
         m_parallelFilter[row].m_frameFilter->integralCompleted.set(1);
     }
diff -r fd01abfc7898 -r d666bdd4a1e1 source/test/pixelharness.cpp
--- a/source/test/pixelharness.cpp	Tue May 02 14:09:48 2017 -0700
+++ b/source/test/pixelharness.cpp	Wed May 03 11:26:26 2017 +0530
@@ -2002,6 +2002,70 @@
     return true;
 }
 
+bool PixelHarness::check_integral_initv(integralv_t ref, integralv_t opt)
+{
+    intptr_t srcStep = 64;
+    int j = 0;
+    uint32_t dst_ref[BUFFSIZE] = { 0 };
+    uint32_t dst_opt[BUFFSIZE] = { 0 };
+
+    for (int i = 0; i < 64; i++)
+    {
+        dst_ref[i] = pixel_test_buff[0][i];
+        dst_opt[i] = pixel_test_buff[0][i];
+    }
+
+    for (int i = 0, k = 0; i < BUFFSIZE; i++)
+    {
+        if (i % 64 == 0)
+            k++;
+        dst_ref[i] = dst_ref[i % 64] + k;
+        dst_opt[i] = dst_opt[i % 64] + k;
+    }
+
+    int padx = 4;
+    int pady = 4;
+    uint32_t *dst_ref_ptr = dst_ref + srcStep * pady + padx;
+    uint32_t *dst_opt_ptr = dst_opt + srcStep * pady + padx;
+    for (int i = 0; i < ITERS; i++)
+    {
+        ref(dst_ref_ptr, srcStep);
+        checked(opt, dst_opt_ptr, srcStep);
+
+        if (memcmp(dst_ref, dst_opt, sizeof(uint32_t) * BUFFSIZE))
+            return false;
+
+        reportfail()
+        j += INCR;
+    }
+    return true;
+}
+
+bool PixelHarness::check_integral_inith(integralh_t ref, integralh_t opt)
+{
+    intptr_t srcStep = 64;
+    int j = 0;
+    uint32_t dst_ref[BUFFSIZE] = { 0 };
+    uint32_t dst_opt[BUFFSIZE] = { 0 };
+
+    int padx = 4;
+    int pady = 4;
+    uint32_t *dst_ref_ptr = dst_ref + srcStep * pady + padx;
+    uint32_t *dst_opt_ptr = dst_opt + srcStep * pady + padx;
+    for (int k = 0; k < ITERS; k++)
+    {
+        ref(dst_ref_ptr, pixel_test_buff[0], srcStep);
+        checked(opt, dst_opt_ptr, pixel_test_buff[0], srcStep);
+
+        if (memcmp(dst_ref, dst_opt, sizeof(uint32_t) * BUFFSIZE))
+            return false;
+
+        reportfail()
+        j += INCR;
+    }
+    return true;
+}
+
 bool PixelHarness::testPU(int part, const EncoderPrimitives& ref, const EncoderPrimitives& opt)
 {
     if (opt.pu[part].satd)
@@ -2687,6 +2751,64 @@
         }
     }
 
+    for (int k = 0; k < NUM_INTEGRAL_SIZE; k++)
+    {
+        if (opt.integral_initv[k] && !check_integral_initv(ref.integral_initv[k], opt.integral_initv[k]))
+        {
+            switch (k)
+            {
+            case 0:
+                printf("Integral4v failed!\n");
+                break;
+            case 1:
+                printf("Integral8v failed!\n");
+                break;
+            case 2:
+                printf("Integral12v failed!\n");
+                break;
+            case 3:
+                printf("Integral16v failed!\n");
+                break;
+            case 4:
+                printf("Integral24v failed!\n");
+                break;
+            case 5:
+                printf("Integral32v failed!\n");
+                break;
+            }
+            return false;
+        }
+    }
+
+
+    for (int k = 0; k < NUM_INTEGRAL_SIZE; k++)
+    {
+        if (opt.integral_inith[k] && !check_integral_inith(ref.integral_inith[k], opt.integral_inith[k]))
+        {
+            switch (k)
+            {
+                case 0:
+                    printf("Integral4h failed!\n");
+                    break;
+                case 1:
+                    printf("Integral8h failed!\n");
+                    break;
+                case 2:
+                    printf("Integral12h failed!\n");
+                    break;
+                case 3:
+                    printf("Integral16h failed!\n");
+                    break;
+                case 4:
+                    printf("Integral24h failed!\n");
+                    break;
+                case 5:
+                    printf("Integral32h failed!\n");
+                    break;
+            }
+            return false;
+        }
+    }
     return true;
 }
 
@@ -3209,4 +3331,67 @@
         HEADER0("pelFilterChroma_Horizontal");
         REPORT_SPEEDUP(opt.pelFilterChroma[1], ref.pelFilterChroma[1], pbuf1, 1, STRIDE, tc, maskP, maskQ);
     }
+
+    for (int k = 0; k < NUM_INTEGRAL_SIZE; k++)
+    {
+        if (opt.integral_initv[k])
+        {
+            switch (k)
+            {
+                case 0:
+                    HEADER0("integral_init4v");
+                    break;
+                case 1:
+                    HEADER0("integral_init8v");
+                    break;
+                case 2:
+                    HEADER0("integral_init12v");
+                    break;
+                case 3:
+                    HEADER0("integral_init16v");
+                    break;
+                case 4:
+                    HEADER0("integral_init24v");
+                    break;
+                case 5:
+                    HEADER0("integral_init32v");
+                    break;
+                default:
+                    break;
+            }
+            REPORT_SPEEDUP(opt.integral_initv[k], ref.integral_initv[k], (uint32_t*)pbuf1, STRIDE);
+        }
+    }
+
+    for (int k = 0; k < NUM_INTEGRAL_SIZE; k++)
+    {
+        if (opt.integral_inith[k])
+        {
+            uint32_t dst_buf[BUFFSIZE] = { 0 };
+            switch (k)
+            {
+            case 0:
+                HEADER0("integral_init4h");
+                break;
+            case 1:
+                HEADER0("integral_init8h");
+                break;
+            case 2:
+                HEADER0("integral_init12h");
+                break;
+            case 3:
+                HEADER0("integral_init16h");
+                break;
+            case 4:
+                HEADER0("integral_init24h");
+                break;
+            case 5:
+                HEADER0("integral_init32h");
+                break;
+            default:
+                break;
+            }
+            REPORT_SPEEDUP(opt.integral_inith[k], ref.integral_inith[k], dst_buf, pbuf1, STRIDE);
+        }
+    }
 }
diff -r fd01abfc7898 -r d666bdd4a1e1 source/test/pixelharness.h
--- a/source/test/pixelharness.h	Tue May 02 14:09:48 2017 -0700
+++ b/source/test/pixelharness.h	Wed May 03 11:26:26 2017 +0530
@@ -126,6 +126,8 @@
     bool check_pelFilterLumaStrong_H(pelFilterLumaStrong_t ref, pelFilterLumaStrong_t opt);
     bool check_pelFilterChroma_V(pelFilterChroma_t ref, pelFilterChroma_t opt);
     bool check_pelFilterChroma_H(pelFilterChroma_t ref, pelFilterChroma_t opt);
+    bool check_integral_initv(integralv_t ref, integralv_t opt);
+    bool check_integral_inith(integralh_t ref, integralh_t opt);
 
 public:
 
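
A possible follow-up on the pixelharness.cpp hunks above: the six-way switch blocks that report which kernel failed could be driven by a small width table indexed by the IntegralSize enum, which would also remove the repetition. A hedged sketch only; integralWidths[] is an illustrative name, not something this patch adds:

    /* hypothetical, for illustration: map IntegralSize entries to block widths */
    static const int integralWidths[NUM_INTEGRAL_SIZE] = { 4, 8, 12, 16, 24, 32 };

    for (int k = 0; k < NUM_INTEGRAL_SIZE; k++)
    {
        if (opt.integral_initv[k] && !check_integral_initv(ref.integral_initv[k], opt.integral_initv[k]))
        {
            printf("Integral%dv failed!\n", integralWidths[k]);
            return false;
        }
    }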