[x265] [PATCH] POWER Vector Primitives

Peter Kovář peter.kovar at reflexion.tv
Fri Sep 11 21:22:08 CEST 2015


# HG changeset patch
# User Peter Kovář <peter.kovar at reflexion.tv>
# Date 1441995171 -7200
#      Fri Sep 11 20:12:51 2015 +0200
# Node ID 3cafbcfb10ed43f3b7b59430ef17f563c2f747f1
# Parent  f0b3b46f172e88b35e8863ba78df854384e8956c
POWER Vector Primitives

This is a first step in optimizing x265 for POWER: it adds a POWER/ directory with AltiVec-based vector primitives and wires them into the common library build when POWER is enabled.

diff -r f0b3b46f172e -r 3cafbcfb10ed source/common/CMakeLists.txt
--- a/source/common/CMakeLists.txt	Fri Sep 11 20:44:56 2015 +0200
+++ b/source/common/CMakeLists.txt	Fri Sep 11 20:12:51 2015 +0200
@@ -87,6 +87,14 @@
 string(REPLACE ";" " " VERSION_FLAGS "${VFLAGS}")
 set_source_files_properties(version.cpp PROPERTIES COMPILE_FLAGS ${VERSION_FLAGS})
 
+if(POWER)
+    set(C_SRCS vector-dct.cpp vector-intrapred.cpp vector-ipfilter.cpp vector-loopfilter.cpp vector-pixel.cpp vector-primitives.cpp vector-sao.cpp)
+    foreach(SRC ${C_SRCS})
+        set(POWER_PRIMITIVES ${POWER_PRIMITIVES} POWER/${SRC})
+    endforeach()
+    source_group(POWER FILES ${POWER_PRIMITIVES})
+endif(POWER)
+
 check_symbol_exists(strtok_r "string.h" HAVE_STRTOK_R)
 if(HAVE_STRTOK_R)
     set_source_files_properties(param.cpp PROPERTIES COMPILE_FLAGS -DHAVE_STRTOK_R=1)
@@ -100,7 +108,7 @@
 endif(WIN32)
 
 add_library(common OBJECT
-    ${ASM_PRIMITIVES} ${VEC_PRIMITIVES} ${WINXP}
+    ${ASM_PRIMITIVES} ${VEC_PRIMITIVES} ${POWER_PRIMITIVES} ${WINXP}
     primitives.cpp primitives.h
     pixel.cpp dct.cpp ipfilter.cpp intrapred.cpp loopfilter.cpp
     constants.cpp constants.h
diff -r f0b3b46f172e -r 3cafbcfb10ed source/common/POWER/cpu.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/source/common/POWER/cpu.h	Fri Sep 11 20:12:51 2015 +0200
@@ -0,0 +1,35 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Peter Kovář <peter.kovar at reflexion.tv>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef _CPU_H_
+#define _CPU_H_ 1
+
+static inline int64_t cpu_read_timebase (void)
+{
+    register int64_t t ;
+
+    asm volatile ("mftb    %0\n\t" : "=r" (t)) ;
+    return t;
+}
+
+#endif /* _CPU_H_ */
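
Note: cpu.h only provides cpu_read_timebase(), which reads the PowerPC time base
register via mftb; it is handy for quick tick-level timing while tuning the vector
kernels. A minimal usage sketch (the timed call and the helper name below are
placeholders, not part of this patch):

    #include <cstdio>
    #include "cpu.h"   // the header added above

    static void time_one_call()
    {
        int64_t t0 = cpu_read_timebase();
        // ... invoke the primitive under test here ...
        int64_t t1 = cpu_read_timebase();
        std::printf("elapsed: %lld timebase ticks\n", (long long)(t1 - t0));
    }
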
diff -r f0b3b46f172e -r 3cafbcfb10ed source/common/POWER/vector-common.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/source/common/POWER/vector-common.h	Fri Sep 11 20:12:51 2015 +0200
@@ -0,0 +1,82 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Peter Kovář <peter.kovar at reflexion.tv>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef _VECTOR_COMMON_H_
+#define _VECTOR_COMMON_H_ 1
+
+#include <altivec.h>
+#include <stdint.h>
+
+#include "cpu.h"
+
+#include "vector-types.h"
+
+#define ALIGNED( var, n ) var __attribute__((aligned(n)))
+
+#define ALIGNED_32( var ) ALIGNED( var, 32 )
+#define ALIGNED_16( var ) ALIGNED( var, 16 )
+#define ALIGNED_8( var )  ALIGNED( var, 8 )
+#define ALIGNED_4( var )  ALIGNED( var, 4 )
+
+#define ZERO register v_u8_t zero = vec_splat_u8(0)
+
+#define v_u8_zero  (v_u8_t)  zero
+#define v_s8_zero  (v_s8_t)  zero
+#define v_u16_zero (v_u16_t) zero
+#define v_s16_zero (v_s16_t) zero
+#define v_u32_zero (v_u32_t) zero
+#define v_s32_zero (v_s32_t) zero
+
+/***********************************************************************
+ * PREP_LOAD:       declares the two scratch vectors needed for unaligned loads
+ * VEC_LOAD:        loads n bytes from u8 * p into vector v of type t, using the permute vector declared for source g by PREP_LOAD_SRC
+ * VEC_LOAD_G:      loads n bytes from u8 * p into vector v of type t - use when no permute vector has been prepared for the source
+ * VEC_LOAD_OFFSET: as above, but with the permute (offset) vector o supplied directly
+ **********************************************************************/
+#define PREP_LOAD     \
+    v_u8_t _hv, _lv
+
+#define PREP_LOAD_SRC( src )              \
+    v_u8_t _##src##_ = vec_lvsl(0, src)
+
+#define VEC_LOAD_G( p, v, n, t )                 \
+    _hv = vec_ld( 0, p );                        \
+    v   = (t) vec_lvsl( 0, p );                  \
+    _lv = vec_ld( n - 1, p );                    \
+    v   = (t) vec_perm( _hv, _lv, (v_u8_t) v )
+
+#define VEC_LOAD( p, v, n, t, g )                   \
+    _hv = vec_ld( 0, p );                           \
+    _lv = vec_ld( n - 1, p );                       \
+    v = (t) vec_perm( _hv, _lv, (v_u8_t) _##g##_ )
+
+#define VEC_LOAD_OFFSET( p, v, n, t, o )         \
+    _hv = vec_ld( 0, p);                         \
+    _lv = vec_ld( n - 1, p );                    \
+    v   = (t) vec_perm( _hv, _lv, (v_u8_t) o )
+
+#define VEC_LOAD_PARTIAL( p, v, n, t, g)               \
+    _hv = vec_ld( 0, p);                               \
+    v   = (t) vec_perm( _hv, _hv, (v_u8_t) _##g##_ )
+
+#endif /* _VECTOR_COMMON_H_ */
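
The load macros above implement the standard AltiVec unaligned-load pattern: two
vec_ld calls that straddle the target bytes, combined through vec_perm with a
vec_lvsl-generated permute vector. A minimal sketch of how they are meant to be
used, modelled on the SAD kernel in vector-pixel.cpp further down; the helper name
absdiff16 is ours, not part of the patch:

    #include "vector-common.h"

    // accumulate |a[i] - b[i]| over 16 bytes of two (possibly unaligned) rows
    static inline v_s32_t absdiff16(const uint8_t* a, const uint8_t* b, v_s32_t acc)
    {
        PREP_LOAD;                       // declares the _hv/_lv scratch vectors
        v_u8_t av, bv;
        VEC_LOAD_G(a, av, 16, v_u8_t);   // unaligned load of 16 bytes from a
        VEC_LOAD_G(b, bv, 16, v_u8_t);   // unaligned load of 16 bytes from b
        return (v_s32_t) vec_sum4s(vec_sub(vec_max(av, bv), vec_min(av, bv)),
                                   (v_u32_t) acc);
    }
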
diff -r f0b3b46f172e -r 3cafbcfb10ed source/common/POWER/vector-dct.cpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/source/common/POWER/vector-dct.cpp	Fri Sep 11 20:12:51 2015 +0200
@@ -0,0 +1,1000 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Peter Kovář <peter.kovar at reflexion.tv>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+#include "contexts.h"   // costCoeffNxN_c
+#include "threading.h"  // CLZ
+
+using namespace X265_NS;
+
+// Fast DST Algorithm. Full matrix multiplication for DST and Fast DST algorithm
+// give identical results
+static void fastForwardDst(const int16_t* block, int16_t* coeff, int shift)  // input block, output coeff
+{
+    int c[4];
+    int rnd_factor = 1 << (shift - 1);
+
+    for (int i = 0; i < 4; i++)
+    {
+        // Intermediate Variables
+        c[0] = block[4 * i + 0] + block[4 * i + 3];
+        c[1] = block[4 * i + 1] + block[4 * i + 3];
+        c[2] = block[4 * i + 0] - block[4 * i + 1];
+        c[3] = 74 * block[4 * i + 2];
+
+        coeff[i] =      (int16_t)((29 * c[0] + 55 * c[1]  + c[3] + rnd_factor) >> shift);
+        coeff[4 + i] =  (int16_t)((74 * (block[4 * i + 0] + block[4 * i + 1] - block[4 * i + 3]) + rnd_factor) >> shift);
+        coeff[8 + i] =  (int16_t)((29 * c[2] + 55 * c[0]  - c[3] + rnd_factor) >> shift);
+        coeff[12 + i] = (int16_t)((55 * c[2] - 29 * c[1] + c[3] + rnd_factor) >> shift);
+    }
+}
+
+static void inversedst(const int16_t* tmp, int16_t* block, int shift)  // input tmp, output block
+{
+    int i, c[4];
+    int rnd_factor = 1 << (shift - 1);
+
+    for (i = 0; i < 4; i++)
+    {
+        // Intermediate Variables
+        c[0] = tmp[i] + tmp[8 + i];
+        c[1] = tmp[8 + i] + tmp[12 + i];
+        c[2] = tmp[i] - tmp[12 + i];
+        c[3] = 74 * tmp[4 + i];
+
+        block[4 * i + 0] = (int16_t)x265_clip3(-32768, 32767, (29 * c[0] + 55 * c[1]     + c[3]               + rnd_factor) >> shift);
+        block[4 * i + 1] = (int16_t)x265_clip3(-32768, 32767, (55 * c[2] - 29 * c[1]     + c[3]               + rnd_factor) >> shift);
+        block[4 * i + 2] = (int16_t)x265_clip3(-32768, 32767, (74 * (tmp[i] - tmp[8 + i]  + tmp[12 + i])      + rnd_factor) >> shift);
+        block[4 * i + 3] = (int16_t)x265_clip3(-32768, 32767, (55 * c[0] + 29 * c[2]     - c[3]               + rnd_factor) >> shift);
+    }
+}
+
+static void partialButterfly16(const int16_t* src, int16_t* dst, int shift, int line)
+{
+    int j, k;
+    int E[8], O[8];
+    int EE[4], EO[4];
+    int EEE[2], EEO[2];
+    int add = 1 << (shift - 1);
+
+    for (j = 0; j < line; j++)
+    {
+        /* E and O */
+        for (k = 0; k < 8; k++)
+        {
+            E[k] = src[k] + src[15 - k];
+            O[k] = src[k] - src[15 - k];
+        }
+
+        /* EE and EO */
+        for (k = 0; k < 4; k++)
+        {
+            EE[k] = E[k] + E[7 - k];
+            EO[k] = E[k] - E[7 - k];
+        }
+
+        /* EEE and EEO */
+        EEE[0] = EE[0] + EE[3];
+        EEO[0] = EE[0] - EE[3];
+        EEE[1] = EE[1] + EE[2];
+        EEO[1] = EE[1] - EE[2];
+
+        dst[0] = (int16_t)((g_t16[0][0] * EEE[0] + g_t16[0][1] * EEE[1] + add) >> shift);
+        dst[8 * line] = (int16_t)((g_t16[8][0] * EEE[0] + g_t16[8][1] * EEE[1] + add) >> shift);
+        dst[4 * line] = (int16_t)((g_t16[4][0] * EEO[0] + g_t16[4][1] * EEO[1] + add) >> shift);
+        dst[12 * line] = (int16_t)((g_t16[12][0] * EEO[0] + g_t16[12][1] * EEO[1] + add) >> shift);
+
+        for (k = 2; k < 16; k += 4)
+        {
+            dst[k * line] = (int16_t)((g_t16[k][0] * EO[0] + g_t16[k][1] * EO[1] + g_t16[k][2] * EO[2] +
+                                       g_t16[k][3] * EO[3] + add) >> shift);
+        }
+
+        for (k = 1; k < 16; k += 2)
+        {
+            dst[k * line] =  (int16_t)((g_t16[k][0] * O[0] + g_t16[k][1] * O[1] + g_t16[k][2] * O[2] + g_t16[k][3] * O[3] +
+                                        g_t16[k][4] * O[4] + g_t16[k][5] * O[5] + g_t16[k][6] * O[6] + g_t16[k][7] * O[7] +
+                                        add) >> shift);
+        }
+
+        src += 16;
+        dst++;
+    }
+}
+
+static void partialButterfly32(const int16_t* src, int16_t* dst, int shift, int line)
+{
+    int j, k;
+    int E[16], O[16];
+    int EE[8], EO[8];
+    int EEE[4], EEO[4];
+    int EEEE[2], EEEO[2];
+    int add = 1 << (shift - 1);
+
+    for (j = 0; j < line; j++)
+    {
+        /* E and O*/
+        for (k = 0; k < 16; k++)
+        {
+            E[k] = src[k] + src[31 - k];
+            O[k] = src[k] - src[31 - k];
+        }
+
+        /* EE and EO */
+        for (k = 0; k < 8; k++)
+        {
+            EE[k] = E[k] + E[15 - k];
+            EO[k] = E[k] - E[15 - k];
+        }
+
+        /* EEE and EEO */
+        for (k = 0; k < 4; k++)
+        {
+            EEE[k] = EE[k] + EE[7 - k];
+            EEO[k] = EE[k] - EE[7 - k];
+        }
+
+        /* EEEE and EEEO */
+        EEEE[0] = EEE[0] + EEE[3];
+        EEEO[0] = EEE[0] - EEE[3];
+        EEEE[1] = EEE[1] + EEE[2];
+        EEEO[1] = EEE[1] - EEE[2];
+
+        dst[0] = (int16_t)((g_t32[0][0] * EEEE[0] + g_t32[0][1] * EEEE[1] + add) >> shift);
+        dst[16 * line] = (int16_t)((g_t32[16][0] * EEEE[0] + g_t32[16][1] * EEEE[1] + add) >> shift);
+        dst[8 * line] = (int16_t)((g_t32[8][0] * EEEO[0] + g_t32[8][1] * EEEO[1] + add) >> shift);
+        dst[24 * line] = (int16_t)((g_t32[24][0] * EEEO[0] + g_t32[24][1] * EEEO[1] + add) >> shift);
+        for (k = 4; k < 32; k += 8)
+        {
+            dst[k * line] = (int16_t)((g_t32[k][0] * EEO[0] + g_t32[k][1] * EEO[1] + g_t32[k][2] * EEO[2] +
+                                       g_t32[k][3] * EEO[3] + add) >> shift);
+        }
+
+        for (k = 2; k < 32; k += 4)
+        {
+            dst[k * line] = (int16_t)((g_t32[k][0] * EO[0] + g_t32[k][1] * EO[1] + g_t32[k][2] * EO[2] +
+                                       g_t32[k][3] * EO[3] + g_t32[k][4] * EO[4] + g_t32[k][5] * EO[5] +
+                                       g_t32[k][6] * EO[6] + g_t32[k][7] * EO[7] + add) >> shift);
+        }
+
+        for (k = 1; k < 32; k += 2)
+        {
+            dst[k * line] = (int16_t)((g_t32[k][0] * O[0] + g_t32[k][1] * O[1] + g_t32[k][2] * O[2] + g_t32[k][3] * O[3] +
+                                       g_t32[k][4] * O[4] + g_t32[k][5] * O[5] + g_t32[k][6] * O[6] + g_t32[k][7] * O[7] +
+                                       g_t32[k][8] * O[8] + g_t32[k][9] * O[9] + g_t32[k][10] * O[10] + g_t32[k][11] *
+                                       O[11] + g_t32[k][12] * O[12] + g_t32[k][13] * O[13] + g_t32[k][14] * O[14] +
+                                       g_t32[k][15] * O[15] + add) >> shift);
+        }
+
+        src += 32;
+        dst++;
+    }
+}
+
+static void partialButterfly8(const int16_t* src, int16_t* dst, int shift, int line)
+{
+    int j, k;
+    int E[4], O[4];
+    int EE[2], EO[2];
+    int add = 1 << (shift - 1);
+
+    for (j = 0; j < line; j++)
+    {
+        /* E and O*/
+        for (k = 0; k < 4; k++)
+        {
+            E[k] = src[k] + src[7 - k];
+            O[k] = src[k] - src[7 - k];
+        }
+
+        /* EE and EO */
+        EE[0] = E[0] + E[3];
+        EO[0] = E[0] - E[3];
+        EE[1] = E[1] + E[2];
+        EO[1] = E[1] - E[2];
+
+        dst[0] = (int16_t)((g_t8[0][0] * EE[0] + g_t8[0][1] * EE[1] + add) >> shift);
+        dst[4 * line] = (int16_t)((g_t8[4][0] * EE[0] + g_t8[4][1] * EE[1] + add) >> shift);
+        dst[2 * line] = (int16_t)((g_t8[2][0] * EO[0] + g_t8[2][1] * EO[1] + add) >> shift);
+        dst[6 * line] = (int16_t)((g_t8[6][0] * EO[0] + g_t8[6][1] * EO[1] + add) >> shift);
+
+        dst[line] = (int16_t)((g_t8[1][0] * O[0] + g_t8[1][1] * O[1] + g_t8[1][2] * O[2] + g_t8[1][3] * O[3] + add) >> shift);
+        dst[3 * line] = (int16_t)((g_t8[3][0] * O[0] + g_t8[3][1] * O[1] + g_t8[3][2] * O[2] + g_t8[3][3] * O[3] + add) >> shift);
+        dst[5 * line] = (int16_t)((g_t8[5][0] * O[0] + g_t8[5][1] * O[1] + g_t8[5][2] * O[2] + g_t8[5][3] * O[3] + add) >> shift);
+        dst[7 * line] = (int16_t)((g_t8[7][0] * O[0] + g_t8[7][1] * O[1] + g_t8[7][2] * O[2] + g_t8[7][3] * O[3] + add) >> shift);
+
+        src += 8;
+        dst++;
+    }
+}
+
+static void partialButterflyInverse4(const int16_t* src, int16_t* dst, int shift, int line)
+{
+    int j;
+    int E[2], O[2];
+    int add = 1 << (shift - 1);
+
+    for (j = 0; j < line; j++)
+    {
+        /* Utilizing symmetry properties to the maximum to minimize the number of multiplications */
+        O[0] = g_t4[1][0] * src[line] + g_t4[3][0] * src[3 * line];
+        O[1] = g_t4[1][1] * src[line] + g_t4[3][1] * src[3 * line];
+        E[0] = g_t4[0][0] * src[0] + g_t4[2][0] * src[2 * line];
+        E[1] = g_t4[0][1] * src[0] + g_t4[2][1] * src[2 * line];
+
+        /* Combining even and odd terms at each hierarchy levels to calculate the final spatial domain vector */
+        dst[0] = (int16_t)(x265_clip3(-32768, 32767, (E[0] + O[0] + add) >> shift));
+        dst[1] = (int16_t)(x265_clip3(-32768, 32767, (E[1] + O[1] + add) >> shift));
+        dst[2] = (int16_t)(x265_clip3(-32768, 32767, (E[1] - O[1] + add) >> shift));
+        dst[3] = (int16_t)(x265_clip3(-32768, 32767, (E[0] - O[0] + add) >> shift));
+
+        src++;
+        dst += 4;
+    }
+}
+
+static void partialButterflyInverse8(const int16_t* src, int16_t* dst, int shift, int line)
+{
+    int j, k;
+    int E[4], O[4];
+    int EE[2], EO[2];
+    int add = 1 << (shift - 1);
+
+    for (j = 0; j < line; j++)
+    {
+        /* Utilizing symmetry properties to the maximum to minimize the number of multiplications */
+        for (k = 0; k < 4; k++)
+        {
+            O[k] = g_t8[1][k] * src[line] + g_t8[3][k] * src[3 * line] + g_t8[5][k] * src[5 * line] + g_t8[7][k] * src[7 * line];
+        }
+
+        EO[0] = g_t8[2][0] * src[2 * line] + g_t8[6][0] * src[6 * line];
+        EO[1] = g_t8[2][1] * src[2 * line] + g_t8[6][1] * src[6 * line];
+        EE[0] = g_t8[0][0] * src[0] + g_t8[4][0] * src[4 * line];
+        EE[1] = g_t8[0][1] * src[0] + g_t8[4][1] * src[4 * line];
+
+        /* Combining even and odd terms at each hierarchy levels to calculate the final spatial domain vector */
+        E[0] = EE[0] + EO[0];
+        E[3] = EE[0] - EO[0];
+        E[1] = EE[1] + EO[1];
+        E[2] = EE[1] - EO[1];
+        for (k = 0; k < 4; k++)
+        {
+            dst[k] = (int16_t)x265_clip3(-32768, 32767, (E[k] + O[k] + add) >> shift);
+            dst[k + 4] = (int16_t)x265_clip3(-32768, 32767, (E[3 - k] - O[3 - k] + add) >> shift);
+        }
+
+        src++;
+        dst += 8;
+    }
+}
+
+static void partialButterflyInverse16(const int16_t* src, int16_t* dst, int shift, int line)
+{
+    int j, k;
+    int E[8], O[8];
+    int EE[4], EO[4];
+    int EEE[2], EEO[2];
+    int add = 1 << (shift - 1);
+
+    for (j = 0; j < line; j++)
+    {
+        /* Utilizing symmetry properties to the maximum to minimize the number of multiplications */
+        for (k = 0; k < 8; k++)
+        {
+            O[k] = g_t16[1][k] * src[line] + g_t16[3][k] * src[3 * line] + g_t16[5][k] * src[5 * line] + g_t16[7][k] * src[7 * line] +
+                g_t16[9][k] * src[9 * line] + g_t16[11][k] * src[11 * line] + g_t16[13][k] * src[13 * line] + g_t16[15][k] * src[15 * line];
+        }
+
+        for (k = 0; k < 4; k++)
+        {
+            EO[k] = g_t16[2][k] * src[2 * line] + g_t16[6][k] * src[6 * line] + g_t16[10][k] * src[10 * line] + g_t16[14][k] * src[14 * line];
+        }
+
+        EEO[0] = g_t16[4][0] * src[4 * line] + g_t16[12][0] * src[12 * line];
+        EEE[0] = g_t16[0][0] * src[0] + g_t16[8][0] * src[8 * line];
+        EEO[1] = g_t16[4][1] * src[4 * line] + g_t16[12][1] * src[12 * line];
+        EEE[1] = g_t16[0][1] * src[0] + g_t16[8][1] * src[8 * line];
+
+        /* Combining even and odd terms at each hierarchy levels to calculate the final spatial domain vector */
+        for (k = 0; k < 2; k++)
+        {
+            EE[k] = EEE[k] + EEO[k];
+            EE[k + 2] = EEE[1 - k] - EEO[1 - k];
+        }
+
+        for (k = 0; k < 4; k++)
+        {
+            E[k] = EE[k] + EO[k];
+            E[k + 4] = EE[3 - k] - EO[3 - k];
+        }
+
+        for (k = 0; k < 8; k++)
+        {
+            dst[k]   = (int16_t)x265_clip3(-32768, 32767, (E[k] + O[k] + add) >> shift);
+            dst[k + 8] = (int16_t)x265_clip3(-32768, 32767, (E[7 - k] - O[7 - k] + add) >> shift);
+        }
+
+        src++;
+        dst += 16;
+    }
+}
+
+static void partialButterflyInverse32(const int16_t* src, int16_t* dst, int shift, int line)
+{
+    int j, k;
+    int E[16], O[16];
+    int EE[8], EO[8];
+    int EEE[4], EEO[4];
+    int EEEE[2], EEEO[2];
+    int add = 1 << (shift - 1);
+
+    for (j = 0; j < line; j++)
+    {
+        /* Utilizing symmetry properties to the maximum to minimize the number of multiplications */
+        for (k = 0; k < 16; k++)
+        {
+            O[k] = g_t32[1][k] * src[line] + g_t32[3][k] * src[3 * line] + g_t32[5][k] * src[5 * line] + g_t32[7][k] * src[7 * line] +
+                g_t32[9][k] * src[9 * line] + g_t32[11][k] * src[11 * line] + g_t32[13][k] * src[13 * line] + g_t32[15][k] * src[15 * line] +
+                g_t32[17][k] * src[17 * line] + g_t32[19][k] * src[19 * line] + g_t32[21][k] * src[21 * line] + g_t32[23][k] * src[23 * line] +
+                g_t32[25][k] * src[25 * line] + g_t32[27][k] * src[27 * line] + g_t32[29][k] * src[29 * line] + g_t32[31][k] * src[31 * line];
+        }
+
+        for (k = 0; k < 8; k++)
+        {
+            EO[k] = g_t32[2][k] * src[2 * line] + g_t32[6][k] * src[6 * line] + g_t32[10][k] * src[10 * line] + g_t32[14][k] * src[14 * line] +
+                g_t32[18][k] * src[18 * line] + g_t32[22][k] * src[22 * line] + g_t32[26][k] * src[26 * line] + g_t32[30][k] * src[30 * line];
+        }
+
+        for (k = 0; k < 4; k++)
+        {
+            EEO[k] = g_t32[4][k] * src[4 * line] + g_t32[12][k] * src[12 * line] + g_t32[20][k] * src[20 * line] + g_t32[28][k] * src[28 * line];
+        }
+
+        EEEO[0] = g_t32[8][0] * src[8 * line] + g_t32[24][0] * src[24 * line];
+        EEEO[1] = g_t32[8][1] * src[8 * line] + g_t32[24][1] * src[24 * line];
+        EEEE[0] = g_t32[0][0] * src[0] + g_t32[16][0] * src[16 * line];
+        EEEE[1] = g_t32[0][1] * src[0] + g_t32[16][1] * src[16 * line];
+
+        /* Combining even and odd terms at each hierarchy levels to calculate the final spatial domain vector */
+        EEE[0] = EEEE[0] + EEEO[0];
+        EEE[3] = EEEE[0] - EEEO[0];
+        EEE[1] = EEEE[1] + EEEO[1];
+        EEE[2] = EEEE[1] - EEEO[1];
+        for (k = 0; k < 4; k++)
+        {
+            EE[k] = EEE[k] + EEO[k];
+            EE[k + 4] = EEE[3 - k] - EEO[3 - k];
+        }
+
+        for (k = 0; k < 8; k++)
+        {
+            E[k] = EE[k] + EO[k];
+            E[k + 8] = EE[7 - k] - EO[7 - k];
+        }
+
+        for (k = 0; k < 16; k++)
+        {
+            dst[k] = (int16_t)x265_clip3(-32768, 32767, (E[k] + O[k] + add) >> shift);
+            dst[k + 16] = (int16_t)x265_clip3(-32768, 32767, (E[15 - k] - O[15 - k] + add) >> shift);
+        }
+
+        src++;
+        dst += 32;
+    }
+}
+
+static void partialButterfly4(const int16_t* src, int16_t* dst, int shift, int line)
+{
+    int j;
+    int E[2], O[2];
+    int add = 1 << (shift - 1);
+
+    for (j = 0; j < line; j++)
+    {
+        /* E and O */
+        E[0] = src[0] + src[3];
+        O[0] = src[0] - src[3];
+        E[1] = src[1] + src[2];
+        O[1] = src[1] - src[2];
+
+        dst[0] = (int16_t)((g_t4[0][0] * E[0] + g_t4[0][1] * E[1] + add) >> shift);
+        dst[2 * line] = (int16_t)((g_t4[2][0] * E[0] + g_t4[2][1] * E[1] + add) >> shift);
+        dst[line] = (int16_t)((g_t4[1][0] * O[0] + g_t4[1][1] * O[1] + add) >> shift);
+        dst[3 * line] = (int16_t)((g_t4[3][0] * O[0] + g_t4[3][1] * O[1] + add) >> shift);
+
+        src += 4;
+        dst++;
+    }
+}
+
+static void dst4_v(const int16_t* src, int16_t* dst, intptr_t srcStride)
+{
+    const int shift_1st = 1 + X265_DEPTH - 8;
+    const int shift_2nd = 8;
+
+    ALIGN_VAR_32(int16_t, coef[4 * 4]);
+    ALIGN_VAR_32(int16_t, block[4 * 4]);
+
+    for (int i = 0; i < 4; i++)
+    {
+        memcpy(&block[i * 4], &src[i * srcStride], 4 * sizeof(int16_t));
+    }
+
+    fastForwardDst(block, coef, shift_1st);
+    fastForwardDst(coef, dst, shift_2nd);
+}
+
+static void dct4_v(const int16_t* src, int16_t* dst, intptr_t srcStride)
+{
+    const int shift_1st = 1 + X265_DEPTH - 8;
+    const int shift_2nd = 8;
+
+    ALIGN_VAR_32(int16_t, coef[4 * 4]);
+    ALIGN_VAR_32(int16_t, block[4 * 4]);
+
+    for (int i = 0; i < 4; i++)
+    {
+        memcpy(&block[i * 4], &src[i * srcStride], 4 * sizeof(int16_t));
+    }
+
+    partialButterfly4(block, coef, shift_1st, 4);
+    partialButterfly4(coef, dst, shift_2nd, 4);
+}
+
+static void dct8_v(const int16_t* src, int16_t* dst, intptr_t srcStride)
+{
+    const int shift_1st = 2 + X265_DEPTH - 8;
+    const int shift_2nd = 9;
+
+    ALIGN_VAR_32(int16_t, coef[8 * 8]);
+    ALIGN_VAR_32(int16_t, block[8 * 8]);
+
+    for (int i = 0; i < 8; i++)
+    {
+        memcpy(&block[i * 8], &src[i * srcStride], 8 * sizeof(int16_t));
+    }
+
+    partialButterfly8(block, coef, shift_1st, 8);
+    partialButterfly8(coef, dst, shift_2nd, 8);
+}
+
+static void dct16_v(const int16_t* src, int16_t* dst, intptr_t srcStride)
+{
+    const int shift_1st = 3 + X265_DEPTH - 8;
+    const int shift_2nd = 10;
+
+    ALIGN_VAR_32(int16_t, coef[16 * 16]);
+    ALIGN_VAR_32(int16_t, block[16 * 16]);
+
+    for (int i = 0; i < 16; i++)
+    {
+        memcpy(&block[i * 16], &src[i * srcStride], 16 * sizeof(int16_t));
+    }
+
+    partialButterfly16(block, coef, shift_1st, 16);
+    partialButterfly16(coef, dst, shift_2nd, 16);
+}
+
+static void dct32_v(const int16_t* src, int16_t* dst, intptr_t srcStride)
+{
+    const int shift_1st = 4 + X265_DEPTH - 8;
+    const int shift_2nd = 11;
+
+    ALIGN_VAR_32(int16_t, coef[32 * 32]);
+    ALIGN_VAR_32(int16_t, block[32 * 32]);
+
+    for (int i = 0; i < 32; i++)
+    {
+        memcpy(&block[i * 32], &src[i * srcStride], 32 * sizeof(int16_t));
+    }
+
+    partialButterfly32(block, coef, shift_1st, 32);
+    partialButterfly32(coef, dst, shift_2nd, 32);
+}
+
+static void idst4_v(const int16_t* src, int16_t* dst, intptr_t dstStride)
+{
+    const int shift_1st = 7;
+    const int shift_2nd = 12 - (X265_DEPTH - 8);
+
+    ALIGN_VAR_32(int16_t, coef[4 * 4]);
+    ALIGN_VAR_32(int16_t, block[4 * 4]);
+
+    inversedst(src, coef, shift_1st);   // Inverse DST by fast algorithm, first pass (coeff input, intermediate output)
+    inversedst(coef, block, shift_2nd); // Inverse DST by fast algorithm, second pass (intermediate input, residual output)
+
+    for (int i = 0; i < 4; i++)
+    {
+        memcpy(&dst[i * dstStride], &block[i * 4], 4 * sizeof(int16_t));
+    }
+}
+
+static void idct4_v(const int16_t* src, int16_t* dst, intptr_t dstStride)
+{
+    const int shift_1st = 7;
+    const int shift_2nd = 12 - (X265_DEPTH - 8);
+
+    ALIGN_VAR_32(int16_t, coef[4 * 4]);
+    ALIGN_VAR_32(int16_t, block[4 * 4]);
+
+    partialButterflyInverse4(src, coef, shift_1st, 4);   // Inverse DCT by partial butterfly, first pass (coeff input, intermediate output)
+    partialButterflyInverse4(coef, block, shift_2nd, 4); // Inverse DCT by partial butterfly, second pass (intermediate input, residual output)
+
+    for (int i = 0; i < 4; i++)
+    {
+        memcpy(&dst[i * dstStride], &block[i * 4], 4 * sizeof(int16_t));
+    }
+}
+
+static void idct8_v(const int16_t* src, int16_t* dst, intptr_t dstStride)
+{
+    const int shift_1st = 7;
+    const int shift_2nd = 12 - (X265_DEPTH - 8);
+
+    ALIGN_VAR_32(int16_t, coef[8 * 8]);
+    ALIGN_VAR_32(int16_t, block[8 * 8]);
+
+    partialButterflyInverse8(src, coef, shift_1st, 8);
+    partialButterflyInverse8(coef, block, shift_2nd, 8);
+
+    for (int i = 0; i < 8; i++)
+    {
+        memcpy(&dst[i * dstStride], &block[i * 8], 8 * sizeof(int16_t));
+    }
+}
+
+static void idct16_v(const int16_t* src, int16_t* dst, intptr_t dstStride)
+{
+    const int shift_1st = 7;
+    const int shift_2nd = 12 - (X265_DEPTH - 8);
+
+    ALIGN_VAR_32(int16_t, coef[16 * 16]);
+    ALIGN_VAR_32(int16_t, block[16 * 16]);
+
+    partialButterflyInverse16(src, coef, shift_1st, 16);
+    partialButterflyInverse16(coef, block, shift_2nd, 16);
+
+    for (int i = 0; i < 16; i++)
+    {
+        memcpy(&dst[i * dstStride], &block[i * 16], 16 * sizeof(int16_t));
+    }
+}
+
+static void idct32_v(const int16_t* src, int16_t* dst, intptr_t dstStride)
+{
+    const int shift_1st = 7;
+    const int shift_2nd = 12 - (X265_DEPTH - 8);
+
+    ALIGN_VAR_32(int16_t, coef[32 * 32]);
+    ALIGN_VAR_32(int16_t, block[32 * 32]);
+
+    partialButterflyInverse32(src, coef, shift_1st, 32);
+    partialButterflyInverse32(coef, block, shift_2nd, 32);
+
+    for (int i = 0; i < 32; i++)
+    {
+        memcpy(&dst[i * dstStride], &block[i * 32], 32 * sizeof(int16_t));
+    }
+}
+
+static void dequant_normal_v(const int16_t* quantCoef, int16_t* coef, int num, int scale, int shift)
+{
+#if HIGH_BIT_DEPTH
+    X265_CHECK(scale < 32768 || ((scale & 3) == 0 && shift > (X265_DEPTH - 8)), "dequant invalid scale %d\n", scale);
+#else
+    // NOTE: maximum of scale is (72 * 256)
+    X265_CHECK(scale < 32768, "dequant invalid scale %d\n", scale);
+#endif
+    X265_CHECK(num <= 32 * 32, "dequant num %d too large\n", num);
+    X265_CHECK((num % 8) == 0, "dequant num %d not multiple of 8\n", num);
+    X265_CHECK(shift <= 10, "shift too large %d\n", shift);
+    X265_CHECK(((intptr_t)coef & 31) == 0, "dequant coef buffer not aligned\n");
+
+    int add, coeffQ;
+
+    add = 1 << (shift - 1);
+
+    for (int n = 0; n < num; n++)
+    {
+        coeffQ = (quantCoef[n] * scale + add) >> shift;
+        coef[n] = (int16_t)x265_clip3(-32768, 32767, coeffQ);
+    }
+}
+
+static void dequant_scaling_v(const int16_t* quantCoef, const int32_t* deQuantCoef, int16_t* coef, int num, int per, int shift)
+{
+    X265_CHECK(num <= 32 * 32, "dequant num %d too large\n", num);
+
+    int add, coeffQ;
+
+    shift += 4;
+
+    if (shift > per)
+    {
+        add = 1 << (shift - per - 1);
+
+        for (int n = 0; n < num; n++)
+        {
+            coeffQ = ((quantCoef[n] * deQuantCoef[n]) + add) >> (shift - per);
+            coef[n] = (int16_t)x265_clip3(-32768, 32767, coeffQ);
+        }
+    }
+    else
+    {
+        for (int n = 0; n < num; n++)
+        {
+            coeffQ   = x265_clip3(-32768, 32767, quantCoef[n] * deQuantCoef[n]);
+            coef[n] = (int16_t)x265_clip3(-32768, 32767, coeffQ << (per - shift));
+        }
+    }
+}
+
+static uint32_t quant_v(const int16_t* coef, const int32_t* quantCoeff, int32_t* deltaU, int16_t* qCoef, int qBits, int add, int numCoeff)
+{
+    X265_CHECK(qBits >= 8, "qBits less than 8\n");
+    X265_CHECK((numCoeff % 16) == 0, "numCoeff must be multiple of 16\n");
+    int qBits8 = qBits - 8;
+    uint32_t numSig = 0;
+
+    for (int blockpos = 0; blockpos < numCoeff; blockpos++)
+    {
+        int level = coef[blockpos];
+        int sign  = (level < 0 ? -1 : 1);
+
+        int tmplevel = abs(level) * quantCoeff[blockpos];
+        level = ((tmplevel + add) >> qBits);
+        deltaU[blockpos] = ((tmplevel - (level << qBits)) >> qBits8);
+        if (level)
+            ++numSig;
+        level *= sign;
+        qCoef[blockpos] = (int16_t)x265_clip3(-32768, 32767, level);
+    }
+
+    return numSig;
+}
+
+static uint32_t nquant_v(const int16_t* coef, const int32_t* quantCoeff, int16_t* qCoef, int qBits, int add, int numCoeff)
+{
+    X265_CHECK((numCoeff % 16) == 0, "number of quant coeff is not multiple of 4x4\n");
+    X265_CHECK((uint32_t)add < ((uint32_t)1 << qBits), "2 ^ qBits less than add\n");
+    X265_CHECK(((intptr_t)quantCoeff & 31) == 0, "quantCoeff buffer not aligned\n");
+
+    uint32_t numSig = 0;
+
+    for (int blockpos = 0; blockpos < numCoeff; blockpos++)
+    {
+        int level = coef[blockpos];
+        int sign  = (level < 0 ? -1 : 1);
+
+        int tmplevel = abs(level) * quantCoeff[blockpos];
+        level = ((tmplevel + add) >> qBits);
+        if (level)
+            ++numSig;
+        level *= sign;
+        qCoef[blockpos] = (int16_t)x265_clip3(-32768, 32767, level);
+    }
+
+    return numSig;
+}
+template<int trSize>
+int  count_nonzero_v(const int16_t* quantCoeff)
+{
+    X265_CHECK(((intptr_t)quantCoeff & 15) == 0, "quant buffer not aligned\n");
+    int count = 0;
+    int numCoeff = trSize * trSize;
+    for (int i = 0; i < numCoeff; i++)
+    {
+        count += quantCoeff[i] != 0;
+    }
+
+    return count;
+}
+
+template<int trSize>
+uint32_t copy_count(int16_t* coeff, const int16_t* residual, intptr_t resiStride)
+{
+    uint32_t numSig = 0;
+    for (int k = 0; k < trSize; k++)
+    {
+        for (int j = 0; j < trSize; j++)
+        {
+            coeff[k * trSize + j] = residual[k * resiStride + j];
+            numSig += (residual[k * resiStride + j] != 0);
+        }
+    }
+
+    return numSig;
+}
+
+static void denoiseDct_v(int16_t* dctCoef, uint32_t* resSum, const uint16_t* offset, int numCoeff)
+{
+    for (int i = 0; i < numCoeff; i++)
+    {
+        int level = dctCoef[i];
+        int sign = level >> 31;
+        level = (level + sign) ^ sign;
+        resSum[i] += level;
+        level -= offset[i];
+        dctCoef[i] = (int16_t)(level < 0 ? 0 : (level ^ sign) - sign);
+    }
+}
+
+static int scanPosLast_v(const uint16_t *scan, const coeff_t *coeff, uint16_t *coeffSign, uint16_t *coeffFlag, uint8_t *coeffNum, int numSig, const uint16_t* /*scanCG4x4*/, const int /*trSize*/)
+{
+    memset(coeffNum, 0, MLS_GRP_NUM * sizeof(*coeffNum));
+    memset(coeffFlag, 0, MLS_GRP_NUM * sizeof(*coeffFlag));
+    memset(coeffSign, 0, MLS_GRP_NUM * sizeof(*coeffSign));
+
+    int scanPosLast = 0;
+    do
+    {
+        const uint32_t cgIdx = (uint32_t)scanPosLast >> MLS_CG_SIZE;
+
+        const uint32_t posLast = scan[scanPosLast++];
+
+        const int curCoeff = coeff[posLast];
+        const uint32_t isNZCoeff = (curCoeff != 0);
+        // get L1 sig map
+        // NOTE: the new algorithm is complicated, so the reference code is kept here
+        //uint32_t posy   = posLast >> log2TrSize;
+        //uint32_t posx   = posLast - (posy << log2TrSize);
+        //uint32_t blkIdx0 = ((posy >> MLS_CG_LOG2_SIZE) << codingParameters.log2TrSizeCG) + (posx >> MLS_CG_LOG2_SIZE);
+        //const uint32_t blkIdx = ((posLast >> (2 * MLS_CG_LOG2_SIZE)) & ~maskPosXY) + ((posLast >> MLS_CG_LOG2_SIZE) & maskPosXY);
+        //sigCoeffGroupFlag64 |= ((uint64_t)isNZCoeff << blkIdx);
+        numSig -= isNZCoeff;
+
+        // TODO: optimize by instruction BTS
+        coeffSign[cgIdx] += (uint16_t)(((uint32_t)curCoeff >> 31) << coeffNum[cgIdx]);
+        coeffFlag[cgIdx] = (coeffFlag[cgIdx] << 1) + (uint16_t)isNZCoeff;
+        coeffNum[cgIdx] += (uint8_t)isNZCoeff;
+    }
+    while (numSig > 0);
+    return scanPosLast - 1;
+}
+
+static uint32_t findPosFirstLast_v(const int16_t *dstCoeff, const intptr_t trSize, const uint16_t scanTbl[16])
+{
+    int n;
+
+    for (n = SCAN_SET_SIZE - 1; n >= 0; --n)
+    {
+        const uint32_t idx = scanTbl[n];
+        const uint32_t idxY = idx / MLS_CG_SIZE;
+        const uint32_t idxX = idx % MLS_CG_SIZE;
+        if (dstCoeff[idxY * trSize + idxX])
+            break;
+    }
+
+    X265_CHECK(n >= -1, "non-zero coeff scan failure!\n");
+
+    uint32_t lastNZPosInCG = (uint32_t)n;
+
+    for (n = 0; n < SCAN_SET_SIZE; n++)
+    {
+        const uint32_t idx = scanTbl[n];
+        const uint32_t idxY = idx / MLS_CG_SIZE;
+        const uint32_t idxX = idx % MLS_CG_SIZE;
+        if (dstCoeff[idxY * trSize + idxX])
+            break;
+    }
+
+    uint32_t firstNZPosInCG = (uint32_t)n;
+
+    // NOTE: when the coeff block is all ZERO, lastNZPosInCG is undefined and firstNZPosInCG is 16
+    return ((lastNZPosInCG << 16) | firstNZPosInCG);
+}
+
+
+static uint32_t costCoeffNxN_v(const uint16_t *scan, const coeff_t *coeff, intptr_t trSize, uint16_t *absCoeff, const uint8_t *tabSigCtx, uint32_t scanFlagMask, uint8_t *baseCtx, int offset, int scanPosSigOff, int subPosBase)
+{
+    ALIGN_VAR_32(uint16_t, tmpCoeff[SCAN_SET_SIZE]);
+    uint32_t numNonZero = (scanPosSigOff < (SCAN_SET_SIZE - 1) ? 1 : 0);
+    uint32_t sum = 0;
+
+    // correct offset to match assembly
+    absCoeff -= numNonZero;
+
+    for (int i = 0; i < MLS_CG_SIZE; i++)
+    {
+        tmpCoeff[i * MLS_CG_SIZE + 0] = (uint16_t)abs(coeff[i * trSize + 0]);
+        tmpCoeff[i * MLS_CG_SIZE + 1] = (uint16_t)abs(coeff[i * trSize + 1]);
+        tmpCoeff[i * MLS_CG_SIZE + 2] = (uint16_t)abs(coeff[i * trSize + 2]);
+        tmpCoeff[i * MLS_CG_SIZE + 3] = (uint16_t)abs(coeff[i * trSize + 3]);
+    }
+
+    do
+    {
+        uint32_t blkPos, sig, ctxSig;
+        blkPos = scan[scanPosSigOff];
+        const uint32_t posZeroMask = (subPosBase + scanPosSigOff) ? ~0 : 0;
+        sig     = scanFlagMask & 1;
+        scanFlagMask >>= 1;
+        X265_CHECK((uint32_t)(tmpCoeff[blkPos] != 0) == sig, "sign bit mistake\n");
+        if ((scanPosSigOff != 0) || (subPosBase == 0) || numNonZero)
+        {
+            const uint32_t cnt = tabSigCtx[blkPos] + offset;
+            ctxSig = cnt & posZeroMask;
+
+            //X265_CHECK(ctxSig == Quant::getSigCtxInc(patternSigCtx, log2TrSize, trSize, codingParameters.scan[subPosBase + scanPosSigOff], bIsLuma, codingParameters.firstSignificanceMapContext), "sigCtx mistake!\n");;
+            //encodeBin(sig, baseCtx[ctxSig]);
+            const uint32_t mstate = baseCtx[ctxSig];
+            const uint32_t mps = mstate & 1;
+            const uint32_t stateBits = PFX(entropyStateBits)[mstate ^ sig];
+            uint32_t nextState = (stateBits >> 24) + mps;
+            if ((mstate ^ sig) == 1)
+                nextState = sig;
+            X265_CHECK(sbacNext(mstate, sig) == nextState, "nextState check failure\n");
+            X265_CHECK(sbacGetEntropyBits(mstate, sig) == (stateBits & 0xFFFFFF), "entropyBits check failure\n");
+            baseCtx[ctxSig] = (uint8_t)nextState;
+            sum += stateBits;
+        }
+        assert(numNonZero <= 15);
+        assert(blkPos <= 15);
+        absCoeff[numNonZero] = tmpCoeff[blkPos];
+        numNonZero += sig;
+        scanPosSigOff--;
+    }
+    while(scanPosSigOff >= 0);
+
+    return (sum & 0xFFFFFF);
+}
+
+static uint32_t costCoeffRemain_v(uint16_t *absCoeff, int numNonZero, int idx)
+{
+    uint32_t goRiceParam = 0;
+
+    uint32_t sum = 0;
+    int baseLevel = 3;
+    do
+    {
+        if (idx >= C1FLAG_NUMBER)
+            baseLevel = 1;
+
+        // TODO: the IDX is not really idx, so this check is inactive
+        //X265_CHECK(baseLevel == ((idx < C1FLAG_NUMBER) ? (2 + firstCoeff2) : 1), "baseLevel check failure\n");
+        int codeNumber = absCoeff[idx] - baseLevel;
+
+        if (codeNumber >= 0)
+        {
+            //writeCoefRemainExGolomb(absCoeff[idx] - baseLevel, goRiceParam);
+            uint32_t length = 0;
+
+            codeNumber = ((uint32_t)codeNumber >> goRiceParam) - COEF_REMAIN_BIN_REDUCTION;
+            if (codeNumber >= 0)
+            {
+                {
+                    unsigned long cidx;
+                    CLZ(cidx, codeNumber + 1);
+                    length = cidx;
+                }
+                X265_CHECK((codeNumber != 0) || (length == 0), "length check failure\n");
+
+                codeNumber = (length + length);
+            }
+            sum += (COEF_REMAIN_BIN_REDUCTION + 1 + goRiceParam + codeNumber);
+
+            if (absCoeff[idx] > (COEF_REMAIN_BIN_REDUCTION << goRiceParam))
+                goRiceParam = (goRiceParam + 1) - (goRiceParam >> 2);
+            X265_CHECK(goRiceParam <= 4, "goRiceParam check failure\n");
+        }
+        baseLevel = 2;
+        idx++;
+    }
+    while(idx < numNonZero);
+
+    return sum;
+}
+
+
+static uint32_t costC1C2Flag_v(uint16_t *absCoeff, intptr_t numC1Flag, uint8_t *baseCtxMod, intptr_t ctxOffset)
+{
+    uint32_t sum = 0;
+    uint32_t c1 = 1;
+    uint32_t firstC2Idx = 8;
+    uint32_t firstC2Flag = 2;
+    uint32_t c1Next = 0xFFFFFFFE;
+
+    int idx = 0;
+    do
+    {
+        uint32_t symbol1 = absCoeff[idx] > 1;
+        uint32_t symbol2 = absCoeff[idx] > 2;
+        //encodeBin(symbol1, baseCtxMod[c1]);
+        {
+            const uint32_t mstate = baseCtxMod[c1];
+            baseCtxMod[c1] = sbacNext(mstate, symbol1);
+            sum += sbacGetEntropyBits(mstate, symbol1);
+        }
+
+        if (symbol1)
+            c1Next = 0;
+
+        if (symbol1 + firstC2Flag == 3)
+            firstC2Flag = symbol2;
+
+        if (symbol1 + firstC2Idx == 9)
+            firstC2Idx  = idx;
+
+        c1 = (c1Next & 3);
+        c1Next >>= 2;
+        X265_CHECK(c1 <= 3, "c1 check failure\n");
+        idx++;
+    }
+    while(idx < numC1Flag);
+
+    if (!c1)
+    {
+        X265_CHECK((firstC2Flag <= 1), "firstC2FlagIdx check failure\n");
+
+        baseCtxMod += ctxOffset;
+
+        //encodeBin(firstC2Flag, baseCtxMod[0]);
+        {
+            const uint32_t mstate = baseCtxMod[0];
+            baseCtxMod[0] = sbacNext(mstate, firstC2Flag);
+            sum += sbacGetEntropyBits(mstate, firstC2Flag);
+        }
+    }
+
+    return (sum & 0x00FFFFFF) + (c1 << 26) + (firstC2Idx << 28);
+}
+
+namespace X265_NS {
+// x265 private namespace
+
+void setupDCTPrimitives_v(EncoderPrimitives& p)
+{
+    p.dequant_scaling = dequant_scaling_v;
+    p.dequant_normal = dequant_normal_v;
+    p.quant = quant_v;
+    p.nquant = nquant_v;
+    p.dst4x4 = dst4_v;
+    p.cu[BLOCK_4x4].dct   = dct4_v;
+    p.cu[BLOCK_8x8].dct   = dct8_v;
+    p.cu[BLOCK_16x16].dct = dct16_v;
+    p.cu[BLOCK_32x32].dct = dct32_v;
+    p.idst4x4 = idst4_v;
+    p.cu[BLOCK_4x4].idct   = idct4_v;
+    p.cu[BLOCK_8x8].idct   = idct8_v;
+    p.cu[BLOCK_16x16].idct = idct16_v;
+    p.cu[BLOCK_32x32].idct = idct32_v;
+    p.denoiseDct = denoiseDct_v;
+    p.cu[BLOCK_4x4].count_nonzero = count_nonzero_v<4>;
+    p.cu[BLOCK_8x8].count_nonzero = count_nonzero_v<8>;
+    p.cu[BLOCK_16x16].count_nonzero = count_nonzero_v<16>;
+    p.cu[BLOCK_32x32].count_nonzero = count_nonzero_v<32>;
+
+    p.cu[BLOCK_4x4].copy_cnt   = copy_count<4>;
+    p.cu[BLOCK_8x8].copy_cnt   = copy_count<8>;
+    p.cu[BLOCK_16x16].copy_cnt = copy_count<16>;
+    p.cu[BLOCK_32x32].copy_cnt = copy_count<32>;
+
+    p.scanPosLast = scanPosLast_v;
+    p.findPosFirstLast = findPosFirstLast_v;
+    p.costCoeffNxN = costCoeffNxN_v;
+    p.costCoeffRemain = costCoeffRemain_v;
+    p.costC1C2Flag = costC1C2Flag_v;
+}
+}
diff -r f0b3b46f172e -r 3cafbcfb10ed source/common/POWER/vector-intrapred.cpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/source/common/POWER/vector-intrapred.cpp	Fri Sep 11 20:12:51 2015 +0200
@@ -0,0 +1,35 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Peter Kovář <peter.kovar at reflexion.tv>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+
+using namespace X265_NS;
+
+namespace X265_NS {
+// x265 private namespace
+
+void setupIntraPrimitives_v(EncoderPrimitives& p)
+{
+}
+}
diff -r f0b3b46f172e -r 3cafbcfb10ed source/common/POWER/vector-ipfilter.cpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/source/common/POWER/vector-ipfilter.cpp	Fri Sep 11 20:12:51 2015 +0200
@@ -0,0 +1,35 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Peter Kovář <peter.kovar at reflexion.tv>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+
+using namespace X265_NS;
+
+namespace X265_NS {
+// x265 private namespace
+
+void setupFilterPrimitives_v(EncoderPrimitives& p)
+{
+}
+}
diff -r f0b3b46f172e -r 3cafbcfb10ed source/common/POWER/vector-loopfilter.cpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/source/common/POWER/vector-loopfilter.cpp	Fri Sep 11 20:12:51 2015 +0200
@@ -0,0 +1,35 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Peter Kovář <peter.kovar at reflexion.tv>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+
+using namespace X265_NS;
+
+namespace X265_NS {
+// x265 private namespace
+
+void setupLoopFilterPrimitives_v(EncoderPrimitives& p)
+{
+}
+}
diff -r f0b3b46f172e -r 3cafbcfb10ed source/common/POWER/vector-pixel.cpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/source/common/POWER/vector-pixel.cpp	Fri Sep 11 20:12:51 2015 +0200
@@ -0,0 +1,1293 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Peter Kovář <peter.kovar at reflexion.tv>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "vector-common.h"
+
+#include "common.h"
+#include "primitives.h"
+#include "x265.h"
+
+#include <cstdlib> // abs()
+
+using namespace X265_NS;
+
+namespace {
+// place functions in anonymous namespace (file static)
+
+template<int lx, int ly>
+int sad_c(const pixel* p1, intptr_t stride_p1, const pixel* p2, intptr_t stride_p2)
+{
+    int sum = 0;
+
+    for (int y = 0; y < ly; y++)
+    {
+        for (int x = 0; x < lx; x++)
+            sum += abs(p1[x] - p2[x]);
+        p1 += stride_p1;
+        p2 += stride_p2;
+    }
+
+    return sum;
+}
+
+template<int lx, int ly>
+int sad_v(const pixel* p1, intptr_t stride_p1, const pixel* p2, intptr_t stride_p2)
+{
+    // only block widths of 4, 8 and 16 have an AltiVec path below; every
+    // other width falls back to the scalar reference implementation
+    if (lx != 4 && lx != 8 && lx != 16)
+    {
+        return sad_c<lx, ly>(p1, stride_p1, p2, stride_p2);
+    }
+
+    ALIGNED_16( int sum );
+    ZERO;
+    PREP_LOAD;
+    register v_u8_t p1v, p2v;
+    register v_s32_t sumv = v_s32_zero;
+
+    for (int y = 0; y < ly; y++)
+    {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated"
+        VEC_LOAD_G(p1, p1v, lx, v_u8_t);
+        VEC_LOAD_G(p2, p2v, lx, v_u8_t);
+#pragma GCC diagnostic pop
+        sumv = (v_s32_t) vec_sum4s(
+                   vec_sub(vec_max(p1v, p2v),
+                           vec_min(p1v, p2v)),
+                   (v_u32_t) sumv );
+        p1 += stride_p1;
+        p2 += stride_p2;
+    }
+
+    if (lx == 4)
+    {
+        vec_ste(sumv, 0, &sum) ;
+    }
+    else if (lx == 8)
+    {
+        sumv = vec_sum2s(sumv, v_s32_zero) ;
+        sumv = vec_splat(sumv, 1) ;
+        vec_ste(sumv, 0, &sum) ;
+    }
+    else if (lx == 16)
+    {
+        sumv = vec_sums(sumv, v_s32_zero) ;
+        sumv = vec_splat(sumv, 3) ;
+        vec_ste(sumv, 0, &sum) ;
+    }
+
+    return sum;
+}
+
+template<int lx, int ly>
+int sad_v(const int16_t* pix1, intptr_t stride_pix1, const int16_t* pix2, intptr_t stride_pix2)
+{
+    int sum = 0;
+
+    for (int y = 0; y < ly; y++)
+    {
+        for (int x = 0; x < lx; x++)
+            sum += abs(pix1[x] - pix2[x]);
+
+        pix1 += stride_pix1;
+        pix2 += stride_pix2;
+    }
+
+    return sum;
+}
+
+template<int lx, int ly>
+void sad_x3(const pixel* pix1, const pixel* pix2, const pixel* pix3, const pixel* pix4, intptr_t frefstride, int32_t* res)
+{
+    res[0] = 0;
+    res[1] = 0;
+    res[2] = 0;
+    for (int y = 0; y < ly; y++)
+    {
+        for (int x = 0; x < lx; x++)
+        {
+            res[0] += abs(pix1[x] - pix2[x]);
+            res[1] += abs(pix1[x] - pix3[x]);
+            res[2] += abs(pix1[x] - pix4[x]);
+        }
+
+        pix1 += FENC_STRIDE;
+        pix2 += frefstride;
+        pix3 += frefstride;
+        pix4 += frefstride;
+    }
+}
+
+template<int lx, int ly>
+void sad_x4(const pixel* pix1, const pixel* pix2, const pixel* pix3, const pixel* pix4, const pixel* pix5, intptr_t frefstride, int32_t* res)
+{
+    res[0] = 0;
+    res[1] = 0;
+    res[2] = 0;
+    res[3] = 0;
+    for (int y = 0; y < ly; y++)
+    {
+        for (int x = 0; x < lx; x++)
+        {
+            res[0] += abs(pix1[x] - pix2[x]);
+            res[1] += abs(pix1[x] - pix3[x]);
+            res[2] += abs(pix1[x] - pix4[x]);
+            res[3] += abs(pix1[x] - pix5[x]);
+        }
+
+        pix1 += FENC_STRIDE;
+        pix2 += frefstride;
+        pix3 += frefstride;
+        pix4 += frefstride;
+        pix5 += frefstride;
+    }
+}
+
+template<int lx, int ly, class T1, class T2>
+sse_ret_t sse(const T1* pix1, intptr_t stride_pix1, const T2* pix2, intptr_t stride_pix2)
+{
+    sse_ret_t sum = 0;
+    int tmp;
+
+    for (int y = 0; y < ly; y++)
+    {
+        for (int x = 0; x < lx; x++)
+        {
+            tmp = pix1[x] - pix2[x];
+            sum += (tmp * tmp);
+        }
+
+        pix1 += stride_pix1;
+        pix2 += stride_pix2;
+    }
+
+    return sum;
+}
+
+#define BITS_PER_SUM (8 * sizeof(sum_t))
+
+#define HADAMARD4(d0, d1, d2, d3, s0, s1, s2, s3) { \
+        sum2_t t0 = s0 + s1; \
+        sum2_t t1 = s0 - s1; \
+        sum2_t t2 = s2 + s3; \
+        sum2_t t3 = s2 - s3; \
+        d0 = t0 + t2; \
+        d2 = t0 - t2; \
+        d1 = t1 + t3; \
+        d3 = t1 - t3; \
+}
+
+// in: a pseudo-simd number of the form x+(y<<16)
+// return: abs(x)+(abs(y)<<16)
+inline sum2_t abs2(sum2_t a)
+{
+    sum2_t s = ((a >> (BITS_PER_SUM - 1)) & (((sum2_t)1 << BITS_PER_SUM) + 1)) * ((sum_t)-1);
+
+    return (a + s) ^ s;
+}
+
+static int satd_4x4(const pixel* pix1, intptr_t stride_pix1, const pixel* pix2, intptr_t stride_pix2)
+{
+    sum2_t tmp[4][2];
+    sum2_t a0, a1, a2, a3, b0, b1;
+    sum2_t sum = 0;
+
+    for (int i = 0; i < 4; i++, pix1 += stride_pix1, pix2 += stride_pix2)
+    {
+        a0 = pix1[0] - pix2[0];
+        a1 = pix1[1] - pix2[1];
+        b0 = (a0 + a1) + ((a0 - a1) << BITS_PER_SUM);
+        a2 = pix1[2] - pix2[2];
+        a3 = pix1[3] - pix2[3];
+        b1 = (a2 + a3) + ((a2 - a3) << BITS_PER_SUM);
+        tmp[i][0] = b0 + b1;
+        tmp[i][1] = b0 - b1;
+    }
+
+    for (int i = 0; i < 2; i++)
+    {
+        HADAMARD4(a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i]);
+        a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
+        sum += ((sum_t)a0) + (a0 >> BITS_PER_SUM);
+    }
+
+    return (int)(sum >> 1);
+}
+
+static int satd_4x4(const int16_t* pix1, intptr_t stride_pix1)
+{
+    int32_t tmp[4][4];
+    int32_t s01, s23, d01, d23;
+    int32_t satd = 0;
+    int d;
+
+    for (d = 0; d < 4; d++, pix1 += stride_pix1)
+    {
+        s01 = pix1[0] + pix1[1];
+        s23 = pix1[2] + pix1[3];
+        d01 = pix1[0] - pix1[1];
+        d23 = pix1[2] - pix1[3];
+
+        tmp[d][0] = s01 + s23;
+        tmp[d][1] = s01 - s23;
+        tmp[d][2] = d01 - d23;
+        tmp[d][3] = d01 + d23;
+    }
+
+    for (d = 0; d < 4; d++)
+    {
+        s01 = tmp[0][d] + tmp[1][d];
+        s23 = tmp[2][d] + tmp[3][d];
+        d01 = tmp[0][d] - tmp[1][d];
+        d23 = tmp[2][d] - tmp[3][d];
+        satd += abs(s01 + s23) + abs(s01 - s23) + abs(d01 - d23) + abs(d01 + d23);
+    }
+    return (int)(satd / 2);
+}
+
+// x264's SWAR version of satd 8x4, performs two 4x4 SATDs at once
+static int satd_8x4(const pixel* pix1, intptr_t stride_pix1, const pixel* pix2, intptr_t stride_pix2)
+{
+    sum2_t tmp[4][4];
+    sum2_t a0, a1, a2, a3;
+    sum2_t sum = 0;
+
+    for (int i = 0; i < 4; i++, pix1 += stride_pix1, pix2 += stride_pix2)
+    {
+        a0 = (pix1[0] - pix2[0]) + ((sum2_t)(pix1[4] - pix2[4]) << BITS_PER_SUM);
+        a1 = (pix1[1] - pix2[1]) + ((sum2_t)(pix1[5] - pix2[5]) << BITS_PER_SUM);
+        a2 = (pix1[2] - pix2[2]) + ((sum2_t)(pix1[6] - pix2[6]) << BITS_PER_SUM);
+        a3 = (pix1[3] - pix2[3]) + ((sum2_t)(pix1[7] - pix2[7]) << BITS_PER_SUM);
+        HADAMARD4(tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0, a1, a2, a3);
+    }
+
+    for (int i = 0; i < 4; i++)
+    {
+        HADAMARD4(a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i]);
+        sum += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
+    }
+
+    return (((sum_t)sum) + (sum >> BITS_PER_SUM)) >> 1;
+}
+
+template<int w, int h>
+// calculate satd in blocks of 4x4
+int satd4(const pixel* pix1, intptr_t stride_pix1, const pixel* pix2, intptr_t stride_pix2)
+{
+    int satd = 0;
+
+    for (int row = 0; row < h; row += 4)
+        for (int col = 0; col < w; col += 4)
+            satd += satd_4x4(pix1 + row * stride_pix1 + col, stride_pix1,
+                             pix2 + row * stride_pix2 + col, stride_pix2);
+
+    return satd;
+}
+
+template<int w, int h>
+// calculate satd in blocks of 8x4
+int satd8(const pixel* pix1, intptr_t stride_pix1, const pixel* pix2, intptr_t stride_pix2)
+{
+    int satd = 0;
+
+    for (int row = 0; row < h; row += 4)
+        for (int col = 0; col < w; col += 8)
+            satd += satd_8x4(pix1 + row * stride_pix1 + col, stride_pix1,
+                             pix2 + row * stride_pix2 + col, stride_pix2);
+
+    return satd;
+}
+
+inline int _sa8d_8x8(const pixel* pix1, intptr_t i_pix1, const pixel* pix2, intptr_t i_pix2)
+{
+    sum2_t tmp[8][4];
+    sum2_t a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3;
+    sum2_t sum = 0;
+
+    for (int i = 0; i < 8; i++, pix1 += i_pix1, pix2 += i_pix2)
+    {
+        a0 = pix1[0] - pix2[0];
+        a1 = pix1[1] - pix2[1];
+        b0 = (a0 + a1) + ((a0 - a1) << BITS_PER_SUM);
+        a2 = pix1[2] - pix2[2];
+        a3 = pix1[3] - pix2[3];
+        b1 = (a2 + a3) + ((a2 - a3) << BITS_PER_SUM);
+        a4 = pix1[4] - pix2[4];
+        a5 = pix1[5] - pix2[5];
+        b2 = (a4 + a5) + ((a4 - a5) << BITS_PER_SUM);
+        a6 = pix1[6] - pix2[6];
+        a7 = pix1[7] - pix2[7];
+        b3 = (a6 + a7) + ((a6 - a7) << BITS_PER_SUM);
+        HADAMARD4(tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], b0, b1, b2, b3);
+    }
+
+    for (int i = 0; i < 4; i++)
+    {
+        HADAMARD4(a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i]);
+        HADAMARD4(a4, a5, a6, a7, tmp[4][i], tmp[5][i], tmp[6][i], tmp[7][i]);
+        b0  = abs2(a0 + a4) + abs2(a0 - a4);
+        b0 += abs2(a1 + a5) + abs2(a1 - a5);
+        b0 += abs2(a2 + a6) + abs2(a2 - a6);
+        b0 += abs2(a3 + a7) + abs2(a3 - a7);
+        sum += (sum_t)b0 + (b0 >> BITS_PER_SUM);
+    }
+
+    return (int)sum;
+}
+
+inline int sa8d_8x8(const pixel* pix1, intptr_t i_pix1, const pixel* pix2, intptr_t i_pix2)
+{
+    return (int)((_sa8d_8x8(pix1, i_pix1, pix2, i_pix2) + 2) >> 2);
+}
+
+inline int _sa8d_8x8(const int16_t* pix1, intptr_t i_pix1)
+{
+    int32_t tmp[8][8];
+    int32_t a0, a1, a2, a3, a4, a5, a6, a7;
+    int32_t sum = 0;
+
+    for (int i = 0; i < 8; i++, pix1 += i_pix1)
+    {
+        a0 = pix1[0] + pix1[1];
+        a1 = pix1[2] + pix1[3];
+        a2 = pix1[4] + pix1[5];
+        a3 = pix1[6] + pix1[7];
+        a4 = pix1[0] - pix1[1];
+        a5 = pix1[2] - pix1[3];
+        a6 = pix1[4] - pix1[5];
+        a7 = pix1[6] - pix1[7];
+        tmp[i][0] = (a0 + a1) + (a2 + a3);
+        tmp[i][1] = (a0 + a1) - (a2 + a3);
+        tmp[i][2] = (a0 - a1) + (a2 - a3);
+        tmp[i][3] = (a0 - a1) - (a2 - a3);
+        tmp[i][4] = (a4 + a5) + (a6 + a7);
+        tmp[i][5] = (a4 + a5) - (a6 + a7);
+        tmp[i][6] = (a4 - a5) + (a6 - a7);
+        tmp[i][7] = (a4 - a5) - (a6 - a7);
+    }
+
+    for (int i = 0; i < 8; i++)
+    {
+        a0 = (tmp[0][i] + tmp[1][i]) + (tmp[2][i] + tmp[3][i]);
+        a2 = (tmp[0][i] + tmp[1][i]) - (tmp[2][i] + tmp[3][i]);
+        a1 = (tmp[0][i] - tmp[1][i]) + (tmp[2][i] - tmp[3][i]);
+        a3 = (tmp[0][i] - tmp[1][i]) - (tmp[2][i] - tmp[3][i]);
+        a4 = (tmp[4][i] + tmp[5][i]) + (tmp[6][i] + tmp[7][i]);
+        a6 = (tmp[4][i] + tmp[5][i]) - (tmp[6][i] + tmp[7][i]);
+        a5 = (tmp[4][i] - tmp[5][i]) + (tmp[6][i] - tmp[7][i]);
+        a7 = (tmp[4][i] - tmp[5][i]) - (tmp[6][i] - tmp[7][i]);
+        a0 = abs(a0 + a4) + abs(a0 - a4);
+        a0 += abs(a1 + a5) + abs(a1 - a5);
+        a0 += abs(a2 + a6) + abs(a2 - a6);
+        a0 += abs(a3 + a7) + abs(a3 - a7);
+        sum += a0;
+    }
+
+    return (int)sum;
+}
+
+static int sa8d_8x8(const int16_t* pix1, intptr_t i_pix1)
+{
+    return (int)((_sa8d_8x8(pix1, i_pix1) + 2) >> 2);
+}
+
+static int sa8d_16x16(const pixel* pix1, intptr_t i_pix1, const pixel* pix2, intptr_t i_pix2)
+{
+    int sum = _sa8d_8x8(pix1, i_pix1, pix2, i_pix2)
+        + _sa8d_8x8(pix1 + 8, i_pix1, pix2 + 8, i_pix2)
+        + _sa8d_8x8(pix1 + 8 * i_pix1, i_pix1, pix2 + 8 * i_pix2, i_pix2)
+        + _sa8d_8x8(pix1 + 8 + 8 * i_pix1, i_pix1, pix2 + 8 + 8 * i_pix2, i_pix2);
+
+    // This matches x264 sa8d_16x16, but is slightly different from HM's behavior because
+    // this version only rounds once at the end
+    return (sum + 2) >> 2;
+}
+
+template<int w, int h>
+// Calculate sa8d in blocks of 8x8
+int sa8d8(const pixel* pix1, intptr_t i_pix1, const pixel* pix2, intptr_t i_pix2)
+{
+    int cost = 0;
+
+    for (int y = 0; y < h; y += 8)
+        for (int x = 0; x < w; x += 8)
+            cost += sa8d_8x8(pix1 + i_pix1 * y + x, i_pix1, pix2 + i_pix2 * y + x, i_pix2);
+
+    return cost;
+}
+
+template<int w, int h>
+// Calculate sa8d in blocks of 16x16
+int sa8d16(const pixel* pix1, intptr_t i_pix1, const pixel* pix2, intptr_t i_pix2)
+{
+    int cost = 0;
+
+    for (int y = 0; y < h; y += 16)
+        for (int x = 0; x < w; x += 16)
+            cost += sa8d_16x16(pix1 + i_pix1 * y + x, i_pix1, pix2 + i_pix2 * y + x, i_pix2);
+
+    return cost;
+}
+
+template<int size>
+int pixel_ssd_s_v(const int16_t* a, intptr_t dstride)
+{
+    int sum = 0;
+    for (int y = 0; y < size; y++)
+    {
+        for (int x = 0; x < size; x++)
+            sum += a[x] * a[x];
+
+        a += dstride;
+    }
+    return sum;
+}
+
+template<int size>
+void blockfill_s_v(int16_t* dst, intptr_t dstride, int16_t val)
+{
+    for (int y = 0; y < size; y++)
+        for (int x = 0; x < size; x++)
+            dst[y * dstride + x] = val;
+}
+
+template<int size>
+void cpy2Dto1D_shl(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift)
+{
+    X265_CHECK(((intptr_t)dst & 15) == 0, "dst alignment error\n");
+    X265_CHECK((((intptr_t)src | (srcStride * sizeof(*src))) & 15) == 0 || size == 4, "src alignment error\n");
+    X265_CHECK(shift >= 0, "invalid shift\n");
+
+    for (int i = 0; i < size; i++)
+    {
+        for (int j = 0; j < size; j++)
+            dst[j] = src[j] << shift;
+
+        src += srcStride;
+        dst += size;
+    }
+}
+
+template<int size>
+void cpy2Dto1D_shr(int16_t* dst, const int16_t* src, intptr_t srcStride, int shift)
+{
+    X265_CHECK(((intptr_t)dst & 15) == 0, "dst alignment error\n");
+    X265_CHECK((((intptr_t)src | (srcStride * sizeof(*src))) & 15) == 0 || size == 4, "src alignment error\n");
+    X265_CHECK(shift > 0, "invalid shift\n");
+
+    int16_t round = 1 << (shift - 1);
+    for (int i = 0; i < size; i++)
+    {
+        for (int j = 0; j < size; j++)
+            dst[j] = (src[j] + round) >> shift;
+
+        src += srcStride;
+        dst += size;
+    }
+}
+
+template<int size>
+void cpy1Dto2D_shl(int16_t* dst, const int16_t* src, intptr_t dstStride, int shift)
+{
+    X265_CHECK((((intptr_t)dst | (dstStride * sizeof(*dst))) & 15) == 0 || size == 4, "dst alignment error\n");
+    X265_CHECK(((intptr_t)src & 15) == 0, "src alignment error\n");
+    X265_CHECK(shift >= 0, "invalid shift\n");
+
+    for (int i = 0; i < size; i++)
+    {
+        for (int j = 0; j < size; j++)
+            dst[j] = src[j] << shift;
+
+        src += size;
+        dst += dstStride;
+    }
+}
+
+template<int size>
+void cpy1Dto2D_shr(int16_t* dst, const int16_t* src, intptr_t dstStride, int shift)
+{
+    X265_CHECK((((intptr_t)dst | (dstStride * sizeof(*dst))) & 15) == 0 || size == 4, "dst alignment error\n");
+    X265_CHECK(((intptr_t)src & 15) == 0, "src alignment error\n");
+    X265_CHECK(shift > 0, "invalid shift\n");
+
+    int16_t round = 1 << (shift - 1);
+    for (int i = 0; i < size; i++)
+    {
+        for (int j = 0; j < size; j++)
+            dst[j] = (src[j] + round) >> shift;
+
+        src += size;
+        dst += dstStride;
+    }
+}
+
+template<int blockSize>
+void getResidual(const pixel* fenc, const pixel* pred, int16_t* residual, intptr_t stride)
+{
+    for (int y = 0; y < blockSize; y++)
+    {
+        for (int x = 0; x < blockSize; x++)
+            residual[x] = static_cast<int16_t>(fenc[x]) - static_cast<int16_t>(pred[x]);
+
+        fenc += stride;
+        residual += stride;
+        pred += stride;
+    }
+}
+
+template<int blockSize>
+void transpose(pixel* dst, const pixel* src, intptr_t stride)
+{
+    for (int k = 0; k < blockSize; k++)
+        for (int l = 0; l < blockSize; l++)
+            dst[k * blockSize + l] = src[l * stride + k];
+}
+
+static void weight_sp_v(const int16_t* src, pixel* dst, intptr_t srcStride, intptr_t dstStride, int width, int height, int w0, int round, int shift, int offset)
+{
+    int x, y;
+
+#if CHECKED_BUILD || _DEBUG
+    const int correction = (IF_INTERNAL_PREC - X265_DEPTH);
+    X265_CHECK(!((w0 << 6) > 32767), "w0 using more than 16 bits, asm output will mismatch\n");
+    X265_CHECK(!(round > 32767), "round using more than 16 bits, asm output will mismatch\n");
+    X265_CHECK((shift >= correction), "shift must include the factor correction, please update ASM ABI\n");
+#endif
+
+    for (y = 0; y <= height - 1; y++)
+    {
+        for (x = 0; x <= width - 1; )
+        {
+            // note: width can be odd
+            dst[x] = x265_clip(((w0 * (src[x] + IF_INTERNAL_OFFS) + round) >> shift) + offset);
+            x++;
+        }
+
+        src += srcStride;
+        dst += dstStride;
+    }
+}
+
+static void weight_pp_v(const pixel* src, pixel* dst, intptr_t stride, int width, int height, int w0, int round, int shift, int offset)
+{
+    int x, y;
+
+    const int correction = (IF_INTERNAL_PREC - X265_DEPTH);
+
+    X265_CHECK(!(width & 15), "weightp alignment error\n");
+    X265_CHECK(!((w0 << 6) > 32767), "w0 using more than 16 bits, asm output will mismatch\n");
+    X265_CHECK(!(round > 32767), "round using more than 16 bits, asm output will mismatch\n");
+    X265_CHECK((shift >= correction), "shift must include the factor correction, please update ASM ABI\n");
+    X265_CHECK(!(round & ((1 << correction) - 1)), "round must include the factor correction, please update ASM ABI\n");
+
+    for (y = 0; y <= height - 1; y++)
+    {
+        for (x = 0; x <= width - 1; )
+        {
+            // simulating pixel to short conversion
+            int16_t val = src[x] << correction;
+            dst[x] = x265_clip(((w0 * (val) + round) >> shift) + offset);
+            x++;
+        }
+
+        src += stride;
+        dst += stride;
+    }
+}
+
+template<int lx, int ly>
+void pixelavg_pp(pixel* dst, intptr_t dstride, const pixel* src0, intptr_t sstride0, const pixel* src1, intptr_t sstride1, int)
+{
+    for (int y = 0; y < ly; y++)
+    {
+        for (int x = 0; x < lx; x++)
+            dst[x] = (src0[x] + src1[x] + 1) >> 1;
+
+        src0 += sstride0;
+        src1 += sstride1;
+        dst += dstride;
+    }
+}
+
+static void scale1D_128to64(pixel *dst, const pixel *src)
+{
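+    // halve two 128-entry reference arrays (stored back to back) to 64 entries each by averaging adjacent pairs with rounding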
+    int x;
+    const pixel* src1 = src;
+    const pixel* src2 = src + 128;
+
+    pixel* dst1 = dst;
+    pixel* dst2 = dst + 64/*128*/;
+
+    for (x = 0; x < 128; x += 2)
+    {
+        // Top pixel
+        pixel pix0 = src1[(x + 0)];
+        pixel pix1 = src1[(x + 1)];
+
+        // Left pixel
+        pixel pix2 = src2[(x + 0)];
+        pixel pix3 = src2[(x + 1)];
+        int sum1 = pix0 + pix1;
+        int sum2 = pix2 + pix3;
+
+        dst1[x >> 1] = (pixel)((sum1 + 1) >> 1);
+        dst2[x >> 1] = (pixel)((sum2 + 1) >> 1);
+    }
+}
+
+static void scale2D_64to32(pixel* dst, const pixel* src, intptr_t stride)
+{
+    uint32_t x, y;
+
+    for (y = 0; y < 64; y += 2)
+    {
+        for (x = 0; x < 64; x += 2)
+        {
+            pixel pix0 = src[(y + 0) * stride + (x + 0)];
+            pixel pix1 = src[(y + 0) * stride + (x + 1)];
+            pixel pix2 = src[(y + 1) * stride + (x + 0)];
+            pixel pix3 = src[(y + 1) * stride + (x + 1)];
+            int sum = pix0 + pix1 + pix2 + pix3;
+
+            dst[y / 2 * 32 + x / 2] = (pixel)((sum + 2) >> 2);
+        }
+    }
+}
+
+static
+void frame_init_lowres_core(const pixel* src0, pixel* dst0, pixel* dsth, pixel* dstv, pixel* dstc,
+                            intptr_t src_stride, intptr_t dst_stride, int width, int height)
+{
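+    // every lowres pixel averages a 2x2 source neighbourhood; dsth, dstv and dstc are the half-pel planes offset horizontally, vertically and diagonally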
+    for (int y = 0; y < height; y++)
+    {
+        const pixel* src1 = src0 + src_stride;
+        const pixel* src2 = src1 + src_stride;
+        for (int x = 0; x < width; x++)
+        {
+            // slower than naive bilinear, but matches asm
+#define FILTER(a, b, c, d) ((((a + b + 1) >> 1) + ((c + d + 1) >> 1) + 1) >> 1)
+            dst0[x] = FILTER(src0[2 * x], src1[2 * x], src0[2 * x + 1], src1[2 * x + 1]);
+            dsth[x] = FILTER(src0[2 * x + 1], src1[2 * x + 1], src0[2 * x + 2], src1[2 * x + 2]);
+            dstv[x] = FILTER(src1[2 * x], src2[2 * x], src1[2 * x + 1], src2[2 * x + 1]);
+            dstc[x] = FILTER(src1[2 * x + 1], src2[2 * x + 1], src1[2 * x + 2], src2[2 * x + 2]);
+#undef FILTER
+        }
+        src0 += src_stride * 2;
+        dst0 += dst_stride;
+        dsth += dst_stride;
+        dstv += dst_stride;
+        dstc += dst_stride;
+    }
+}
+
+/* structural similarity metric */
+static void ssim_4x4x2_core(const pixel* pix1, intptr_t stride1, const pixel* pix2, intptr_t stride2, int sums[2][4])
+{
+    for (int z = 0; z < 2; z++)
+    {
+        uint32_t s1 = 0, s2 = 0, ss = 0, s12 = 0;
+        for (int y = 0; y < 4; y++)
+        {
+            for (int x = 0; x < 4; x++)
+            {
+                int a = pix1[x + y * stride1];
+                int b = pix2[x + y * stride2];
+                s1 += a;
+                s2 += b;
+                ss += a * a;
+                ss += b * b;
+                s12 += a * b;
+            }
+        }
+
+        sums[z][0] = s1;
+        sums[z][1] = s2;
+        sums[z][2] = ss;
+        sums[z][3] = s12;
+        pix1 += 4;
+        pix2 += 4;
+    }
+}
+
+static float ssim_end_1(int s1, int s2, int ss, int s12)
+{
+/* Maximum value for 10-bit is: ss*64 = (2^10-1)^2*16*4*64 = 4286582784, which will overflow in some cases.
+ * s1*s1, s2*s2, and s1*s2 also obtain this value for edge cases: ((2^10-1)*16*4)^2 = 4286582784.
+ * Maximum value for 9-bit is: ss*64 = (2^9-1)^2*16*4*64 = 1069551616, which will not overflow. */
+
+#define PIXEL_MAX ((1 << X265_DEPTH) - 1)
+#if HIGH_BIT_DEPTH
+    X265_CHECK((X265_DEPTH == 10) || (X265_DEPTH == 12), "ssim invalid depth\n");
+#define type float
+    static const float ssim_c1 = (float)(.01 * .01 * PIXEL_MAX * PIXEL_MAX * 64);
+    static const float ssim_c2 = (float)(.03 * .03 * PIXEL_MAX * PIXEL_MAX * 64 * 63);
+#else
+    X265_CHECK(X265_DEPTH == 8, "ssim invalid depth\n");
+#define type int
+    static const int ssim_c1 = (int)(.01 * .01 * PIXEL_MAX * PIXEL_MAX * 64 + .5);
+    static const int ssim_c2 = (int)(.03 * .03 * PIXEL_MAX * PIXEL_MAX * 64 * 63 + .5);
+#endif
+    type fs1 = (type)s1;
+    type fs2 = (type)s2;
+    type fss = (type)ss;
+    type fs12 = (type)s12;
+    type vars = (type)(fss * 64 - fs1 * fs1 - fs2 * fs2);
+    type covar = (type)(fs12 * 64 - fs1 * fs2);
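+    // standard SSIM formula evaluated directly on the 64-sample sums; ssim_c1/ssim_c2 are pre-scaled so no extra normalization is needed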
+    return (float)(2 * fs1 * fs2 + ssim_c1) * (float)(2 * covar + ssim_c2)
+           / ((float)(fs1 * fs1 + fs2 * fs2 + ssim_c1) * (float)(vars + ssim_c2));
+#undef type
+#undef PIXEL_MAX
+}
+
+static float ssim_end_4(int sum0[5][4], int sum1[5][4], int width)
+{
+    float ssim = 0.0;
+
+    for (int i = 0; i < width; i++)
+    {
+        ssim += ssim_end_1(sum0[i][0] + sum0[i + 1][0] + sum1[i][0] + sum1[i + 1][0],
+                           sum0[i][1] + sum0[i + 1][1] + sum1[i][1] + sum1[i + 1][1],
+                           sum0[i][2] + sum0[i + 1][2] + sum1[i][2] + sum1[i + 1][2],
+                           sum0[i][3] + sum0[i + 1][3] + sum1[i][3] + sum1[i + 1][3]);
+    }
+
+    return ssim;
+}
+
+template<int size>
+uint64_t pixel_var(const pixel* pix, intptr_t i_stride)
+{
+    uint32_t sum = 0, sqr = 0;
+
+    for (int y = 0; y < size; y++)
+    {
+        for (int x = 0; x < size; x++)
+        {
+            sum += pix[x];
+            sqr += pix[x] * pix[x];
+        }
+
+        pix += i_stride;
+    }
+
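+    // pack both statistics into one return value: pixel sum in the low 32 bits, sum of squares in the high 32 bits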
+    return sum + ((uint64_t)sqr << 32);
+}
+
+#if defined(_MSC_VER)
+#pragma warning(disable: 4127) // conditional expression is constant
+#endif
+
+template<int size>
+int psyCost_pp(const pixel* source, intptr_t sstride, const pixel* recon, intptr_t rstride)
+{
+    static pixel zeroBuf[8] /* = { 0 } */;
+
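+    // 'size' is the block index (0 means 4x4); larger blocks are measured as a grid of 8x8 tiles against a zero block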
+    if (size)
+    {
+        int dim = 1 << (size + 2);
+        uint32_t totEnergy = 0;
+        for (int i = 0; i < dim; i += 8)
+        {
+            for (int j = 0; j < dim; j += 8)
+            {
+                /* AC energy, measured by sa8d (AC + DC) minus SAD (DC) */
+                int sourceEnergy = sa8d_8x8(source + i * sstride + j, sstride, zeroBuf, 0) -
+                                   (sad_v<8, 8>(source + i * sstride + j, sstride, zeroBuf, 0) >> 2);
+                int reconEnergy =  sa8d_8x8(recon + i * rstride + j, rstride, zeroBuf, 0) -
+                                   (sad_v<8, 8>(recon + i * rstride + j, rstride, zeroBuf, 0) >> 2);
+
+                totEnergy += abs(sourceEnergy - reconEnergy);
+            }
+        }
+        return totEnergy;
+    }
+    else
+    {
+        /* 4x4 is too small for sa8d */
+        int sourceEnergy = satd_4x4(source, sstride, zeroBuf, 0) - (sad_v<4, 4>(source, sstride, zeroBuf, 0) >> 2);
+        int reconEnergy = satd_4x4(recon, rstride, zeroBuf, 0) - (sad_v<4, 4>(recon, rstride, zeroBuf, 0) >> 2);
+        return abs(sourceEnergy - reconEnergy);
+    }
+}
+
+template<int size>
+int psyCost_ss(const int16_t* source, intptr_t sstride, const int16_t* recon, intptr_t rstride)
+{
+    static int16_t zeroBuf[8] /* = { 0 } */;
+
+    if (size)
+    {
+        int dim = 1 << (size + 2);
+        uint32_t totEnergy = 0;
+        for (int i = 0; i < dim; i += 8)
+        {
+            for (int j = 0; j < dim; j += 8)
+            {
+                /* AC energy, measured by sa8d (AC + DC) minus SAD (DC) */
+                int sourceEnergy = sa8d_8x8(source + i * sstride + j, sstride) -
+                                   (sad_v<8, 8>(source + i * sstride + j, sstride, zeroBuf, 0) >> 2);
+                int reconEnergy =  sa8d_8x8(recon + i * rstride + j, rstride) -
+                                   (sad_v<8, 8>(recon + i * rstride + j, rstride, zeroBuf, 0) >> 2);
+
+                totEnergy += abs(sourceEnergy - reconEnergy);
+            }
+        }
+        return totEnergy;
+    }
+    else
+    {
+        /* 4x4 is too small for sa8d */
+        int sourceEnergy = satd_4x4(source, sstride) - (sad_v<4, 4>(source, sstride, zeroBuf, 0) >> 2);
+        int reconEnergy = satd_4x4(recon, rstride) - (sad_v<4, 4>(recon, rstride, zeroBuf, 0) >> 2);
+        return abs(sourceEnergy - reconEnergy);
+    }
+}
+
+template<int bx, int by>
+void blockcopy_pp_v(pixel* a, intptr_t stridea, const pixel* b, intptr_t strideb)
+{
+    for (int y = 0; y < by; y++)
+    {
+        for (int x = 0; x < bx; x++)
+            a[x] = b[x];
+
+        a += stridea;
+        b += strideb;
+    }
+}
+
+template<int bx, int by>
+void blockcopy_ss_v(int16_t* a, intptr_t stridea, const int16_t* b, intptr_t strideb)
+{
+    for (int y = 0; y < by; y++)
+    {
+        for (int x = 0; x < bx; x++)
+            a[x] = b[x];
+
+        a += stridea;
+        b += strideb;
+    }
+}
+
+template<int bx, int by>
+void blockcopy_sp_v(pixel* a, intptr_t stridea, const int16_t* b, intptr_t strideb)
+{
+    for (int y = 0; y < by; y++)
+    {
+        for (int x = 0; x < bx; x++)
+        {
+            X265_CHECK((b[x] >= 0) && (b[x] <= ((1 << X265_DEPTH) - 1)), "blockcopy pixel size fail\n");
+            a[x] = (pixel)b[x];
+        }
+
+        a += stridea;
+        b += strideb;
+    }
+}
+
+template<int bx, int by>
+void blockcopy_ps_v(int16_t* a, intptr_t stridea, const pixel* b, intptr_t strideb)
+{
+    for (int y = 0; y < by; y++)
+    {
+        for (int x = 0; x < bx; x++)
+            a[x] = (int16_t)b[x];
+
+        a += stridea;
+        b += strideb;
+    }
+}
+
+template<int bx, int by>
+void pixel_sub_ps_v(int16_t* a, intptr_t dstride, const pixel* b0, const pixel* b1, intptr_t sstride0, intptr_t sstride1)
+{
+    for (int y = 0; y < by; y++)
+    {
+        for (int x = 0; x < bx; x++)
+            a[x] = (int16_t)(b0[x] - b1[x]);
+
+        b0 += sstride0;
+        b1 += sstride1;
+        a += dstride;
+    }
+}
+
+template<int bx, int by>
+void pixel_add_ps_v(pixel* a, intptr_t dstride, const pixel* b0, const int16_t* b1, intptr_t sstride0, intptr_t sstride1)
+{
+    for (int y = 0; y < by; y++)
+    {
+        for (int x = 0; x < bx; x++)
+            a[x] = x265_clip(b0[x] + b1[x]);
+
+        b0 += sstride0;
+        b1 += sstride1;
+        a += dstride;
+    }
+}
+
+template<int bx, int by>
+void addAvg(const int16_t* src0, const int16_t* src1, pixel* dst, intptr_t src0Stride, intptr_t src1Stride, intptr_t dstStride)
+{
+    int shiftNum, offset;
+
+    shiftNum = IF_INTERNAL_PREC + 1 - X265_DEPTH;
+    offset = (1 << (shiftNum - 1)) + 2 * IF_INTERNAL_OFFS;
+
+    for (int y = 0; y < by; y++)
+    {
+        for (int x = 0; x < bx; x += 2)
+        {
+            dst[x + 0] = x265_clip((src0[x + 0] + src1[x + 0] + offset) >> shiftNum);
+            dst[x + 1] = x265_clip((src0[x + 1] + src1[x + 1] + offset) >> shiftNum);
+        }
+
+        src0 += src0Stride;
+        src1 += src1Stride;
+        dst  += dstStride;
+    }
+}
+
+static void planecopy_cp_v(const uint8_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift)
+{
+    for (int r = 0; r < height; r++)
+    {
+        for (int c = 0; c < width; c++)
+            dst[c] = ((pixel)src[c]) << shift;
+
+        dst += dstStride;
+        src += srcStride;
+    }
+}
+
+static void planecopy_sp_v(const uint16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift, uint16_t mask)
+{
+    for (int r = 0; r < height; r++)
+    {
+        for (int c = 0; c < width; c++)
+            dst[c] = (pixel)((src[c] >> shift) & mask);
+
+        dst += dstStride;
+        src += srcStride;
+    }
+}
+
+static void planecopy_sp_shl_v(const uint16_t* src, intptr_t srcStride, pixel* dst, intptr_t dstStride, int width, int height, int shift, uint16_t mask)
+{
+    for (int r = 0; r < height; r++)
+    {
+        for (int c = 0; c < width; c++)
+            dst[c] = (pixel)((src[c] << shift) & mask);
+
+        dst += dstStride;
+        src += srcStride;
+    }
+}
+
+/* Estimate the total amount of influence on future quality that could be had if we
+ * were to improve the reference samples used to inter predict any given CU. */
+static void estimateCUPropagateCost(int* dst, const uint16_t* propagateIn, const int32_t* intraCosts, const uint16_t* interCosts,
+                             const int32_t* invQscales, const double* fpsFactor, int len)
+{
+    double fps = *fpsFactor / 256;
+
+    for (int i = 0; i < len; i++)
+    {
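+        // propagated fraction = (intra - inter) / intra; the mask keeps only the low 14 cost bits of the packed interCosts entry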
+        double intraCost       = intraCosts[i] * invQscales[i];
+        double propagateAmount = (double)propagateIn[i] + intraCost * fps;
+        double propagateNum    = (double)intraCosts[i] - (interCosts[i] & ((1 << 14) - 1));
+        double propagateDenom  = (double)intraCosts[i];
+        dst[i] = (int)(propagateAmount * propagateNum / propagateDenom + 0.5);
+    }
+}
+}  // end anonymous namespace
+
+namespace X265_NS {
+// x265 private namespace
+
+void setupPixelPrimitives_v(EncoderPrimitives& p)
+{
+#define LUMA_PU(W, H) \
+    p.pu[LUMA_ ## W ## x ## H].copy_pp = blockcopy_pp_v<W, H>; \
+    p.pu[LUMA_ ## W ## x ## H].addAvg = addAvg<W, H>; \
+    p.pu[LUMA_ ## W ## x ## H].sad = sad_v<W, H>; \
+    p.pu[LUMA_ ## W ## x ## H].sad_x3 = sad_x3<W, H>; \
+    p.pu[LUMA_ ## W ## x ## H].sad_x4 = sad_x4<W, H>; \
+    p.pu[LUMA_ ## W ## x ## H].pixelavg_pp = pixelavg_pp<W, H>;
+
+#define LUMA_CU(W, H) \
+    p.cu[BLOCK_ ## W ## x ## H].sub_ps        = pixel_sub_ps_v<W, H>; \
+    p.cu[BLOCK_ ## W ## x ## H].add_ps        = pixel_add_ps_v<W, H>; \
+    p.cu[BLOCK_ ## W ## x ## H].copy_sp       = blockcopy_sp_v<W, H>; \
+    p.cu[BLOCK_ ## W ## x ## H].copy_ps       = blockcopy_ps_v<W, H>; \
+    p.cu[BLOCK_ ## W ## x ## H].copy_ss       = blockcopy_ss_v<W, H>; \
+    p.cu[BLOCK_ ## W ## x ## H].blockfill_s   = blockfill_s_v<W>;  \
+    p.cu[BLOCK_ ## W ## x ## H].cpy2Dto1D_shl = cpy2Dto1D_shl<W>; \
+    p.cu[BLOCK_ ## W ## x ## H].cpy2Dto1D_shr = cpy2Dto1D_shr<W>; \
+    p.cu[BLOCK_ ## W ## x ## H].cpy1Dto2D_shl = cpy1Dto2D_shl<W>; \
+    p.cu[BLOCK_ ## W ## x ## H].cpy1Dto2D_shr = cpy1Dto2D_shr<W>; \
+    p.cu[BLOCK_ ## W ## x ## H].psy_cost_pp   = psyCost_pp<BLOCK_ ## W ## x ## H>; \
+    p.cu[BLOCK_ ## W ## x ## H].psy_cost_ss   = psyCost_ss<BLOCK_ ## W ## x ## H>; \
+    p.cu[BLOCK_ ## W ## x ## H].transpose     = transpose<W>; \
+    p.cu[BLOCK_ ## W ## x ## H].ssd_s         = pixel_ssd_s_v<W>; \
+    p.cu[BLOCK_ ## W ## x ## H].var           = pixel_var<W>; \
+    p.cu[BLOCK_ ## W ## x ## H].calcresidual  = getResidual<W>; \
+    p.cu[BLOCK_ ## W ## x ## H].sse_pp        = sse<W, H, pixel, pixel>; \
+    p.cu[BLOCK_ ## W ## x ## H].sse_ss        = sse<W, H, int16_t, int16_t>;
+
+    LUMA_PU(4, 4);
+    LUMA_PU(8, 8);
+    LUMA_PU(16, 16);
+    LUMA_PU(32, 32);
+    LUMA_PU(64, 64);
+    LUMA_PU(4, 8);
+    LUMA_PU(8, 4);
+    LUMA_PU(16,  8);
+    LUMA_PU(8, 16);
+    LUMA_PU(16, 12);
+    LUMA_PU(12, 16);
+    LUMA_PU(16,  4);
+    LUMA_PU(4, 16);
+    LUMA_PU(32, 16);
+    LUMA_PU(16, 32);
+    LUMA_PU(32, 24);
+    LUMA_PU(24, 32);
+    LUMA_PU(32,  8);
+    LUMA_PU(8, 32);
+    LUMA_PU(64, 32);
+    LUMA_PU(32, 64);
+    LUMA_PU(64, 48);
+    LUMA_PU(48, 64);
+    LUMA_PU(64, 16);
+    LUMA_PU(16, 64);
+
+    p.pu[LUMA_4x4].satd   = satd_4x4;
+    p.pu[LUMA_8x8].satd   = satd8<8, 8>;
+    p.pu[LUMA_8x4].satd   = satd_8x4;
+    p.pu[LUMA_4x8].satd   = satd4<4, 8>;
+    p.pu[LUMA_16x16].satd = satd8<16, 16>;
+    p.pu[LUMA_16x8].satd  = satd8<16, 8>;
+    p.pu[LUMA_8x16].satd  = satd8<8, 16>;
+    p.pu[LUMA_16x12].satd = satd8<16, 12>;
+    p.pu[LUMA_12x16].satd = satd4<12, 16>;
+    p.pu[LUMA_16x4].satd  = satd8<16, 4>;
+    p.pu[LUMA_4x16].satd  = satd4<4, 16>;
+    p.pu[LUMA_32x32].satd = satd8<32, 32>;
+    p.pu[LUMA_32x16].satd = satd8<32, 16>;
+    p.pu[LUMA_16x32].satd = satd8<16, 32>;
+    p.pu[LUMA_32x24].satd = satd8<32, 24>;
+    p.pu[LUMA_24x32].satd = satd8<24, 32>;
+    p.pu[LUMA_32x8].satd  = satd8<32, 8>;
+    p.pu[LUMA_8x32].satd  = satd8<8, 32>;
+    p.pu[LUMA_64x64].satd = satd8<64, 64>;
+    p.pu[LUMA_64x32].satd = satd8<64, 32>;
+    p.pu[LUMA_32x64].satd = satd8<32, 64>;
+    p.pu[LUMA_64x48].satd = satd8<64, 48>;
+    p.pu[LUMA_48x64].satd = satd8<48, 64>;
+    p.pu[LUMA_64x16].satd = satd8<64, 16>;
+    p.pu[LUMA_16x64].satd = satd8<16, 64>;
+
+    LUMA_CU(4, 4);
+    LUMA_CU(8, 8);
+    LUMA_CU(16, 16);
+    LUMA_CU(32, 32);
+    LUMA_CU(64, 64);
+
+    p.cu[BLOCK_4x4].sa8d   = satd_4x4;
+    p.cu[BLOCK_8x8].sa8d   = sa8d_8x8;
+    p.cu[BLOCK_16x16].sa8d = sa8d_16x16;
+    p.cu[BLOCK_32x32].sa8d = sa8d16<32, 32>;
+    p.cu[BLOCK_64x64].sa8d = sa8d16<64, 64>;
+
+#define CHROMA_PU_420(W, H) \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_ ## W ## x ## H].addAvg  = addAvg<W, H>;         \
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_ ## W ## x ## H].copy_pp = blockcopy_pp_v<W, H>; \
+
+    CHROMA_PU_420(2, 2);
+    CHROMA_PU_420(2, 4);
+    CHROMA_PU_420(4, 4);
+    CHROMA_PU_420(8, 8);
+    CHROMA_PU_420(16, 16);
+    CHROMA_PU_420(32, 32);
+    CHROMA_PU_420(4, 2);
+    CHROMA_PU_420(8, 4);
+    CHROMA_PU_420(4, 8);
+    CHROMA_PU_420(8, 6);
+    CHROMA_PU_420(6, 8);
+    CHROMA_PU_420(8, 2);
+    CHROMA_PU_420(2, 8);
+    CHROMA_PU_420(16, 8);
+    CHROMA_PU_420(8,  16);
+    CHROMA_PU_420(16, 12);
+    CHROMA_PU_420(12, 16);
+    CHROMA_PU_420(16, 4);
+    CHROMA_PU_420(4,  16);
+    CHROMA_PU_420(32, 16);
+    CHROMA_PU_420(16, 32);
+    CHROMA_PU_420(32, 24);
+    CHROMA_PU_420(24, 32);
+    CHROMA_PU_420(32, 8);
+    CHROMA_PU_420(8,  32);
+
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_2x2].satd   = NULL;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].satd   = satd_4x4;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].satd   = satd8<8, 8>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].satd = satd8<16, 16>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].satd = satd8<32, 32>;
+
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].satd   = NULL;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_2x4].satd   = NULL;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].satd   = satd_8x4;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].satd   = satd4<4, 8>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].satd  = satd8<16, 8>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].satd  = satd8<8, 16>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].satd = satd8<32, 16>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].satd = satd8<16, 32>;
+
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].satd   = NULL;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].satd   = NULL;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].satd   = NULL;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_2x8].satd   = NULL;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].satd = satd4<16, 12>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].satd = satd4<12, 16>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].satd  = satd4<16, 4>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].satd  = satd4<4, 16>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].satd = satd8<32, 24>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].satd = satd8<24, 32>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].satd  = satd8<32, 8>;
+    p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].satd  = satd8<8, 32>;
+
+#define CHROMA_CU_420(W, H) \
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_ ## W ## x ## H].sse_pp  = sse<W, H, pixel, pixel>; \
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_ ## W ## x ## H].copy_sp = blockcopy_sp_v<W, H>; \
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_ ## W ## x ## H].copy_ps = blockcopy_ps_v<W, H>; \
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_ ## W ## x ## H].copy_ss = blockcopy_ss_v<W, H>; \
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_ ## W ## x ## H].sub_ps = pixel_sub_ps_v<W, H>;  \
+    p.chroma[X265_CSP_I420].cu[BLOCK_420_ ## W ## x ## H].add_ps = pixel_add_ps_v<W, H>;
+
+    CHROMA_CU_420(2, 2)
+    CHROMA_CU_420(4, 4)
+    CHROMA_CU_420(8, 8)
+    CHROMA_CU_420(16, 16)
+    CHROMA_CU_420(32, 32)
+
+    p.chroma[X265_CSP_I420].cu[BLOCK_8x8].sa8d   = p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].satd;
+    p.chroma[X265_CSP_I420].cu[BLOCK_16x16].sa8d = sa8d8<8, 8>;
+    p.chroma[X265_CSP_I420].cu[BLOCK_32x32].sa8d = sa8d16<16, 16>;
+    p.chroma[X265_CSP_I420].cu[BLOCK_64x64].sa8d = sa8d16<32, 32>;
+
+#define CHROMA_PU_422(W, H) \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_ ## W ## x ## H].addAvg  = addAvg<W, H>;         \
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_ ## W ## x ## H].copy_pp = blockcopy_pp_v<W, H>; \
+
+    CHROMA_PU_422(2, 4);
+    CHROMA_PU_422(4, 8);
+    CHROMA_PU_422(8, 16);
+    CHROMA_PU_422(16, 32);
+    CHROMA_PU_422(32, 64);
+    CHROMA_PU_422(4, 4);
+    CHROMA_PU_422(2, 8);
+    CHROMA_PU_422(8, 8);
+    CHROMA_PU_422(4, 16);
+    CHROMA_PU_422(8, 12);
+    CHROMA_PU_422(6, 16);
+    CHROMA_PU_422(8, 4);
+    CHROMA_PU_422(2, 16);
+    CHROMA_PU_422(16, 16);
+    CHROMA_PU_422(8, 32);
+    CHROMA_PU_422(16, 24);
+    CHROMA_PU_422(12, 32);
+    CHROMA_PU_422(16, 8);
+    CHROMA_PU_422(4,  32);
+    CHROMA_PU_422(32, 32);
+    CHROMA_PU_422(16, 64);
+    CHROMA_PU_422(32, 48);
+    CHROMA_PU_422(24, 64);
+    CHROMA_PU_422(32, 16);
+    CHROMA_PU_422(8,  64);
+
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_2x4].satd   = NULL;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].satd   = satd4<4, 8>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].satd  = satd8<8, 16>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].satd = satd8<16, 32>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].satd = satd8<32, 64>;
+
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_4x4].satd   = satd_4x4;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_2x8].satd   = NULL;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].satd   = satd8<8, 8>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].satd  = satd4<4, 16>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].satd = satd8<16, 16>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].satd  = satd8<8, 32>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].satd = satd8<32, 32>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].satd = satd8<16, 64>;
+
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].satd  = satd4<8, 12>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].satd  = NULL;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].satd   = satd4<8, 4>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_2x16].satd  = NULL;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].satd = satd8<16, 24>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].satd = satd4<12, 32>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].satd  = satd8<16, 8>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].satd  = satd4<4, 32>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].satd = satd8<32, 48>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].satd = satd8<24, 64>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].satd = satd8<32, 16>;
+    p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].satd  = satd8<8, 64>;
+
+#define CHROMA_CU_422(W, H) \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_ ## W ## x ## H].sse_pp  = sse<W, H, pixel, pixel>; \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_ ## W ## x ## H].copy_sp = blockcopy_sp_v<W, H>; \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_ ## W ## x ## H].copy_ps = blockcopy_ps_v<W, H>; \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_ ## W ## x ## H].copy_ss = blockcopy_ss_v<W, H>; \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_ ## W ## x ## H].sub_ps = pixel_sub_ps_v<W, H>; \
+    p.chroma[X265_CSP_I422].cu[BLOCK_422_ ## W ## x ## H].add_ps = pixel_add_ps_v<W, H>;
+
+    CHROMA_CU_422(2, 4)
+    CHROMA_CU_422(4, 8)
+    CHROMA_CU_422(8, 16)
+    CHROMA_CU_422(16, 32)
+    CHROMA_CU_422(32, 64)
+
+    p.chroma[X265_CSP_I422].cu[BLOCK_8x8].sa8d   = p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].satd;
+    p.chroma[X265_CSP_I422].cu[BLOCK_16x16].sa8d = sa8d8<8, 16>;
+    p.chroma[X265_CSP_I422].cu[BLOCK_32x32].sa8d = sa8d16<16, 32>;
+    p.chroma[X265_CSP_I422].cu[BLOCK_64x64].sa8d = sa8d16<32, 64>;
+
+    p.weight_pp = weight_pp_v;
+    p.weight_sp = weight_sp_v;
+
+    p.scale1D_128to64 = scale1D_128to64;
+    p.scale2D_64to32 = scale2D_64to32;
+    p.frameInitLowres = frame_init_lowres_core;
+    p.ssim_4x4x2_core = ssim_4x4x2_core;
+    p.ssim_end_4 = ssim_end_4;
+
+    p.planecopy_cp = planecopy_cp_v;
+    p.planecopy_sp = planecopy_sp_v;
+    p.planecopy_sp_shl = planecopy_sp_shl_v;
+    p.propagateCost = estimateCUPropagateCost;
+}
+}
diff -r f0b3b46f172e -r 3cafbcfb10ed source/common/POWER/vector-primitives.cpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/source/common/POWER/vector-primitives.cpp	Fri Sep 11 20:12:51 2015 +0200
@@ -0,0 +1,48 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Peter Kovář <peter.kovar at reflexion.tv>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+
+namespace X265_NS {
+// x265 private namespace
+
+#if X265_ARCH_POWER
+void setupPixelPrimitives_v(EncoderPrimitives &p);
+void setupDCTPrimitives_v(EncoderPrimitives &p);
+void setupFilterPrimitives_v(EncoderPrimitives &p);
+void setupIntraPrimitives_v(EncoderPrimitives &p);
+void setupLoopFilterPrimitives_v(EncoderPrimitives &p);
+void setupSaoPrimitives_v(EncoderPrimitives &p);
+
+void setupVectorPrimitives(EncoderPrimitives &p)
+{
+    setupPixelPrimitives_v(p);      // vector-pixel.cpp
+    setupDCTPrimitives_v(p);        // vector-dct.cpp
+    setupFilterPrimitives_v(p);     // vector-ipfilter.cpp
+    setupIntraPrimitives_v(p);      // vector-intrapred.cpp
+    setupLoopFilterPrimitives_v(p); // vector-loopfilter.cpp
+    setupSaoPrimitives_v(p);        // vector-sao.cpp
+}
+#endif // if X265_ARCH_POWER
+}
diff -r f0b3b46f172e -r 3cafbcfb10ed source/common/POWER/vector-sao.cpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/source/common/POWER/vector-sao.cpp	Fri Sep 11 20:12:51 2015 +0200
@@ -0,0 +1,35 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Peter Kovář <peter.kovar at reflexion.tv>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#include "common.h"
+#include "primitives.h"
+
+using namespace X265_NS;
+
+namespace X265_NS {
+// x265 private namespace
+
+void setupSaoPrimitives_v(EncoderPrimitives& p)
+{
+}
+}
diff -r f0b3b46f172e -r 3cafbcfb10ed source/common/POWER/vector-types.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/source/common/POWER/vector-types.h	Fri Sep 11 20:12:51 2015 +0200
@@ -0,0 +1,34 @@
+/*****************************************************************************
+ * Copyright (C) 2015 x265 project
+ *
+ * Authors: Peter Kovář <peter.kovar at reflexion.tv>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at license @ x265.com.
+ *****************************************************************************/
+
+#ifndef _VECTOR_TYPES_H_
+#define _VECTOR_TYPES_H_ 1
+
+#define v_u8_t  vector unsigned char
+#define v_s8_t  vector signed char
+#define v_u16_t vector unsigned short
+#define v_s16_t vector signed short
+#define v_u32_t vector unsigned int
+#define v_s32_t vector signed int
+
+#endif /* _VECTOR_TYPES_H_ */
diff -r f0b3b46f172e -r 3cafbcfb10ed source/common/primitives.cpp
--- a/source/common/primitives.cpp	Fri Sep 11 20:44:56 2015 +0200
+++ b/source/common/primitives.cpp	Fri Sep 11 20:12:51 2015 +0200
@@ -68,6 +68,7 @@
     setupSaoPrimitives_c(p);        // sao.cpp
 }
 
+
 void setupAliasPrimitives(EncoderPrimitives &p)
 {
 #if HIGH_BIT_DEPTH
@@ -227,6 +228,8 @@
 
 void x265_setup_primitives(x265_param *param)
 {
+    x265_log(param, X265_LOG_INFO, "%s ()\r", __func__);
+
     if (!primitives.pu[0].sad)
     {
         setupCPrimitives(primitives);
@@ -242,6 +245,10 @@
         setupAssemblyPrimitives(primitives, param->cpuid);
 #endif
 
+#if X265_ARCH_POWER
+        setupVectorPrimitives(primitives);
+#endif
+
         setupAliasPrimitives(primitives);
     }
 
diff -r f0b3b46f172e -r 3cafbcfb10ed source/common/primitives.h
--- a/source/common/primitives.h	Fri Sep 11 20:44:56 2015 +0200
+++ b/source/common/primitives.h	Fri Sep 11 20:12:51 2015 +0200
@@ -403,6 +403,9 @@
 void setupInstrinsicPrimitives(EncoderPrimitives &p, int cpuMask);
 void setupAssemblyPrimitives(EncoderPrimitives &p, int cpuMask);
 void setupAliasPrimitives(EncoderPrimitives &p);
+#if X265_ARCH_POWER
+void setupVectorPrimitives(EncoderPrimitives &p);
+#endif // if X265_ARCH_POWER
 }
 
 #if !EXPORT_C_API

