[x265] [PATCH 2 of 2] vectorized horizontal weighted filter
deepthidevaki at multicorewareinc.com
Mon Aug 5 12:35:09 CEST 2013
# HG changeset patch
# User Deepthi Devaki
# Date 1375698191 -19800
# Node ID 894e47d258a7b12a41b51a72cb2d256a2d13899c
# Parent 87dbfdda0769a2ff9a53fd424d94c1bfa995c3ed
vectorized horizontal weighted filter
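
The weight step applied below to every filtered sample is, in scalar form
(a minimal sketch for reference; weightOne is an illustrative name, not part
of the patch, and Clip3 is the usual x265 clip helper):

    // wshift/wround here already include the IF_INTERNAL_PREC - X265_DEPTH headroom
    pixel weightOne(short val, int scale, int wround, int wshift, int woffset)
    {
        int w = ((val + IF_INTERNAL_OFFS) * scale + wround) >> wshift;
        return (pixel)Clip3(0, (1 << X265_DEPTH) - 1, w + woffset);
    }

Each weighted block in the loop performs this on eight samples at a time:
sign-extend to 32 bits, add IF_INTERNAL_OFFS, multiply by scale, round and
shift, add woffset, then pack with saturation back to pixels.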
diff -r 87dbfdda0769 -r 894e47d258a7 source/common/vec/ipfilter.inc
--- a/source/common/vec/ipfilter.inc Mon Aug 05 15:34:57 2013 +0530
+++ b/source/common/vec/ipfilter.inc Mon Aug 05 15:53:11 2013 +0530
@@ -64,6 +64,7 @@
p.filterVmulti = filterVerticalMultiplaneExtend;
#if !(defined(_MSC_VER) && _MSC_VER == 1500 && X86_64)
p.filterHmulti = filterHorizontalMultiplaneExtend;
+ p.filterHwghtd = filterHorizontalWeighted;
#endif
#endif
}
diff -r 87dbfdda0769 -r 894e47d258a7 source/common/vec/ipfilter8.inc
--- a/source/common/vec/ipfilter8.inc Mon Aug 05 15:34:57 2013 +0530
+++ b/source/common/vec/ipfilter8.inc Mon Aug 05 15:53:11 2013 +0530
@@ -1136,6 +1136,476 @@
memcpy(pp - y * dstStride, pp, block_width + marginX * 2);
}
}
+
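+// Horizontal 8-tap luma interpolation producing four planes per call:
+//   intF/intA/intB/intC : 16-bit intermediate planes (full-pel, 1/4-, 1/2- and 3/4-pel)
+//   dstF/dstA/dstB/dstC : the same planes with the weight applied, packed to pixel,
+//                         and with margins extended on all four sides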
+void filterHorizontalWeighted(pixel *src, int srcStride,
+ short *intF, short *intA, short *intB, short *intC, int intStride,
+ pixel *dstF, pixel *dstA, pixel *dstB, pixel *dstC, int dstStride,
+ int block_width, int block_height,
+ int marginX, int marginY, int scale, int wround, int wshift, int woffset)
+{
+ int row, col;
+ int headRoom = IF_INTERNAL_PREC - X265_DEPTH;
+ int shift = IF_FILTER_PREC - headRoom;
+ int offset = (-IF_INTERNAL_OFFS) << shift;
+
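+ // 8-tap filter: start 3 (= 8/2 - 1) samples to the left of the output position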
+ src -= (8 / 2 - 1);
+ __m128i vec_src0;
+ __m128i vec_offset = _mm_set1_epi16(offset);
+ __m128i sumaL, sumbL, sumcL, tmp, exp1;
+ __m128i tmp16a, tmp16b, tmp16c, tmp16f, tmpwlo, tmpwhi;
+
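+ // fold the intermediate-precision headroom into the weight shift and recompute the rounding term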
+ int shiftNum = IF_INTERNAL_PREC - X265_DEPTH;
+ wshift = wshift + shiftNum;
+ wround = wshift ? (1 << (wshift - 1)) : 0;
+
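+ // vector constants for the weight step: ((val + IF_INTERNAL_OFFS) * scale + wround) >> wshift, then + woffset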
+ __m128i iofs = _mm_set1_epi32(IF_INTERNAL_OFFS);
+ __m128i vround = _mm_set1_epi32(wround);
+ __m128i ofs = _mm_set1_epi32(woffset);
+ __m128i vscale = _mm_set1_epi32(scale);
+
+ // Load each source sample A_i and accumulate A_i * coef_i into the sub-pel sums
+ for (row = 0; row < block_height; row++)
+ {
+ col = 0;
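+ // sumaL/sumbL/sumcL accumulate the 1/4-, 1/2- and 3/4-pel 8-tap sums;
+ // the coefficient multiplies are decomposed into shifts and adds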
+
+ vec_src0 = _mm_loadu_si128((__m128i const*)(src + col));
+ sumbL = (_mm_unpacklo_epi8(vec_src0, _mm_setzero_si128()));
+ sumbL = _mm_sub_epi16(_mm_setzero_si128(), sumbL);
+
+ // a = b+=4*a1, c+=1*a1
+ vec_src0 = _mm_loadu_si128((__m128i const*)(src + col + 1));
+ sumcL = _mm_unpacklo_epi8(vec_src0, _mm_setzero_si128());
+ sumbL = _mm_add_epi16(sumbL, _mm_sll_epi16(sumcL, _mm_cvtsi32_si128(2)));
+ sumaL = sumbL;
+
+ // a +=-10*a2 b+=-11*a2 c+=-5*a2
+ vec_src0 = _mm_loadu_si128((__m128i const*)(src + col + 2));
+ tmp = _mm_unpacklo_epi8(vec_src0, _mm_setzero_si128());
+ sumbL = _mm_sub_epi16(sumbL, tmp);
+ tmp = _mm_mullo_epi16(tmp, _mm_set1_epi16(-5));
+ sumcL = _mm_add_epi16(sumcL, tmp);
+ tmp = _mm_sll_epi16(tmp, _mm_cvtsi32_si128(1));
+ sumaL = _mm_add_epi16(sumaL, tmp);
+ sumbL = _mm_add_epi16(sumbL, tmp);
+
+ // a +=58*a3 b+=40*a3 c+=17*a3
+ vec_src0 = _mm_loadu_si128((__m128i const*)(src + col + 3));
+ tmp = _mm_unpacklo_epi8(vec_src0, _mm_setzero_si128());
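+ // full-pel plane: promote the source pixel to intermediate precision (<< 6) and subtract IF_INTERNAL_OFFS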
+ tmp16f = _mm_sub_epi16(_mm_sll_epi16(tmp, _mm_cvtsi32_si128(6)), _mm_set1_epi16(IF_INTERNAL_OFFS));
+ _mm_storeu_si128((__m128i*)(intF + col), tmp16f);
+ //Apply weight on Full pel
+ tmpwlo = _mm_unpacklo_epi16(tmp16f, _mm_srai_epi16(tmp16f, 15));
+ tmpwlo = _mm_add_epi32(tmpwlo, iofs);
+ tmpwlo = _mm_mullo_epi32(tmpwlo, vscale);
+ tmpwlo = _mm_add_epi32(tmpwlo, vround);
+ tmpwlo = _mm_sra_epi32(tmpwlo, _mm_cvtsi32_si128(wshift));
+ tmpwlo = _mm_add_epi32(tmpwlo, ofs);
+ tmpwhi = _mm_unpackhi_epi16(tmp16f, _mm_srai_epi16(tmp16f, 15));
+ tmpwhi = _mm_add_epi32(tmpwhi, iofs);
+ tmpwhi = _mm_mullo_epi32(tmpwhi, vscale);
+ tmpwhi = _mm_add_epi32(tmpwhi, vround);
+ tmpwhi = _mm_sra_epi32(tmpwhi, _mm_cvtsi32_si128(wshift));
+ tmpwhi = _mm_add_epi32(tmpwhi, ofs);
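+ // saturate the weighted values to 16 bits, then clip to the [0, 255] pixel range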
+ tmp16f = _mm_packus_epi16(_mm_packs_epi32(tmpwlo, tmpwhi), _mm_setzero_si128());
+ _mm_storel_epi64((__m128i*)(dstF + row * dstStride + col), tmp16f);
+
+ exp1 = _mm_add_epi16(tmp, _mm_sll_epi16(tmp, _mm_cvtsi32_si128(4)));
+ sumcL = _mm_add_epi16(sumcL, exp1);
+ sumaL = _mm_add_epi16(sumaL, tmp);
+ tmp = _mm_mullo_epi16(tmp, _mm_set1_epi16(40));
+ sumbL = _mm_add_epi16(sumbL, tmp);
+ sumaL = _mm_add_epi16(sumaL, _mm_add_epi16(exp1, tmp));
+
+ // a +=17*a4 b+=40*a4 c+=58*a4
+ vec_src0 = _mm_loadu_si128((__m128i const*)(src + col + 4));
+ tmp = _mm_unpacklo_epi8(vec_src0, _mm_setzero_si128());
+ exp1 = _mm_add_epi16(tmp, _mm_sll_epi16(tmp, _mm_cvtsi32_si128(4)));
+ sumaL = _mm_add_epi16(sumaL, exp1);
+ sumcL = _mm_add_epi16(sumcL, tmp);
+ tmp = _mm_mullo_epi16(tmp, _mm_set1_epi16(40));
+ sumbL = _mm_add_epi16(sumbL, tmp);
+ sumcL = _mm_add_epi16(sumcL, _mm_add_epi16(exp1, tmp));
+
+ // a +=-5*a5 b+=-11*a5 c+=-10*a5
+ vec_src0 = _mm_loadu_si128((__m128i const*)(src + col + 5));
+ tmp = _mm_unpacklo_epi8(vec_src0, _mm_setzero_si128());
+ sumbL = _mm_sub_epi16(sumbL, tmp);
+ tmp = _mm_mullo_epi16(tmp, _mm_set1_epi16(-5));
+ sumaL = _mm_add_epi16(sumaL, tmp);
+ tmp = _mm_sll_epi16(tmp, _mm_cvtsi32_si128(1));
+ sumcL = _mm_add_epi16(sumcL, tmp);
+ sumbL = _mm_add_epi16(sumbL, tmp);
+
+ // a +=1*a6 b+=4*a6 c+=4*a6
+ vec_src0 = _mm_loadu_si128((__m128i const*)(src + col + 6));
+ tmp = _mm_unpacklo_epi8(vec_src0, _mm_setzero_si128());
+ sumaL = _mm_add_epi16(sumaL, tmp);
+ tmp = _mm_sll_epi16(tmp, _mm_cvtsi32_si128(2));
+ sumbL = _mm_add_epi16(sumbL, tmp);
+ sumcL = _mm_add_epi16(sumcL, tmp);
+
+ // a +=0*a7 b+=-1*a7 c+=-1*a7
+ vec_src0 = _mm_loadu_si128((__m128i const*)(src + col + 7));
+ tmp = _mm_unpacklo_epi8(vec_src0, _mm_setzero_si128());
+ sumbL = _mm_sub_epi16(sumbL, tmp);
+ sumcL = _mm_sub_epi16(sumcL, tmp);
+ sumaL = _mm_add_epi16(sumaL, vec_offset);
+ sumbL = _mm_add_epi16(sumbL, vec_offset);
+ sumcL = _mm_add_epi16(sumcL, vec_offset);
+
+ _mm_storeu_si128((__m128i*)(intA + col), sumaL);
+ //Apply weight
+ tmpwlo = _mm_unpacklo_epi16(sumaL, _mm_srai_epi16(sumaL, 15));
+ tmpwlo = _mm_add_epi32(tmpwlo, iofs);
+ tmpwlo = _mm_mullo_epi32(tmpwlo, vscale);
+ tmpwlo = _mm_add_epi32(tmpwlo, vround);
+ tmpwlo = _mm_sra_epi32(tmpwlo, _mm_cvtsi32_si128(wshift));
+ tmpwlo = _mm_add_epi32(tmpwlo, ofs);
+ tmpwhi = _mm_unpackhi_epi16(sumaL, _mm_srai_epi16(sumaL, 15));
+ tmpwhi = _mm_add_epi32(tmpwhi, iofs);
+ tmpwhi = _mm_mullo_epi32(tmpwhi, vscale);
+ tmpwhi = _mm_add_epi32(tmpwhi, vround);
+ tmpwhi = _mm_sra_epi32(tmpwhi, _mm_cvtsi32_si128(wshift));
+ tmpwhi = _mm_add_epi32(tmpwhi, ofs);
+ tmp16a = _mm_packus_epi16(_mm_packs_epi32(tmpwlo, tmpwhi), _mm_setzero_si128());
+ _mm_storel_epi64((__m128i*)(dstA + row * dstStride + col), tmp16a);
+
+ _mm_storeu_si128((__m128i*)(intB + col), sumbL);
+ //Apply weight
+ tmpwlo = _mm_unpacklo_epi16(sumbL, _mm_srai_epi16(sumbL, 15));
+ tmpwlo = _mm_add_epi32(tmpwlo, iofs);
+ tmpwlo = _mm_mullo_epi32(tmpwlo, vscale);
+ tmpwlo = _mm_add_epi32(tmpwlo, vround);
+ tmpwlo = _mm_sra_epi32(tmpwlo, _mm_cvtsi32_si128(wshift));
+ tmpwlo = _mm_add_epi32(tmpwlo, ofs);
+ tmpwhi = _mm_unpackhi_epi16(sumbL, _mm_srai_epi16(sumbL, 15));
+ tmpwhi = _mm_add_epi32(tmpwhi, iofs);
+ tmpwhi = _mm_mullo_epi32(tmpwhi, vscale);
+ tmpwhi = _mm_add_epi32(tmpwhi, vround);
+ tmpwhi = _mm_sra_epi32(tmpwhi, _mm_cvtsi32_si128(wshift));
+ tmpwhi = _mm_add_epi32(tmpwhi, ofs);
+ tmp16b = _mm_packus_epi16(_mm_packs_epi32(tmpwlo, tmpwhi), _mm_setzero_si128());
+ _mm_storel_epi64((__m128i*)(dstB + row * dstStride + col), tmp16b);
+
+ _mm_storeu_si128((__m128i*)(intC + col), sumcL);
+ //Apply weight
+ tmpwlo = _mm_unpacklo_epi16(sumcL, _mm_srai_epi16(sumcL, 15));
+ tmpwlo = _mm_add_epi32(tmpwlo, iofs);
+ tmpwlo = _mm_mullo_epi32(tmpwlo, vscale);
+ tmpwlo = _mm_add_epi32(tmpwlo, vround);
+ tmpwlo = _mm_sra_epi32(tmpwlo, _mm_cvtsi32_si128(wshift));
+ tmpwlo = _mm_add_epi32(tmpwlo, ofs);
+ tmpwhi = _mm_unpackhi_epi16(sumcL, _mm_srai_epi16(sumcL, 15));
+ tmpwhi = _mm_add_epi32(tmpwhi, iofs);
+ tmpwhi = _mm_mullo_epi32(tmpwhi, vscale);
+ tmpwhi = _mm_add_epi32(tmpwhi, vround);
+ tmpwhi = _mm_sra_epi32(tmpwhi, _mm_cvtsi32_si128(wshift));
+ tmpwhi = _mm_add_epi32(tmpwhi, ofs);
+ tmp16c = _mm_packus_epi16(_mm_packs_epi32(tmpwlo, tmpwhi), _mm_setzero_si128());
+ _mm_storel_epi64((__m128i*)(dstC + row * dstStride + col), tmp16c);
+
+ // Extend First column
+ __m128i ma, mb, mc, mf;
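+ // broadcast the first output pixel of each plane into all 16 lanes to fill the left margin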
+ mf = _mm_shuffle_epi8(tmp16f, _mm_set1_epi8(0));
+ ma = _mm_shuffle_epi8(tmp16a, _mm_set1_epi8(0));
+ mb = _mm_shuffle_epi8(tmp16b, _mm_set1_epi8(0));
+ mc = _mm_shuffle_epi8(tmp16c, _mm_set1_epi8(0));
+
+ for (int i = -marginX; i < -16; i += 16)
+ {
+ _mm_storeu_si128((__m128i*)(dstF + row * dstStride + i), mf);
+ _mm_storeu_si128((__m128i*)(dstA + row * dstStride + i), ma);
+ _mm_storeu_si128((__m128i*)(dstB + row * dstStride + i), mb);
+ _mm_storeu_si128((__m128i*)(dstC + row * dstStride + i), mc);
+ }
+
+ _mm_storeu_si128((__m128i*)(dstF + row * dstStride - 16), mf); /* assumes marginX >= 16 */
+ _mm_storeu_si128((__m128i*)(dstA + row * dstStride - 16), ma);
+ _mm_storeu_si128((__m128i*)(dstB + row * dstStride - 16), mb);
+ _mm_storeu_si128((__m128i*)(dstC + row * dstStride - 16), mc);
+
+ col += 8;
+
+ for (; col + 8 <= block_width; col += 8) // Iterations multiple of 8
+ {
+ vec_src0 = _mm_loadu_si128((__m128i const*)(src + col));
+ sumbL = (_mm_unpacklo_epi8(vec_src0, _mm_setzero_si128()));
+ sumbL = _mm_sub_epi16(_mm_setzero_si128(), sumbL);
+
+ // a = b+=4*a1, c+=1*a1
+ vec_src0 = _mm_loadu_si128((__m128i const*)(src + col + 1));
+ sumcL = _mm_unpacklo_epi8(vec_src0, _mm_setzero_si128());
+ sumbL = _mm_add_epi16(sumbL, _mm_sll_epi16(sumcL, _mm_cvtsi32_si128(2)));
+ sumaL = sumbL;
+
+ // a +=-10*a2 b+=-11*a2 c+=-5*a2
+ vec_src0 = _mm_loadu_si128((__m128i const*)(src + col + 2));
+ tmp = _mm_unpacklo_epi8(vec_src0, _mm_setzero_si128());
+ sumbL = _mm_sub_epi16(sumbL, tmp);
+ tmp = _mm_mullo_epi16(tmp, _mm_set1_epi16(-5));
+ sumcL = _mm_add_epi16(sumcL, tmp);
+ tmp = _mm_sll_epi16(tmp, _mm_cvtsi32_si128(1));
+ sumaL = _mm_add_epi16(sumaL, tmp);
+ sumbL = _mm_add_epi16(sumbL, tmp);
+
+ // a +=58*a3 b+=40*a3 c+=17*a3
+ vec_src0 = _mm_loadu_si128((__m128i const*)(src + col + 3));
+ tmp = _mm_unpacklo_epi8(vec_src0, _mm_setzero_si128());
+ tmp16f = _mm_sub_epi16(_mm_sll_epi16(tmp, _mm_cvtsi32_si128(6)), _mm_set1_epi16(IF_INTERNAL_OFFS));
+ _mm_storeu_si128((__m128i*)(intF + col), tmp16f);
+ //Apply weight
+ tmpwlo = _mm_unpacklo_epi16(tmp16f, _mm_srai_epi16(tmp16f, 15));
+ tmpwlo = _mm_add_epi32(tmpwlo, iofs);
+ tmpwlo = _mm_mullo_epi32(tmpwlo, vscale);
+ tmpwlo = _mm_add_epi32(tmpwlo, vround);
+ tmpwlo = _mm_sra_epi32(tmpwlo, _mm_cvtsi32_si128(wshift));
+ tmpwlo = _mm_add_epi32(tmpwlo, ofs);
+ tmpwhi = _mm_unpackhi_epi16(tmp16f, _mm_srai_epi16(tmp16f, 15));
+ tmpwhi = _mm_add_epi32(tmpwhi, iofs);
+ tmpwhi = _mm_mullo_epi32(tmpwhi, vscale);
+ tmpwhi = _mm_add_epi32(tmpwhi, vround);
+ tmpwhi = _mm_sra_epi32(tmpwhi, _mm_cvtsi32_si128(wshift));
+ tmpwhi = _mm_add_epi32(tmpwhi, ofs);
+ tmp16f = _mm_packus_epi16(_mm_packs_epi32(tmpwlo, tmpwhi), _mm_setzero_si128());
+ _mm_storel_epi64((__m128i*)(dstF + row * dstStride + col), tmp16f);
+
+ exp1 = _mm_add_epi16(tmp, _mm_sll_epi16(tmp, _mm_cvtsi32_si128(4)));
+ sumcL = _mm_add_epi16(sumcL, exp1);
+ sumaL = _mm_add_epi16(sumaL, tmp);
+ tmp = _mm_mullo_epi16(tmp, _mm_set1_epi16(40));
+ sumbL = _mm_add_epi16(sumbL, tmp);
+ sumaL = _mm_add_epi16(sumaL, _mm_add_epi16(exp1, tmp));
+
+ // a +=17*a4 b+=40*a4 c+=58*a4
+ vec_src0 = _mm_loadu_si128((__m128i const*)(src + col + 4));
+ tmp = _mm_unpacklo_epi8(vec_src0, _mm_setzero_si128());
+ exp1 = _mm_add_epi16(tmp, _mm_sll_epi16(tmp, _mm_cvtsi32_si128(4)));
+ sumaL = _mm_add_epi16(sumaL, exp1);
+ sumcL = _mm_add_epi16(sumcL, tmp);
+ tmp = _mm_mullo_epi16(tmp, _mm_set1_epi16(40));
+ sumbL = _mm_add_epi16(sumbL, tmp);
+ sumcL = _mm_add_epi16(sumcL, _mm_add_epi16(exp1, tmp));
+
+ // a +=-5*a5 b+=-11*a5 c+=-10*a5
+ vec_src0 = _mm_loadu_si128((__m128i const*)(src + col + 5));
+ tmp = _mm_unpacklo_epi8(vec_src0, _mm_setzero_si128());
+ sumbL = _mm_sub_epi16(sumbL, tmp);
+ tmp = _mm_mullo_epi16(tmp, _mm_set1_epi16(-5));
+ sumaL = _mm_add_epi16(sumaL, tmp);
+ tmp = _mm_sll_epi16(tmp, _mm_cvtsi32_si128(1));
+ sumcL = _mm_add_epi16(sumcL, tmp);
+ sumbL = _mm_add_epi16(sumbL, tmp);
+
+ // a +=1*a6 b+=4*a6 c+=4*a6
+ vec_src0 = _mm_loadu_si128((__m128i const*)(src + col + 6));
+ tmp = _mm_unpacklo_epi8(vec_src0, _mm_setzero_si128());
+ sumaL = _mm_add_epi16(sumaL, tmp);
+ tmp = _mm_sll_epi16(tmp, _mm_cvtsi32_si128(2));
+ sumbL = _mm_add_epi16(sumbL, tmp);
+ sumcL = _mm_add_epi16(sumcL, tmp);
+
+ // a +=0*a7 b+=-1*a7 c+=-1*a7
+ vec_src0 = _mm_loadu_si128((__m128i const*)(src + col + 7));
+ tmp = _mm_unpacklo_epi8(vec_src0, _mm_setzero_si128());
+ sumbL = _mm_sub_epi16(sumbL, tmp);
+ sumcL = _mm_sub_epi16(sumcL, tmp);
+ sumaL = _mm_add_epi16(sumaL, vec_offset);
+ sumbL = _mm_add_epi16(sumbL, vec_offset);
+ sumcL = _mm_add_epi16(sumcL, vec_offset);
+
+ _mm_storeu_si128((__m128i*)(intA + col), sumaL);
+ //Apply weight
+ tmpwlo = _mm_unpacklo_epi16(sumaL, _mm_srai_epi16(sumaL, 15));
+ tmpwlo = _mm_add_epi32(tmpwlo, iofs);
+ tmpwlo = _mm_mullo_epi32(tmpwlo, vscale);
+ tmpwlo = _mm_add_epi32(tmpwlo, vround);
+ tmpwlo = _mm_sra_epi32(tmpwlo, _mm_cvtsi32_si128(wshift));
+ tmpwlo = _mm_add_epi32(tmpwlo, ofs);
+ tmpwhi = _mm_unpackhi_epi16(sumaL, _mm_srai_epi16(sumaL, 15));
+ tmpwhi = _mm_add_epi32(tmpwhi, iofs);
+ tmpwhi = _mm_mullo_epi32(tmpwhi, vscale);
+ tmpwhi = _mm_add_epi32(tmpwhi, vround);
+ tmpwhi = _mm_sra_epi32(tmpwhi, _mm_cvtsi32_si128(wshift));
+ tmpwhi = _mm_add_epi32(tmpwhi, ofs);
+ tmp16a = _mm_packus_epi16(_mm_packs_epi32(tmpwlo, tmpwhi), _mm_setzero_si128());
+ _mm_storel_epi64((__m128i*)(dstA + row * dstStride + col), tmp16a);
+
+ _mm_storeu_si128((__m128i*)(intB + col), sumbL);
+ //Apply weight
+ tmpwlo = _mm_unpacklo_epi16(sumbL, _mm_srai_epi16(sumbL, 15));
+ tmpwlo = _mm_add_epi32(tmpwlo, iofs);
+ tmpwlo = _mm_mullo_epi32(tmpwlo, vscale);
+ tmpwlo = _mm_add_epi32(tmpwlo, vround);
+ tmpwlo = _mm_sra_epi32(tmpwlo, _mm_cvtsi32_si128(wshift));
+ tmpwlo = _mm_add_epi32(tmpwlo, ofs);
+ tmpwhi = _mm_unpackhi_epi16(sumbL, _mm_srai_epi16(sumbL, 15));
+ tmpwhi = _mm_add_epi32(tmpwhi, iofs);
+ tmpwhi = _mm_mullo_epi32(tmpwhi, vscale);
+ tmpwhi = _mm_add_epi32(tmpwhi, vround);
+ tmpwhi = _mm_sra_epi32(tmpwhi, _mm_cvtsi32_si128(wshift));
+ tmpwhi = _mm_add_epi32(tmpwhi, ofs);
+ tmp16b = _mm_packus_epi16(_mm_packs_epi32(tmpwlo, tmpwhi), _mm_setzero_si128());
+ _mm_storel_epi64((__m128i*)(dstB + row * dstStride + col), tmp16b);
+
+ _mm_storeu_si128((__m128i*)(intC + col), sumcL);
+ //Apply weight
+ tmpwlo = _mm_unpacklo_epi16(sumcL, _mm_srai_epi16(sumcL, 15));
+ tmpwlo = _mm_add_epi32(tmpwlo, iofs);
+ tmpwlo = _mm_mullo_epi32(tmpwlo, vscale);
+ tmpwlo = _mm_add_epi32(tmpwlo, vround);
+ tmpwlo = _mm_sra_epi32(tmpwlo, _mm_cvtsi32_si128(wshift));
+ tmpwlo = _mm_add_epi32(tmpwlo, ofs);
+ tmpwhi = _mm_unpackhi_epi16(sumcL, _mm_srai_epi16(sumcL, 15));
+ tmpwhi = _mm_add_epi32(tmpwhi, iofs);
+ tmpwhi = _mm_mullo_epi32(tmpwhi, vscale);
+ tmpwhi = _mm_add_epi32(tmpwhi, vround);
+ tmpwhi = _mm_sra_epi32(tmpwhi, _mm_cvtsi32_si128(wshift));
+ tmpwhi = _mm_add_epi32(tmpwhi, ofs);
+ tmp16c = _mm_packus_epi16(_mm_packs_epi32(tmpwlo, tmpwhi), _mm_setzero_si128());
+ _mm_storel_epi64((__m128i*)(dstC + row * dstStride + col), tmp16c);
+ }
+
+ if (block_width - col > 0)
+ {
+ vec_src0 = _mm_loadu_si128((__m128i const*)(src + block_width - 5));
+ tmp = _mm_unpacklo_epi8(vec_src0, _mm_setzero_si128());
+ tmp = _mm_sub_epi16(_mm_sll_epi16(tmp, _mm_cvtsi32_si128(6)), _mm_set1_epi16(IF_INTERNAL_OFFS));
+ _mm_storeu_si128((__m128i*)(intF + block_width - 8), tmp);
+ //Apply weight
+ tmpwlo = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
+ tmpwlo = _mm_add_epi32(tmpwlo, iofs);
+ tmpwlo = _mm_mullo_epi32(tmpwlo, vscale);
+ tmpwlo = _mm_add_epi32(tmpwlo, vround);
+ tmpwlo = _mm_sra_epi32(tmpwlo, _mm_cvtsi32_si128(wshift));
+ tmpwlo = _mm_add_epi32(tmpwlo, ofs);
+ tmpwhi = _mm_unpackhi_epi16(tmp, _mm_srai_epi16(tmp, 15));
+ tmpwhi = _mm_add_epi32(tmpwhi, iofs);
+ tmpwhi = _mm_mullo_epi32(tmpwhi, vscale);
+ tmpwhi = _mm_add_epi32(tmpwhi, vround);
+ tmpwhi = _mm_sra_epi32(tmpwhi, _mm_cvtsi32_si128(wshift));
+ tmpwhi = _mm_add_epi32(tmpwhi, ofs);
+ tmp = _mm_packus_epi16(_mm_packs_epi32(tmpwlo, tmpwhi), _mm_setzero_si128());
+ _mm_storel_epi64((__m128i*)(dstF + row * dstStride + block_width - 8), tmp);
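+ // broadcast the last full-pel output pixel for the right-margin extension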
+ tmp16f = _mm_shuffle_epi8(tmp, _mm_set1_epi8(7));
+
+ __m128i a, b, c, sum1, sum2, sum3 = _mm_setzero_si128();
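+ // fewer than 8 columns remain: compute one output per iteration with
+ // explicit coefficient vectors and horizontal adds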
+ for (; col < block_width; col++) // Remaining iterations
+ {
+ vec_src0 = _mm_loadu_si128((__m128i const*)(src + col));
+ tmp = _mm_unpacklo_epi8(vec_src0, _mm_setzero_si128()); // assumes no 16-bit overflow (holds throughout this function for 8-bit input)
+ a = _mm_setr_epi16(-1, 4, -10, 58, 17, -5, 1, 0);
+ a = _mm_mullo_epi16(tmp, a);
+ b = _mm_setr_epi16(-1, 4, -11, 40, 40, -11, 4, -1);
+ b = _mm_mullo_epi16(tmp, b);
+ c = _mm_setr_epi16(0, 1, -5, 17, 58, -10, 4, -1);
+ c = _mm_mullo_epi16(tmp, c);
+ sum1 = _mm_hadd_epi16(a, b); // horizontally add 8 elements in 3 steps
+ sum2 = _mm_hadd_epi16(c, c);
+ sum2 = _mm_hadd_epi16(sum1, sum2);
+ sum3 = _mm_hadd_epi16(sum2, sum2);
+ sum3 = _mm_add_epi16(sum3, vec_offset);
+ sum3 = _mm_sra_epi16(sum3, _mm_cvtsi32_si128(shift));
+ intA[col] = _mm_cvtsi128_si32(sum3);
+ intB[col] = _mm_extract_epi16(sum3, 1);
+ intC[col] = _mm_extract_epi16(sum3, 2);
+
+ tmpwlo = _mm_unpacklo_epi16(sum3, _mm_srai_epi16(sum3, 15));
+ tmpwlo = _mm_add_epi32(tmpwlo, iofs);
+ tmpwlo = _mm_mullo_epi32(tmpwlo, vscale);
+ tmpwlo = _mm_add_epi32(tmpwlo, vround);
+ tmpwlo = _mm_sra_epi32(tmpwlo, _mm_cvtsi32_si128(wshift));
+ tmpwlo = _mm_add_epi32(tmpwlo, ofs);
+ sum3 = _mm_packus_epi16(_mm_packs_epi32(tmpwlo, tmpwlo), _mm_setzero_si128());
+
+ dstA[row * dstStride + col] = _mm_extract_epi8(sum3, 0);
+ dstB[row * dstStride + col] = _mm_extract_epi8(sum3, 1);
+ dstC[row * dstStride + col] = _mm_extract_epi8(sum3, 2);
+ }
+
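+ // broadcast the last weighted A/B/C pixels (bytes 0..2 of sum3) for the right-margin fill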
+ tmp16a = _mm_shuffle_epi8(sum3, _mm_set1_epi8(0));
+ tmp16b = _mm_shuffle_epi8(sum3, _mm_set1_epi8(1));
+ tmp16c = _mm_shuffle_epi8(sum3, _mm_set1_epi8(2));
+ }
+ else
+ {
+ tmp16f = _mm_shuffle_epi8(tmp16f, _mm_set1_epi8(7));
+ tmp16a = _mm_shuffle_epi8(tmp16a, _mm_set1_epi8(7));
+ tmp16b = _mm_shuffle_epi8(tmp16b, _mm_set1_epi8(7));
+ tmp16c = _mm_shuffle_epi8(tmp16c, _mm_set1_epi8(7));
+ }
+ // Extend last column
+ for (int i = -marginX; i < -16; i += 16)
+ {
+ _mm_storeu_si128((__m128i*)(dstF + row * dstStride + block_width + marginX + i), tmp16f);
+ _mm_storeu_si128((__m128i*)(dstA + row * dstStride + block_width + marginX + i), tmp16a);
+ _mm_storeu_si128((__m128i*)(dstB + row * dstStride + block_width + marginX + i), tmp16b);
+ _mm_storeu_si128((__m128i*)(dstC + row * dstStride + block_width + marginX + i), tmp16c);
+ }
+
+ _mm_storeu_si128((__m128i*)(dstF + row * dstStride + block_width + marginX - 16), tmp16f); /* assumes marginX >= 16 */
+ _mm_storeu_si128((__m128i*)(dstA + row * dstStride + block_width + marginX - 16), tmp16a);
+ _mm_storeu_si128((__m128i*)(dstB + row * dstStride + block_width + marginX - 16), tmp16b);
+ _mm_storeu_si128((__m128i*)(dstC + row * dstStride + block_width + marginX - 16), tmp16c);
+
+ src += srcStride;
+ intF += intStride;
+ intA += intStride;
+ intB += intStride;
+ intC += intStride;
+ }
+
+ // Extending bottom rows
+ pixel *pe, *pi, *pp, *pf;
+ pf = dstF + (block_height - 1) * dstStride - marginX;
+ pe = dstA + (block_height - 1) * dstStride - marginX;
+ pi = dstB + (block_height - 1) * dstStride - marginX;
+ pp = dstC + (block_height - 1) * dstStride - marginX;
+ for (int y = 1; y <= marginY; y++)
+ {
+ memcpy(pf + y * dstStride, pf, block_width + marginX * 2);
+ }
+
+ for (int y = 1; y <= marginY; y++)
+ {
+ memcpy(pe + y * dstStride, pe, block_width + marginX * 2);
+ }
+
+ for (int y = 1; y <= marginY; y++)
+ {
+ memcpy(pi + y * dstStride, pi, block_width + marginX * 2);
+ }
+
+ for (int y = 1; y <= marginY; y++)
+ {
+ memcpy(pp + y * dstStride, pp, block_width + marginX * 2);
+ }
+
+ // Extending top rows
+ pf = dstF - marginX;
+ pe = dstA - marginX;
+ pi = dstB - marginX;
+ pp = dstC - marginX;
+ for (int y = 1; y <= marginY; y++)
+ {
+ memcpy(pf - y * dstStride, pf, block_width + marginX * 2);
+ }
+
+ for (int y = 1; y <= marginY; y++)
+ {
+ memcpy(pe - y * dstStride, pe, block_width + marginX * 2);
+ }
+
+ for (int y = 1; y <= marginY; y++)
+ {
+ memcpy(pi - y * dstStride, pi, block_width + marginX * 2);
+ }
+
+ for (int y = 1; y <= marginY; y++)
+ {
+ memcpy(pp - y * dstStride, pp, block_width + marginX * 2);
+ }
+}
+
#endif /* if INSTRSET >= X265_CPU_LEVEL_SSE41 */
#if INSTRSET >= X265_CPU_LEVEL_SSSE3