[vlc-commits] i420_rgb,i420_yuy2,i422_yuy2: fix and clarify SIMD documentation

Lyndon Brown git at videolan.org
Wed Mar 6 17:13:30 CET 2019


vlc | branch: master | Lyndon Brown <jnqnfe at gmail.com> | Sun Jan 20 06:39:06 2019 +0000| [db91a47a24884b607cb62d291ab9fe9f9f0afa89] | committer: Jean-Baptiste Kempf

i420_rgb,i420_yuy2,i422_yuy2: fix and clarify SIMD documentation

** documentation fixes only! **

includes...

SSE:
 - Fixes the stated Y/Cb/Cr counts from 8:4:4 to 16:8:8 (SSE2 registers are
   128 bits wide; the old counts were carried over from the MMX version; the
   arithmetic is sketched after the file summary below)
 - Adjusts the byte layouts to reflect the full 16 bytes, for correctness
   and clarity
 - Fixes mistakes in the latter lines of SSE2_UNPACK_32_ABGR_UNALIGNED
   and SSE2_UNPACK_32_ABGR_ALIGNED in i420_rgb
 - Fixes swapped G and B in the comments of the store lines in
   SSE2_UNPACK_32_ARGB_ALIGNED and SSE2_UNPACK_32_ARGB_UNALIGNED in the
   same file
 - Adds a note clarifying that much of the 15/16-bit unpacking documentation
   shows bit layout, whereas most other comments show byte layout (this had
   me confused for a while), and clarifies the conversion comment, which has
   nothing to do with the RGB24 format despite how it reads (i420_rgb)
 - Fixes the position of the red bits in the RV15 conversion comments, which
   seemed to ignore the right shift applied to xmm1 earlier (see the packing
   sketch below)
 - Fixes "pixels 4-7" -> "pixels 8-15"
MMX (i420_rgb only):
 - Fixes comment alignment
 - Adds the 15/16-bit bit-layout note and clarifies the conversion comment,
   as above
 - Fixes "pixels 0-3" -> "pixels 4-7"
 - Fixes the position of the red bits in the RV15 conversion comments, as
   above
 - Uses zeros instead of underscores in the byte layouts, for consistency
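
For reference, here is a rough scalar equivalent of the RV15/RV16 packing
that the corrected comments describe (illustrative only; pack_rv15 and
pack_rv16 are made-up names, not functions from these files):

#include <stdint.h>
#include <stdio.h>

/* RV15: 0rrrrrgg gggbbbbb; red sits in bits 14..10, which is why the
 * comments read "__r7r6r5 r4r3..." once red has been shifted right by 1. */
static uint16_t pack_rv15(uint8_t r, uint8_t g, uint8_t b)
{
    return (uint16_t)(((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3));
}

/* RV16: rrrrrggg gggbbbbb; red keeps bits 15..11 and green gets 6 bits. */
static uint16_t pack_rv16(uint8_t r, uint8_t g, uint8_t b)
{
    return (uint16_t)(((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
}

int main(void)
{
    printf("RV15(0xff,0,0) = %04x\n", (unsigned)pack_rv15(0xff, 0, 0)); /* 7c00 */
    printf("RV16(0xff,0,0) = %04x\n", (unsigned)pack_rv16(0xff, 0, 0)); /* f800 */
    return 0;
}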

Signed-off-by: Jean-Baptiste Kempf <jb at videolan.org>

> http://git.videolan.org/gitweb.cgi/vlc.git/?a=commit;h=db91a47a24884b607cb62d291ab9fe9f9f0afa89
---

 modules/video_chroma/i420_rgb_mmx.h  | 154 +++++------
 modules/video_chroma/i420_rgb_sse2.h | 478 +++++++++++++++++------------------
 modules/video_chroma/i420_yuy2.h     | 194 +++++++-------
 modules/video_chroma/i422_yuy2.h     | 126 ++++-----
 4 files changed, 476 insertions(+), 476 deletions(-)
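
The corrected 16:8:8 counts follow directly from register width with 4:2:0
input (chroma subsampled 2:1 horizontally); a quick sanity-check sketch, with
the function name being mine rather than anything from the patch:

#include <stdio.h>

static void samples_per_iteration(const char *isa, int reg_bits)
{
    int y = reg_bits / 8;   /* one 8-bit Y sample per register byte */
    int c = y / 2;          /* 4:2:0: half as many Cb and Cr per row */
    printf("%s: %d Y, %d Cb, %d Cr\n", isa, y, c, c);
}

int main(void)
{
    samples_per_iteration("MMX (64-bit mm registers)", 64);     /* 8:4:4  */
    samples_per_iteration("SSE2 (128-bit xmm registers)", 128); /* 16:8:8 */
    return 0;
}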

diff --git a/modules/video_chroma/i420_rgb_mmx.h b/modules/video_chroma/i420_rgb_mmx.h
index bdbe00551b..521a830ce9 100644
--- a/modules/video_chroma/i420_rgb_mmx.h
+++ b/modules/video_chroma/i420_rgb_mmx.h
@@ -87,31 +87,31 @@ movq      (%0), %%mm6       # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
  * mm6 -> Y even, mm7 -> Y odd
  */
 
-#define MMX_YUV_MUL "                                                       \n\
-# convert the chroma part                                                   \n\
-punpcklbw %%mm4, %%mm0          # scatter 4 Cb    00 u3 00 u2 00 u1 00 u0   \n\
-punpcklbw %%mm4, %%mm1          # scatter 4 Cr    00 v3 00 v2 00 v1 00 v0   \n\
-psubsw    %4, %%mm0     # Cb -= 128                                 \n\
-psubsw    %4, %%mm1     # Cr -= 128                                 \n\
-psllw     $3, %%mm0             # Promote precision                         \n\
-psllw     $3, %%mm1             # Promote precision                         \n\
-movq      %%mm0, %%mm2          # Copy 4 Cb       00 u3 00 u2 00 u1 00 u0   \n\
-movq      %%mm1, %%mm3          # Copy 4 Cr       00 v3 00 v2 00 v1 00 v0   \n\
-pmulhw    %8, %%mm2 # Mul Cb with green coeff -> Cb green       \n\
-pmulhw    %11, %%mm3 # Mul Cr with green coeff -> Cr green       \n\
-pmulhw    %9, %%mm0  # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0   \n\
-pmulhw    %10, %%mm1   # Mul Cr -> Cred  00 r3 00 r2 00 r1 00 r0   \n\
-paddsw    %%mm3, %%mm2          # Cb green + Cr green -> Cgreen             \n\
-                                                                            \n\
-# convert the luma part                                                     \n\
-psubusb   %5, %%mm6     # Y -= 16                                   \n\
-movq      %%mm6, %%mm7          # Copy 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
-pand      %6, %%mm6   # get Y even      00 Y6 00 Y4 00 Y2 00 Y0   \n\
-psrlw     $8, %%mm7             # get Y odd       00 Y7 00 Y5 00 Y3 00 Y1   \n\
-psllw     $3, %%mm6             # Promote precision                         \n\
-psllw     $3, %%mm7             # Promote precision                         \n\
-pmulhw    %7, %%mm6 # Mul 4 Y even    00 y6 00 y4 00 y2 00 y0   \n\
-pmulhw    %7, %%mm7 # Mul 4 Y odd     00 y7 00 y5 00 y3 00 y1   \n\
+#define MMX_YUV_MUL "                                                        \n\
+# convert the chroma part                                                    \n\
+punpcklbw %%mm4, %%mm0          # scatter 4 Cb     00 u3 00 u2 00 u1 00 u0   \n\
+punpcklbw %%mm4, %%mm1          # scatter 4 Cr     00 v3 00 v2 00 v1 00 v0   \n\
+psubsw    %4, %%mm0             # Cb -= 128                                  \n\
+psubsw    %4, %%mm1             # Cr -= 128                                  \n\
+psllw     $3, %%mm0             # Promote precision                          \n\
+psllw     $3, %%mm1             # Promote precision                          \n\
+movq      %%mm0, %%mm2          # Copy 4 Cb        00 u3 00 u2 00 u1 00 u0   \n\
+movq      %%mm1, %%mm3          # Copy 4 Cr        00 v3 00 v2 00 v1 00 v0   \n\
+pmulhw    %8, %%mm2             # Mul Cb with green coeff -> Cb green        \n\
+pmulhw    %11, %%mm3            # Mul Cr with green coeff -> Cr green        \n\
+pmulhw    %9, %%mm0             # Mul Cb -> Cblue  00 b3 00 b2 00 b1 00 b0   \n\
+pmulhw    %10, %%mm1            # Mul Cr -> Cred   00 r3 00 r2 00 r1 00 r0   \n\
+paddsw    %%mm3, %%mm2          # Cb green + Cr green -> Cgreen              \n\
+                                                                             \n\
+# convert the luma part                                                      \n\
+psubusb   %5, %%mm6             # Y -= 16                                    \n\
+movq      %%mm6, %%mm7          # Copy 8 Y         Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
+pand      %6, %%mm6             # get Y even       00 Y6 00 Y4 00 Y2 00 Y0   \n\
+psrlw     $8, %%mm7             # get Y odd        00 Y7 00 Y5 00 Y3 00 Y1   \n\
+psllw     $3, %%mm6             # Promote precision                          \n\
+psllw     $3, %%mm7             # Promote precision                          \n\
+pmulhw    %7, %%mm6             # Mul 4 Y even     00 y6 00 y4 00 y2 00 y0   \n\
+pmulhw    %7, %%mm7             # Mul 4 Y odd      00 y7 00 y5 00 y3 00 y1   \n\
 "
 
 /*
@@ -122,32 +122,32 @@ pmulhw    %7, %%mm7 # Mul 4 Y odd     00 y7 00 y5 00 y3 00 y1   \n\
  * mm6 -> Y even, mm7 -> Y odd
  */
 
-#define MMX_YUV_ADD "                                                       \n\
-# Do horizontal and vertical scaling                                        \n\
-movq      %%mm0, %%mm3          # Copy Cblue                                \n\
-movq      %%mm1, %%mm4          # Copy Cred                                 \n\
-movq      %%mm2, %%mm5          # Copy Cgreen                               \n\
-paddsw    %%mm6, %%mm0          # Y even + Cblue  00 B6 00 B4 00 B2 00 B0   \n\
-paddsw    %%mm7, %%mm3          # Y odd  + Cblue  00 B7 00 B5 00 B3 00 B1   \n\
-paddsw    %%mm6, %%mm1          # Y even + Cred   00 R6 00 R4 00 R2 00 R0   \n\
-paddsw    %%mm7, %%mm4          # Y odd  + Cred   00 R7 00 R5 00 R3 00 R1   \n\
-paddsw    %%mm6, %%mm2          # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0   \n\
-paddsw    %%mm7, %%mm5          # Y odd  + Cgreen 00 G7 00 G5 00 G3 00 G1   \n\
-                                                                            \n\
-# Limit RGB even to 0..255                                                  \n\
-packuswb  %%mm0, %%mm0          # B6 B4 B2 B0 / B6 B4 B2 B0                 \n\
-packuswb  %%mm1, %%mm1          # R6 R4 R2 R0 / R6 R4 R2 R0                 \n\
-packuswb  %%mm2, %%mm2          # G6 G4 G2 G0 / G6 G4 G2 G0                 \n\
-                                                                            \n\
-# Limit RGB odd to 0..255                                                   \n\
-packuswb  %%mm3, %%mm3          # B7 B5 B3 B1 / B7 B5 B3 B1                 \n\
-packuswb  %%mm4, %%mm4          # R7 R5 R3 R1 / R7 R5 R3 R1                 \n\
-packuswb  %%mm5, %%mm5          # G7 G5 G3 G1 / G7 G5 G3 G1                 \n\
-                                                                            \n\
-# Interleave RGB even and odd                                               \n\
-punpcklbw %%mm3, %%mm0          #                 B7 B6 B5 B4 B3 B2 B1 B0   \n\
-punpcklbw %%mm4, %%mm1          #                 R7 R6 R5 R4 R3 R2 R1 R0   \n\
-punpcklbw %%mm5, %%mm2          #                 G7 G6 G5 G4 G3 G2 G1 G0   \n\
+#define MMX_YUV_ADD "                                                        \n\
+# Do horizontal and vertical scaling                                         \n\
+movq      %%mm0, %%mm3          # Copy Cblue                                 \n\
+movq      %%mm1, %%mm4          # Copy Cred                                  \n\
+movq      %%mm2, %%mm5          # Copy Cgreen                                \n\
+paddsw    %%mm6, %%mm0          # Y even + Cblue   00 B6 00 B4 00 B2 00 B0   \n\
+paddsw    %%mm7, %%mm3          # Y odd  + Cblue   00 B7 00 B5 00 B3 00 B1   \n\
+paddsw    %%mm6, %%mm1          # Y even + Cred    00 R6 00 R4 00 R2 00 R0   \n\
+paddsw    %%mm7, %%mm4          # Y odd  + Cred    00 R7 00 R5 00 R3 00 R1   \n\
+paddsw    %%mm6, %%mm2          # Y even + Cgreen  00 G6 00 G4 00 G2 00 G0   \n\
+paddsw    %%mm7, %%mm5          # Y odd  + Cgreen  00 G7 00 G5 00 G3 00 G1   \n\
+                                                                             \n\
+# Limit RGB even to 0..255                                                   \n\
+packuswb  %%mm0, %%mm0          #                  B6 B4 B2 B0 B6 B4 B2 B0   \n\
+packuswb  %%mm1, %%mm1          #                  R6 R4 R2 R0 R6 R4 R2 R0   \n\
+packuswb  %%mm2, %%mm2          #                  G6 G4 G2 G0 G6 G4 G2 G0   \n\
+                                                                             \n\
+# Limit RGB odd to 0..255                                                    \n\
+packuswb  %%mm3, %%mm3          #                  B7 B5 B3 B1 B7 B5 B3 B1   \n\
+packuswb  %%mm4, %%mm4          #                  R7 R5 R3 R1 R7 R5 R3 R1   \n\
+packuswb  %%mm5, %%mm5          #                  G7 G5 G3 G1 G7 G5 G3 G1   \n\
+                                                                             \n\
+# Interleave RGB even and odd                                                \n\
+punpcklbw %%mm3, %%mm0          #                  B7 B6 B5 B4 B3 B2 B1 B0   \n\
+punpcklbw %%mm4, %%mm1          #                  R7 R6 R5 R4 R3 R2 R1 R0   \n\
+punpcklbw %%mm5, %%mm2          #                  G7 G6 G5 G4 G3 G2 G1 G0   \n\
 "
 
 /*
@@ -156,14 +156,14 @@ punpcklbw %%mm5, %%mm2          #                 G7 G6 G5 G4 G3 G2 G1 G0   \n\
 
 #define MMX_YUV_GRAY "                                                      \n\
 # convert the luma part                                                     \n\
-psubusb   %5, %%mm6                                                 \n\
+psubusb   %5, %%mm6                                                         \n\
 movq      %%mm6, %%mm7                                                      \n\
-pand      %6, %%mm6                                               \n\
+pand      %6, %%mm6                                                         \n\
 psrlw     $8, %%mm7                                                         \n\
 psllw     $3, %%mm6                                                         \n\
 psllw     $3, %%mm7                                                         \n\
-pmulhw    %7, %%mm6                                             \n\
-pmulhw    %7, %%mm7                                             \n\
+pmulhw    %7, %%mm6                                                         \n\
+pmulhw    %7, %%mm7                                                         \n\
 packuswb  %%mm6, %%mm6                                                      \n\
 packuswb  %%mm7, %%mm7                                                      \n\
 punpcklbw %%mm7, %%mm6                                                      \n\
@@ -171,8 +171,8 @@ punpcklbw %%mm7, %%mm6                                                      \n\
 
 #define MMX_UNPACK_16_GRAY "                                                \n\
 movq      %%mm6, %%mm5                                                      \n\
-pand      %12, %%mm6                                             \n\
-pand      %13, %%mm5                                             \n\
+pand      %12, %%mm6                                                        \n\
+pand      %13, %%mm5                                                        \n\
 movq      %%mm6, %%mm7                                                      \n\
 psrlw     $3, %%mm7                                                         \n\
 pxor      %%mm3, %%mm3                                                      \n\
@@ -199,32 +199,32 @@ movq      %%mm2, 8(%3)                                                      \n\
  * mm6 -> GB, mm7 -> AR pixel 0-3
  */
 
-#define MMX_UNPACK_15 "                                                     \n\
+#define MMX_UNPACK_15 "         # Note, much of this shows bit patterns (of a pair of bytes) \n\
 # mask unneeded bits off                                                    \n\
-pand      %12, %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
+pand      %12, %%mm0            # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
 psrlw     $3,%%mm0              # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
-pand      %12, %%mm2 # g7g6g5g4 g3______ g7g6g5g4 g3______       \n\
-pand      %12, %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
+pand      %12, %%mm2            # g7g6g5g4 g3______ g7g6g5g4 g3______       \n\
+pand      %12, %%mm1            # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
 psrlw     $1,%%mm1              # __r7r6r5 r4r3____ __r7r6r5 r4r3____       \n\
 pxor      %%mm4, %%mm4          # zero mm4                                  \n\
 movq      %%mm0, %%mm5          # Copy B7-B0                                \n\
 movq      %%mm2, %%mm7          # Copy G7-G0                                \n\
                                                                             \n\
-# convert rgb24 plane to rgb15 pack for pixel 0-3                           \n\
+# pack the 3 separate RGB bytes into 2 for pixels 0-3                       \n\
 punpcklbw %%mm4, %%mm2          # ________ ________ g7g6g5g4 g3______       \n\
-punpcklbw %%mm1, %%mm0          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
+punpcklbw %%mm1, %%mm0          # __r7r6r5 r4r3____ ______b7 b6b5b4b3       \n\
 psllw     $2,%%mm2              # ________ ____g7g6 g5g4g3__ ________       \n\
-por       %%mm2, %%mm0          # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
+por       %%mm2, %%mm0          # __r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3       \n\
 movq      8(%0), %%mm6          # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
 movq      %%mm0, (%3)           # store pixel 0-3                           \n\
                                                                             \n\
-# convert rgb24 plane to rgb16 pack for pixel 0-3                           \n\
+# pack the 3 separate RGB bytes into 2 for pixels 4-7                       \n\
 punpckhbw %%mm4, %%mm7          # ________ ________ g7g6g5g4 g3______       \n\
-punpckhbw %%mm1, %%mm5          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
+punpckhbw %%mm1, %%mm5          # __r7r6r5 r4r3____ ______b7 b6b5b4b3       \n\
 psllw     $2,%%mm7              # ________ ____g7g6 g5g4g3__ ________       \n\
-movd      4(%1), %%mm0          # Load 4 Cb       __ __ __ __ u3 u2 u1 u0   \n\
-por       %%mm7, %%mm5          # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
-movd      4(%2), %%mm1          # Load 4 Cr       __ __ __ __ v3 v2 v1 v0   \n\
+movd      4(%1), %%mm0          # Load 4 Cb       00 00 00 00 u3 u2 u1 u0   \n\
+por       %%mm7, %%mm5          # __r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3       \n\
+movd      4(%2), %%mm1          # Load 4 Cr       00 00 00 00 v3 v2 v1 v0   \n\
 movq      %%mm5, 8(%3)          # store pixel 4-7                           \n\
 "
 
@@ -235,17 +235,17 @@ movq      %%mm5, 8(%3)          # store pixel 4-7                           \n\
  * mm6 -> GB, mm7 -> AR pixel 0-3
  */
 
-#define MMX_UNPACK_16 "                                                     \n\
+#define MMX_UNPACK_16 "         # Note, much of this shows bit patterns (of a pair of bytes) \n\
 # mask unneeded bits off                                                    \n\
-pand      %12, %%mm0 # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
-pand      %13, %%mm2 # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____       \n\
-pand      %12, %%mm1 # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
+pand      %12, %%mm0            # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
+pand      %13, %%mm2            # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____       \n\
+pand      %12, %%mm1            # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
 psrlw     $3,%%mm0              # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
 pxor      %%mm4, %%mm4          # zero mm4                                  \n\
 movq      %%mm0, %%mm5          # Copy B7-B0                                \n\
 movq      %%mm2, %%mm7          # Copy G7-G0                                \n\
                                                                             \n\
-# convert rgb24 plane to rgb16 pack for pixel 0-3                           \n\
+# pack the 3 separate RGB bytes into 2 for pixels 0-3                       \n\
 punpcklbw %%mm4, %%mm2          # ________ ________ g7g6g5g4 g3g2____       \n\
 punpcklbw %%mm1, %%mm0          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
 psllw     $3,%%mm2              # ________ __g7g6g5 g4g3g2__ ________       \n\
@@ -253,13 +253,13 @@ por       %%mm2, %%mm0          # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
 movq      8(%0), %%mm6          # Load 8 Y        Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
 movq      %%mm0, (%3)           # store pixel 0-3                           \n\
                                                                             \n\
-# convert rgb24 plane to rgb16 pack for pixel 0-3                           \n\
+# pack the 3 separate RGB bytes into 2 for pixels 4-7                       \n\
 punpckhbw %%mm4, %%mm7          # ________ ________ g7g6g5g4 g3g2____       \n\
 punpckhbw %%mm1, %%mm5          # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
 psllw     $3,%%mm7              # ________ __g7g6g5 g4g3g2__ ________       \n\
-movd      4(%1), %%mm0          # Load 4 Cb       __ __ __ __ u3 u2 u1 u0   \n\
+movd      4(%1), %%mm0          # Load 4 Cb       00 00 00 00 u3 u2 u1 u0   \n\
 por       %%mm7, %%mm5          # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
-movd      4(%2), %%mm1          # Load 4 Cr       __ __ __ __ v3 v2 v1 v0   \n\
+movd      4(%2), %%mm1          # Load 4 Cr       00 00 00 00 v3 v2 v1 v0   \n\
 movq      %%mm5, 8(%3)          # store pixel 4-7                           \n\
 "
 
diff --git a/modules/video_chroma/i420_rgb_sse2.h b/modules/video_chroma/i420_rgb_sse2.h
index b2092fae23..f8de7aba54 100644
--- a/modules/video_chroma/i420_rgb_sse2.h
+++ b/modules/video_chroma/i420_rgb_sse2.h
@@ -1,5 +1,5 @@
 /*****************************************************************************
- * i420_rgb_sse2.h: MMX YUV transformation assembly
+ * i420_rgb_sse2.h: SSE2 YUV transformation assembly
  *****************************************************************************
  * Copyright (C) 1999-2012 VLC authors and VideoLAN
  *
@@ -37,85 +37,85 @@
 
 #define SSE2_END  __asm__ __volatile__ ( "sfence" ::: "memory" )
 
-#define SSE2_INIT_16_ALIGNED "                                              \n\
-movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 00 u3 u2 u1 u0       \n\
-movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 00 v3 v2 v1 v0       \n\
-pxor      %%xmm4, %%xmm4    # zero mm4                                      \n\
-movdqa      (%0), %%xmm6    # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
+#define SSE2_INIT_16_ALIGNED "                                        \n\
+movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 ... u2 u1 u0   \n\
+movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 ... v2 v1 v0   \n\
+pxor      %%xmm4, %%xmm4    # zero mm4                                \n\
+movdqa      (%0), %%xmm6    # Load 16 Y       YF YE YD ... Y2 Y1 Y0   \n\
 "
 
-#define SSE2_INIT_16_UNALIGNED "                                            \n\
-movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 00 u3 u2 u1 u0       \n\
-movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 00 v3 v2 v1 v0       \n\
-pxor      %%xmm4, %%xmm4    # zero mm4                                      \n\
-movdqu      (%0), %%xmm6    # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
-prefetchnta (%3)            # Tell CPU not to cache output RGB data         \n\
+#define SSE2_INIT_16_UNALIGNED "                                      \n\
+movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 ... u2 u1 u0   \n\
+movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 ... v2 v1 v0   \n\
+pxor      %%xmm4, %%xmm4    # zero mm4                                \n\
+movdqu      (%0), %%xmm6    # Load 16 Y       YF YE YD ... Y2 Y1 Y0   \n\
+prefetchnta (%3)            # Tell CPU not to cache output RGB data   \n\
 "
 
-#define SSE2_INIT_32_ALIGNED "                                              \n\
-movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 00 u3 u2 u1 u0       \n\
-movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 00 v3 v2 v1 v0       \n\
-pxor      %%xmm4, %%xmm4    # zero mm4                                      \n\
-movdqa      (%0), %%xmm6    # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
+#define SSE2_INIT_32_ALIGNED "                                        \n\
+movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 ... u2 u1 u0   \n\
+movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 ... v2 v1 v0   \n\
+pxor      %%xmm4, %%xmm4    # zero mm4                                \n\
+movdqa      (%0), %%xmm6    # Load 16 Y       YF YE YD ... Y2 Y1 Y0   \n\
 "
 
-#define SSE2_INIT_32_UNALIGNED "                                            \n\
-movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 00 u3 u2 u1 u0       \n\
-movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 00 v3 v2 v1 v0       \n\
-pxor      %%xmm4, %%xmm4    # zero mm4                                      \n\
-movdqu      (%0), %%xmm6    # Load 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0       \n\
-prefetchnta (%3)            # Tell CPU not to cache output RGB data         \n\
+#define SSE2_INIT_32_UNALIGNED "                                      \n\
+movq        (%1), %%xmm0    # Load 8 Cb       00 00 00 ... u2 u1 u0   \n\
+movq        (%2), %%xmm1    # Load 8 Cr       00 00 00 ... v2 v1 v0   \n\
+pxor      %%xmm4, %%xmm4    # zero mm4                                \n\
+movdqu      (%0), %%xmm6    # Load 16 Y       YF YE YD ... Y2 Y1 Y0   \n\
+prefetchnta (%3)            # Tell CPU not to cache output RGB data   \n\
 "
 
 #define SSE2_YUV_MUL "                                                      \n\
 # convert the chroma part                                                   \n\
-punpcklbw %%xmm4, %%xmm0        # scatter 8 Cb    00 u3 00 u2 00 u1 00 u0   \n\
-punpcklbw %%xmm4, %%xmm1        # scatter 8 Cr    00 v3 00 v2 00 v1 00 v0   \n\
+punpcklbw %%xmm4, %%xmm0        # scatter 8 Cb    00 u7 ... 00 u1 00 u0     \n\
+punpcklbw %%xmm4, %%xmm1        # scatter 8 Cr    00 v7 ... 00 v1 00 v0     \n\
 movl      $0x00800080, %%eax    #                                           \n\
 movd      %%eax, %%xmm5         #                                           \n\
-pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     0080 0080 ... 0080 0080   \n\
+pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     00 80 ... 00 80 00 80     \n\
 psubsw    %%xmm5, %%xmm0        # Cb -= 128                                 \n\
 psubsw    %%xmm5, %%xmm1        # Cr -= 128                                 \n\
 psllw     $3, %%xmm0            # Promote precision                         \n\
 psllw     $3, %%xmm1            # Promote precision                         \n\
-movdqa    %%xmm0, %%xmm2        # Copy 8 Cb       00 u3 00 u2 00 u1 00 u0   \n\
-movdqa    %%xmm1, %%xmm3        # Copy 8 Cr       00 v3 00 v2 00 v1 00 v0   \n\
+movdqa    %%xmm0, %%xmm2        # Copy 8 Cb       00 u7 ... 00 u1 00 u0     \n\
+movdqa    %%xmm1, %%xmm3        # Copy 8 Cr       00 v7 ... 00 v1 00 v0     \n\
 movl      $0xf37df37d, %%eax    #                                           \n\
 movd      %%eax, %%xmm5         #                                           \n\
-pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     f37d f37d ... f37d f37d   \n\
+pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     f3 7d ... f3 7d f3 7d     \n\
 pmulhw    %%xmm5, %%xmm2        # Mul Cb with green coeff -> Cb green       \n\
 movl      $0xe5fce5fc, %%eax    #                                           \n\
 movd      %%eax, %%xmm5         #                                           \n\
-pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     e5fc e5fc ... e5fc e5fc   \n\
+pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     e5 fc ... e5 fc e5 fc     \n\
 pmulhw    %%xmm5, %%xmm3        # Mul Cr with green coeff -> Cr green       \n\
 movl      $0x40934093, %%eax    #                                           \n\
 movd      %%eax, %%xmm5         #                                           \n\
-pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     4093 4093 ... 4093 4093   \n\
-pmulhw    %%xmm5, %%xmm0        # Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0   \n\
+pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     40 93 ... 40 93 40 93     \n\
+pmulhw    %%xmm5, %%xmm0        # Mul Cb -> Cblue 00 b7 ... 00 b1 00 b0     \n\
 movl      $0x33123312, %%eax    #                                           \n\
 movd      %%eax, %%xmm5         #                                           \n\
-pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     3312 3312 ... 3312 3312   \n\
-pmulhw    %%xmm5, %%xmm1        # Mul Cr -> Cred  00 r3 00 r2 00 r1 00 r0   \n\
+pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     33 12 ... 33 12 33 12     \n\
+pmulhw    %%xmm5, %%xmm1        # Mul Cr -> Cred  00 r7 ... 00 r1 00 r0     \n\
 paddsw    %%xmm3, %%xmm2        # Cb green + Cr green -> Cgreen             \n\
                                                                             \n\
 # convert the luma part                                                     \n\
 movl      $0x10101010, %%eax    #                                           \n\
 movd      %%eax, %%xmm5         #                                           \n\
-pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to   1010 1010 ... 1010 1010     \n\
+pshufd    $0, %%xmm5, %%xmm5    # Set xmm5 to     10 10 ... 10 10 10 10     \n\
 psubusb   %%xmm5, %%xmm6        # Y -= 16                                   \n\
-movdqa    %%xmm6, %%xmm7        # Copy 16 Y       Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
+movdqa    %%xmm6, %%xmm7        # Copy 16 Y       YF YE YD ... Y2 Y1 Y0     \n\
 movl      $0x00ff00ff, %%eax    #                                           \n\
 movd      %%eax, %%xmm5         #                                           \n\
-pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     00ff 00ff ... 00ff 00ff   \n\
-pand      %%xmm5, %%xmm6        # get Y even      00 Y6 00 Y4 00 Y2 00 Y0   \n\
-psrlw     $8, %%xmm7            # get Y odd       00 Y7 00 Y5 00 Y3 00 Y1   \n\
+pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     00 ff ... 00 ff 00 ff     \n\
+pand      %%xmm5, %%xmm6        # get Y even      00 YD ... 00 Y2 00 Y0     \n\
+psrlw     $8, %%xmm7            # get Y odd       00 YF ... 00 Y3 00 Y1     \n\
 psllw     $3, %%xmm6            # Promote precision                         \n\
 psllw     $3, %%xmm7            # Promote precision                         \n\
 movl      $0x253f253f, %%eax    #                                           \n\
 movd      %%eax, %%xmm5         #                                           \n\
-pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     253f 253f ... 253f 253f   \n\
-pmulhw    %%xmm5, %%xmm6        # Mul 8 Y even    00 y6 00 y4 00 y2 00 y0   \n\
-pmulhw    %%xmm5, %%xmm7        # Mul 8 Y odd     00 y7 00 y5 00 y3 00 y1   \n\
+pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     25 3f ... 25 3f 25 3f     \n\
+pmulhw    %%xmm5, %%xmm6        # Mul 8 Y even    00 yD ... 00 y2 00 y0     \n\
+pmulhw    %%xmm5, %%xmm7        # Mul 8 Y odd     00 yF ... 00 y3 00 y1     \n\
 "
 
 #define SSE2_YUV_ADD "                                                      \n\
@@ -123,307 +123,307 @@ pmulhw    %%xmm5, %%xmm7        # Mul 8 Y odd     00 y7 00 y5 00 y3 00 y1   \n\
 movdqa    %%xmm0, %%xmm3        # Copy Cblue                                \n\
 movdqa    %%xmm1, %%xmm4        # Copy Cred                                 \n\
 movdqa    %%xmm2, %%xmm5        # Copy Cgreen                               \n\
-paddsw    %%xmm6, %%xmm0        # Y even + Cblue  00 B6 00 B4 00 B2 00 B0   \n\
-paddsw    %%xmm7, %%xmm3        # Y odd  + Cblue  00 B7 00 B5 00 B3 00 B1   \n\
-paddsw    %%xmm6, %%xmm1        # Y even + Cred   00 R6 00 R4 00 R2 00 R0   \n\
-paddsw    %%xmm7, %%xmm4        # Y odd  + Cred   00 R7 00 R5 00 R3 00 R1   \n\
-paddsw    %%xmm6, %%xmm2        # Y even + Cgreen 00 G6 00 G4 00 G2 00 G0   \n\
-paddsw    %%xmm7, %%xmm5        # Y odd  + Cgreen 00 G7 00 G5 00 G3 00 G1   \n\
+paddsw    %%xmm6, %%xmm0        # Y even + Cblue  00 BE ... 00 B2 00 B0     \n\
+paddsw    %%xmm7, %%xmm3        # Y odd  + Cblue  00 BF ... 00 B3 00 B1     \n\
+paddsw    %%xmm6, %%xmm1        # Y even + Cred   00 RE ... 00 R2 00 R0     \n\
+paddsw    %%xmm7, %%xmm4        # Y odd  + Cred   00 RF ... 00 R3 00 R1     \n\
+paddsw    %%xmm6, %%xmm2        # Y even + Cgreen 00 GE ... 00 G2 00 G0     \n\
+paddsw    %%xmm7, %%xmm5        # Y odd  + Cgreen 00 GF ... 00 G3 00 G1     \n\
                                                                             \n\
 # Limit RGB even to 0..255                                                  \n\
-packuswb  %%xmm0, %%xmm0        # B6 B4 B2 B0 / B6 B4 B2 B0                 \n\
-packuswb  %%xmm1, %%xmm1        # R6 R4 R2 R0 / R6 R4 R2 R0                 \n\
-packuswb  %%xmm2, %%xmm2        # G6 G4 G2 G0 / G6 G4 G2 G0                 \n\
+packuswb  %%xmm0, %%xmm0        #             ... B4 B2 B0 ... B4 B2 B0     \n\
+packuswb  %%xmm1, %%xmm1        #             ... R4 R2 R0 ... R4 R2 R0     \n\
+packuswb  %%xmm2, %%xmm2        #             ... G4 G2 G0 ... G4 G2 G0     \n\
                                                                             \n\
 # Limit RGB odd to 0..255                                                   \n\
-packuswb  %%xmm3, %%xmm3        # B7 B5 B3 B1 / B7 B5 B3 B1                 \n\
-packuswb  %%xmm4, %%xmm4        # R7 R5 R3 R1 / R7 R5 R3 R1                 \n\
-packuswb  %%xmm5, %%xmm5        # G7 G5 G3 G1 / G7 G5 G3 G1                 \n\
+packuswb  %%xmm3, %%xmm3        #             ... B5 B3 B1 ... B5 B3 B1     \n\
+packuswb  %%xmm4, %%xmm4        #             ... R5 R3 R1 ... R5 R3 R1     \n\
+packuswb  %%xmm5, %%xmm5        #             ... G5 G3 G1 ... G5 G3 G1     \n\
                                                                             \n\
 # Interleave RGB even and odd                                               \n\
-punpcklbw %%xmm3, %%xmm0        #                 B7 B6 B5 B4 B3 B2 B1 B0   \n\
-punpcklbw %%xmm4, %%xmm1        #                 R7 R6 R5 R4 R3 R2 R1 R0   \n\
-punpcklbw %%xmm5, %%xmm2        #                 G7 G6 G5 G4 G3 G2 G1 G0   \n\
+punpcklbw %%xmm3, %%xmm0        #                 BF BE BD ... B2 B1 B0     \n\
+punpcklbw %%xmm4, %%xmm1        #                 RF RE RD ... R2 R1 R0     \n\
+punpcklbw %%xmm5, %%xmm2        #                 GF GE GD ... G2 G1 G0     \n\
 "
 
-#define SSE2_UNPACK_15_ALIGNED "                                            \n\
+#define SSE2_UNPACK_15_ALIGNED "# Note, much of this shows bit patterns (of a pair of bytes) \n\
 # mask unneeded bits off                                                    \n\
 movl      $0xf8f8f8f8, %%eax    #                                           \n\
 movd      %%eax, %%xmm5         #                                           \n\
-pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
+pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8 f8 ... f8 f8 f8 f8     \n\
 pand      %%xmm5, %%xmm0        # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
 psrlw     $3,%%xmm0             # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
 pand      %%xmm5, %%xmm2        # g7g6g5g4 g3______ g7g6g5g4 g3______       \n\
 pand      %%xmm5, %%xmm1        # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
 psrlw     $1,%%xmm1             # __r7r6r5 r4r3____ __r7r6r5 r4r3____       \n\
 pxor      %%xmm4, %%xmm4        # zero mm4                                  \n\
-movdqa    %%xmm0, %%xmm5        # Copy B15-B0                               \n\
-movdqa    %%xmm2, %%xmm7        # Copy G15-G0                               \n\
+movdqa    %%xmm0, %%xmm5        # Copy BF-B0                                \n\
+movdqa    %%xmm2, %%xmm7        # Copy GF-G0                                \n\
                                                                             \n\
-# convert rgb24 plane to rgb15 pack for pixel 0-7                           \n\
+# pack the 3 separate RGB bytes into 2 for pixels 0-7                       \n\
 punpcklbw %%xmm4, %%xmm2        # ________ ________ g7g6g5g4 g3______       \n\
-punpcklbw %%xmm1, %%xmm0        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
+punpcklbw %%xmm1, %%xmm0        # __r7r6r5 r4r3____ ______b7 b6b5b4b3       \n\
 psllw     $2,%%xmm2             # ________ ____g7g6 g5g4g3__ ________       \n\
-por       %%xmm2, %%xmm0        # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
+por       %%xmm2, %%xmm0        # __r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3       \n\
 movntdq   %%xmm0, (%3)          # store pixel 0-7                           \n\
                                                                             \n\
-# convert rgb24 plane to rgb15 pack for pixel 8-15                          \n\
+# pack the 3 separate RGB bytes into 2 for pixels 8-15                      \n\
 punpckhbw %%xmm4, %%xmm7        # ________ ________ g7g6g5g4 g3______       \n\
-punpckhbw %%xmm1, %%xmm5        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
+punpckhbw %%xmm1, %%xmm5        # __r7r6r5 r4r3____ ______b7 b6b5b4b3       \n\
 psllw     $2,%%xmm7             # ________ ____g7g6 g5g4g3__ ________       \n\
-por       %%xmm7, %%xmm5        # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
-movntdq   %%xmm5, 16(%3)        # store pixel 4-7                           \n\
+por       %%xmm7, %%xmm5        # __r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3       \n\
+movntdq   %%xmm5, 16(%3)        # store pixel 8-15                          \n\
 "
 
-#define SSE2_UNPACK_15_UNALIGNED "                                          \n\
+#define SSE2_UNPACK_15_UNALIGNED "# Note, much of this shows bit patterns (of a pair of bytes) \n\
 # mask unneeded bits off                                                    \n\
 movl      $0xf8f8f8f8, %%eax    #                                           \n\
 movd      %%eax, %%xmm5         #                                           \n\
-pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
+pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8 f8 ... f8 f8 f8 f8     \n\
 pand      %%xmm5, %%xmm0        # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
 psrlw     $3,%%xmm0             # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
 pand      %%xmm5, %%xmm2        # g7g6g5g4 g3______ g7g6g5g4 g3______       \n\
 pand      %%xmm5, %%xmm1        # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
 psrlw     $1,%%xmm1             # __r7r6r5 r4r3____ __r7r6r5 r4r3____       \n\
 pxor      %%xmm4, %%xmm4        # zero mm4                                  \n\
-movdqa    %%xmm0, %%xmm5        # Copy B15-B0                               \n\
-movdqa    %%xmm2, %%xmm7        # Copy G15-G0                               \n\
+movdqa    %%xmm0, %%xmm5        # Copy BF-B0                                \n\
+movdqa    %%xmm2, %%xmm7        # Copy GF-G0                                \n\
                                                                             \n\
-# convert rgb24 plane to rgb15 pack for pixel 0-7                           \n\
+# pack the 3 separate RGB bytes into 2 for pixels 0-7                       \n\
 punpcklbw %%xmm4, %%xmm2        # ________ ________ g7g6g5g4 g3______       \n\
-punpcklbw %%xmm1, %%xmm0        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
+punpcklbw %%xmm1, %%xmm0        # __r7r6r5 r4r3____ ______b7 b6b5b4b3       \n\
 psllw     $2,%%xmm2             # ________ ____g7g6 g5g4g3__ ________       \n\
-por       %%xmm2, %%xmm0        # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
+por       %%xmm2, %%xmm0        # __r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3       \n\
 movdqu    %%xmm0, (%3)          # store pixel 0-7                           \n\
                                                                             \n\
-# convert rgb24 plane to rgb15 pack for pixel 8-15                          \n\
+# pack the 3 separate RGB bytes into 2 for pixels 8-15                      \n\
 punpckhbw %%xmm4, %%xmm7        # ________ ________ g7g6g5g4 g3______       \n\
-punpckhbw %%xmm1, %%xmm5        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
+punpckhbw %%xmm1, %%xmm5        # __r7r6r5 r4r3____ ______b7 b6b5b4b3       \n\
 psllw     $2,%%xmm7             # ________ ____g7g6 g5g4g3__ ________       \n\
-por       %%xmm7, %%xmm5        # r7r6r5r4 r3__g7g6 g5g4g3b7 b6b5b4b3       \n\
-movdqu    %%xmm5, 16(%3)        # store pixel 4-7                           \n\
+por       %%xmm7, %%xmm5        # __r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3       \n\
+movdqu    %%xmm5, 16(%3)        # store pixel 8-15                          \n\
 "
 
-#define SSE2_UNPACK_16_ALIGNED "                                            \n\
+#define SSE2_UNPACK_16_ALIGNED "# Note, much of this shows bit patterns (of a pair of bytes) \n\
 # mask unneeded bits off                                                    \n\
 movl      $0xf8f8f8f8, %%eax    #                                           \n\
 movd      %%eax, %%xmm5         #                                           \n\
-pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
+pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8 f8 ... f8 f8 f8 f8     \n\
 pand      %%xmm5, %%xmm0        # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
 pand      %%xmm5, %%xmm1        # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
 movl      $0xfcfcfcfc, %%eax    #                                           \n\
 movd      %%eax, %%xmm5         #                                           \n\
-pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
+pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     fc fc ... fc fc fc fc     \n\
 pand      %%xmm5, %%xmm2        # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____       \n\
 psrlw     $3,%%xmm0             # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
 pxor      %%xmm4, %%xmm4        # zero mm4                                  \n\
-movdqa    %%xmm0, %%xmm5        # Copy B15-B0                               \n\
-movdqa    %%xmm2, %%xmm7        # Copy G15-G0                               \n\
+movdqa    %%xmm0, %%xmm5        # Copy BF-B0                                \n\
+movdqa    %%xmm2, %%xmm7        # Copy GF-G0                                \n\
                                                                             \n\
-# convert rgb24 plane to rgb16 pack for pixel 0-7                           \n\
+# pack the 3 separate RGB bytes into 2 for pixels 0-7                       \n\
 punpcklbw %%xmm4, %%xmm2        # ________ ________ g7g6g5g4 g3g2____       \n\
 punpcklbw %%xmm1, %%xmm0        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
 psllw     $3,%%xmm2             # ________ __g7g6g5 g4g3g2__ ________       \n\
 por       %%xmm2, %%xmm0        # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
 movntdq   %%xmm0, (%3)          # store pixel 0-7                           \n\
                                                                             \n\
-# convert rgb24 plane to rgb16 pack for pixel 8-15                          \n\
+# pack the 3 separate RGB bytes into 2 for pixels 8-15                      \n\
 punpckhbw %%xmm4, %%xmm7        # ________ ________ g7g6g5g4 g3g2____       \n\
 punpckhbw %%xmm1, %%xmm5        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
 psllw     $3,%%xmm7             # ________ __g7g6g5 g4g3g2__ ________       \n\
 por       %%xmm7, %%xmm5        # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
-movntdq   %%xmm5, 16(%3)        # store pixel 4-7                           \n\
+movntdq   %%xmm5, 16(%3)        # store pixel 8-15                          \n\
 "
 
-#define SSE2_UNPACK_16_UNALIGNED "                                          \n\
+#define SSE2_UNPACK_16_UNALIGNED "# Note, much of this shows bit patterns (of a pair of bytes) \n\
 # mask unneeded bits off                                                    \n\
 movl      $0xf8f8f8f8, %%eax    #                                           \n\
 movd      %%eax, %%xmm5         #                                           \n\
-pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
+pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8 f8 ... f8 f8 f8 f8     \n\
 pand      %%xmm5, %%xmm0        # b7b6b5b4 b3______ b7b6b5b4 b3______       \n\
 pand      %%xmm5, %%xmm1        # r7r6r5r4 r3______ r7r6r5r4 r3______       \n\
 movl      $0xfcfcfcfc, %%eax    #                                           \n\
 movd      %%eax, %%xmm5         #                                           \n\
-pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     f8f8 f8f8 ... f8f8 f8f8   \n\
+pshufd    $0, %%xmm5, %%xmm5    # set xmm5 to     fc fc ... fc fc fc fc     \n\
 pand      %%xmm5, %%xmm2        # g7g6g5g4 g3g2____ g7g6g5g4 g3g2____       \n\
 psrlw     $3,%%xmm0             # ______b7 b6b5b4b3 ______b7 b6b5b4b3       \n\
 pxor      %%xmm4, %%xmm4        # zero mm4                                  \n\
-movdqa    %%xmm0, %%xmm5        # Copy B15-B0                               \n\
-movdqa    %%xmm2, %%xmm7        # Copy G15-G0                               \n\
+movdqa    %%xmm0, %%xmm5        # Copy BF-B0                                \n\
+movdqa    %%xmm2, %%xmm7        # Copy GF-G0                                \n\
                                                                             \n\
-# convert rgb24 plane to rgb16 pack for pixel 0-7                           \n\
+# pack the 3 separate RGB bytes into 2 for pixels 0-7                       \n\
 punpcklbw %%xmm4, %%xmm2        # ________ ________ g7g6g5g4 g3g2____       \n\
 punpcklbw %%xmm1, %%xmm0        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
 psllw     $3,%%xmm2             # ________ __g7g6g5 g4g3g2__ ________       \n\
 por       %%xmm2, %%xmm0        # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
 movdqu    %%xmm0, (%3)          # store pixel 0-7                           \n\
                                                                             \n\
-# convert rgb24 plane to rgb16 pack for pixel 8-15                          \n\
+# pack the 3 separate RGB bytes into 2 for pixels 8-15                      \n\
 punpckhbw %%xmm4, %%xmm7        # ________ ________ g7g6g5g4 g3g2____       \n\
 punpckhbw %%xmm1, %%xmm5        # r7r6r5r4 r3______ ______b7 b6b5b4b3       \n\
 psllw     $3,%%xmm7             # ________ __g7g6g5 g4g3g2__ ________       \n\
 por       %%xmm7, %%xmm5        # r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3       \n\
-movdqu    %%xmm5, 16(%3)        # store pixel 4-7                           \n\
+movdqu    %%xmm5, 16(%3)        # store pixel 8-15                          \n\
 "
 
-#define SSE2_UNPACK_32_ARGB_ALIGNED "                                       \n\
-pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
-movdqa    %%xmm0, %%xmm4  #               B7 B6 B5 B4 B3 B2 B1 B0           \n\
-punpcklbw %%xmm2, %%xmm4  #               G3 B3 G2 B2 G1 B1 G0 B0           \n\
-movdqa    %%xmm1, %%xmm5  #               R7 R6 R5 R4 R3 R2 R1 R0           \n\
-punpcklbw %%xmm3, %%xmm5  #               00 R3 00 R2 00 R1 00 R0           \n\
-movdqa    %%xmm4, %%xmm6  #               G3 B3 G2 B2 G1 B1 G0 B0           \n\
-punpcklwd %%xmm5, %%xmm4  #               00 R1 B1 G1 00 R0 B0 G0           \n\
-movntdq   %%xmm4, (%3)    # Store ARGB3 ARGB2 ARGB1 ARGB0                   \n\
-punpckhwd %%xmm5, %%xmm6  #               00 R3 B3 G3 00 R2 B2 G2           \n\
-movntdq   %%xmm6, 16(%3)  # Store ARGB7 ARGB6 ARGB5 ARGB4                   \n\
-punpckhbw %%xmm2, %%xmm0  #               G7 B7 G6 B6 G5 B5 G4 B4           \n\
-punpckhbw %%xmm3, %%xmm1  #               00 R7 00 R6 00 R5 00 R4           \n\
-movdqa    %%xmm0, %%xmm5  #               G7 B7 G6 B6 G5 B5 G4 B4           \n\
-punpcklwd %%xmm1, %%xmm5  #               00 R5 B5 G5 00 R4 B4 G4           \n\
-movntdq   %%xmm5, 32(%3)  # Store ARGB11 ARGB10 ARGB9 ARGB8                 \n\
-punpckhwd %%xmm1, %%xmm0  #               00 R7 B7 G7 00 R6 B6 G6           \n\
-movntdq   %%xmm0, 48(%3)  # Store ARGB15 ARGB14 ARGB13 ARGB12               \n\
+#define SSE2_UNPACK_32_ARGB_ALIGNED "                                     \n\
+pxor      %%xmm3, %%xmm3  # zero xmm3                                     \n\
+movdqa    %%xmm0, %%xmm4  #               BF BE BD ... B2 B1 B0           \n\
+punpcklbw %%xmm2, %%xmm4  #               G7 B7 ... G1 B1 G0 B0           \n\
+movdqa    %%xmm1, %%xmm5  #               RF RE RD ... R2 R1 R0           \n\
+punpcklbw %%xmm3, %%xmm5  #               00 R7 ... 00 R1 00 R0           \n\
+movdqa    %%xmm4, %%xmm6  #               G7 B7 ... G1 B1 G0 B0           \n\
+punpcklwd %%xmm5, %%xmm4  #               00 R3 ... 00 R0 G0 B0           \n\
+movntdq   %%xmm4, (%3)    # Store ARGB3 ... ARGB0                         \n\
+punpckhwd %%xmm5, %%xmm6  #               00 R7 ... 00 R4 G4 B4           \n\
+movntdq   %%xmm6, 16(%3)  # Store ARGB7 ... ARGB4                         \n\
+punpckhbw %%xmm2, %%xmm0  #               GB BB ... G9 B9 G8 B8           \n\
+punpckhbw %%xmm3, %%xmm1  #               00 RF ... 00 R9 00 R8           \n\
+movdqa    %%xmm0, %%xmm5  #               GF BF ... G9 B9 G8 B8           \n\
+punpcklwd %%xmm1, %%xmm5  #               00 RB ... 00 R8 G8 B8           \n\
+movntdq   %%xmm5, 32(%3)  # Store ARGB11 ... ARGB8                        \n\
+punpckhwd %%xmm1, %%xmm0  #               00 RF ... 00 RC GC BC           \n\
+movntdq   %%xmm0, 48(%3)  # Store ARGB15 ... ARGB12                       \n\
 "
 
-#define SSE2_UNPACK_32_ARGB_UNALIGNED "                                     \n\
-pxor      %%xmm3, %%xmm3  # zero xmm3                                       \n\
-movdqa    %%xmm0, %%xmm4  #               B7 B6 B5 B4 B3 B2 B1 B0           \n\
-punpcklbw %%xmm2, %%xmm4  #               G3 B3 G2 B2 G1 B1 G0 B0           \n\
-movdqa    %%xmm1, %%xmm5  #               R7 R6 R5 R4 R3 R2 R1 R0           \n\
-punpcklbw %%xmm3, %%xmm5  #               00 R3 00 R2 00 R1 00 R0           \n\
-movdqa    %%xmm4, %%xmm6  #               G3 B3 G2 B2 G1 B1 G0 B0           \n\
-punpcklwd %%xmm5, %%xmm4  #               00 R1 B1 G1 00 R0 B0 G0           \n\
-movdqu    %%xmm4, (%3)    # Store ARGB3 ARGB2 ARGB1 ARGB0                   \n\
-punpckhwd %%xmm5, %%xmm6  #               00 R3 B3 G3 00 R2 B2 G2           \n\
-movdqu    %%xmm6, 16(%3)  # Store ARGB7 ARGB6 ARGB5 ARGB4                   \n\
-punpckhbw %%xmm2, %%xmm0  #               G7 B7 G6 B6 G5 B5 G4 B4           \n\
-punpckhbw %%xmm3, %%xmm1  #               00 R7 00 R6 00 R5 00 R4           \n\
-movdqa    %%xmm0, %%xmm5  #               G7 B7 G6 B6 G5 B5 G4 B4           \n\
-punpcklwd %%xmm1, %%xmm5  #               00 R5 B5 G5 00 R4 B4 G4           \n\
-movdqu    %%xmm5, 32(%3)  # Store ARGB11 ARGB10 ARGB9 ARGB8                 \n\
-punpckhwd %%xmm1, %%xmm0  #               00 R7 B7 G7 00 R6 B6 G6           \n\
-movdqu    %%xmm0, 48(%3)  # Store ARGB15 ARGB14 ARGB13 ARGB12               \n\
+#define SSE2_UNPACK_32_ARGB_UNALIGNED "                                   \n\
+pxor      %%xmm3, %%xmm3  # zero xmm3                                     \n\
+movdqa    %%xmm0, %%xmm4  #               BF BE BD ... B2 B1 B0           \n\
+punpcklbw %%xmm2, %%xmm4  #               G7 B7 ... G1 B1 G0 B0           \n\
+movdqa    %%xmm1, %%xmm5  #               RF RE RD ... R2 R1 R0           \n\
+punpcklbw %%xmm3, %%xmm5  #               00 R7 ... 00 R1 00 R0           \n\
+movdqa    %%xmm4, %%xmm6  #               G7 B7 ... G1 B1 G0 B0           \n\
+punpcklwd %%xmm5, %%xmm4  #               00 R3 ... 00 R0 G0 B0           \n\
+movdqu    %%xmm4, (%3)    # Store ARGB3 ... ARGB0                         \n\
+punpckhwd %%xmm5, %%xmm6  #               00 R7 ... 00 R4 G4 B4           \n\
+movdqu    %%xmm6, 16(%3)  # Store ARGB7 ... ARGB4                         \n\
+punpckhbw %%xmm2, %%xmm0  #               GF BF ... G9 B9 G8 B8           \n\
+punpckhbw %%xmm3, %%xmm1  #               00 RF ... 00 R9 00 R8           \n\
+movdqa    %%xmm0, %%xmm5  #               GF BF ... G9 B9 G8 B8           \n\
+punpcklwd %%xmm1, %%xmm5  #               00 RB ... 00 R8 G8 B8           \n\
+movdqu    %%xmm5, 32(%3)  # Store ARGB11 ... ARGB8                        \n\
+punpckhwd %%xmm1, %%xmm0  #               00 RF ... 00 RC GC BC           \n\
+movdqu    %%xmm0, 48(%3)  # Store ARGB15 ... ARGB12                       \n\
 "
 
-#define SSE2_UNPACK_32_RGBA_ALIGNED "                                       \n\
-pxor      %%xmm3, %%xmm3  # zero mm3                                        \n\
-movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 G4 G3 G2 G1 G0         \n\
-punpcklbw %%xmm1, %%xmm4  #                 R3 G3 R2 G2 R1 G1 R0 G0         \n\
-punpcklbw %%xmm0, %%xmm3  #                 B3 00 B2 00 B1 00 B0 00         \n\
-movdqa    %%xmm3, %%xmm5  #                 R3 00 R2 00 R1 00 R0 00         \n\
-punpcklwd %%xmm4, %%xmm3  #                 R1 G1 B1 00 R0 B0 G0 00         \n\
-movntdq   %%xmm3, (%3)    # Store RGBA3 RGBA2 RGBA1 RGBA0                   \n\
-punpckhwd %%xmm4, %%xmm5  #                 R3 G3 B3 00 R2 G2 B2 00         \n\
-movntdq   %%xmm5, 16(%3)  # Store RGBA7 RGBA6 RGBA5 RGBA4                   \n\
-pxor      %%xmm6, %%xmm6  # zero mm6                                        \n\
-punpckhbw %%xmm1, %%xmm2  #                 R7 G7 R6 G6 R5 G5 R4 G4         \n\
-punpckhbw %%xmm0, %%xmm6  #                 B7 00 B6 00 B5 00 B4 00         \n\
-movdqa    %%xmm6, %%xmm0  #                 B7 00 B6 00 B5 00 B4 00         \n\
-punpcklwd %%xmm2, %%xmm6  #                 R5 G5 B5 00 R4 G4 B4 00         \n\
-movntdq   %%xmm6, 32(%3)  # Store BGRA11 BGRA10 BGRA9 RGBA8                 \n\
-punpckhwd %%xmm2, %%xmm0  #                 R7 G7 B7 00 R6 G6 B6 00         \n\
-movntdq   %%xmm0, 48(%3)  # Store RGBA15 RGBA14 RGBA13 RGBA12               \n\
+#define SSE2_UNPACK_32_RGBA_ALIGNED "                                     \n\
+pxor      %%xmm3, %%xmm3  # zero mm3                                      \n\
+movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 ... G2 G1 G0         \n\
+punpcklbw %%xmm1, %%xmm4  #                 R7 G7 ... R1 G1 R0 G0         \n\
+punpcklbw %%xmm0, %%xmm3  #                 B7 00 ... B1 00 B0 00         \n\
+movdqa    %%xmm3, %%xmm5  #                 R7 00 ... R1 00 R0 00         \n\
+punpcklwd %%xmm4, %%xmm3  #                 R3 G3 ... R0 B0 G0 00         \n\
+movntdq   %%xmm3, (%3)    # Store RGBA3 ... RGBA0                         \n\
+punpckhwd %%xmm4, %%xmm5  #                 R7 G7 ... R4 G4 B4 00         \n\
+movntdq   %%xmm5, 16(%3)  # Store RGBA7 ... RGBA4                         \n\
+pxor      %%xmm6, %%xmm6  # zero mm6                                      \n\
+punpckhbw %%xmm1, %%xmm2  #                 RB GB ... R9 G9 R8 G8         \n\
+punpckhbw %%xmm0, %%xmm6  #                 BF 00 ... B9 00 B8 00         \n\
+movdqa    %%xmm6, %%xmm0  #                 BF 00 ... B9 00 B8 00         \n\
+punpcklwd %%xmm2, %%xmm6  #                 RB GB ... R8 G8 B8 00         \n\
+movntdq   %%xmm6, 32(%3)  # Store BGRA11 ... RGBA8                        \n\
+punpckhwd %%xmm2, %%xmm0  #                 RF GF ... RC GC BC 00         \n\
+movntdq   %%xmm0, 48(%3)  # Store RGBA15 ... RGBA12                       \n\
 "
 
-#define SSE2_UNPACK_32_RGBA_UNALIGNED "                                     \n\
-pxor      %%xmm3, %%xmm3  # zero mm3                                        \n\
-movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 G4 G3 G2 G1 G0         \n\
-punpcklbw %%xmm1, %%xmm4  #                 R3 G3 R2 G2 R1 G1 R0 G0         \n\
-punpcklbw %%xmm0, %%xmm3  #                 B3 00 B2 00 B1 00 B0 00         \n\
-movdqa    %%xmm3, %%xmm5  #                 R3 00 R2 00 R1 00 R0 00         \n\
-punpcklwd %%xmm4, %%xmm3  #                 R1 G1 B1 00 R0 B0 G0 00         \n\
-movdqu    %%xmm3, (%3)    # Store RGBA3 RGBA2 RGBA1 RGBA0                   \n\
-punpckhwd %%xmm4, %%xmm5  #                 R3 G3 B3 00 R2 G2 B2 00         \n\
-movdqu    %%xmm5, 16(%3)  # Store RGBA7 RGBA6 RGBA5 RGBA4                   \n\
-pxor      %%xmm6, %%xmm6  # zero mm6                                        \n\
-punpckhbw %%xmm1, %%xmm2  #                 R7 G7 R6 G6 R5 G5 R4 G4         \n\
-punpckhbw %%xmm0, %%xmm6  #                 B7 00 B6 00 B5 00 B4 00         \n\
-movdqa    %%xmm6, %%xmm0  #                 B7 00 B6 00 B5 00 B4 00         \n\
-punpcklwd %%xmm2, %%xmm6  #                 R5 G5 B5 00 R4 G4 B4 00         \n\
-movdqu    %%xmm6, 32(%3)  # Store RGBA11 RGBA10 RGBA9 RGBA8                 \n\
-punpckhwd %%xmm2, %%xmm0  #                 R7 G7 B7 00 R6 G6 B6 00         \n\
-movdqu    %%xmm0, 48(%3)  # Store RGBA15 RGBA14 RGBA13 RGBA12               \n\
+#define SSE2_UNPACK_32_RGBA_UNALIGNED "                                   \n\
+pxor      %%xmm3, %%xmm3  # zero mm3                                      \n\
+movdqa    %%xmm2, %%xmm4  #                 GF GE GD ... G2 G1 G0         \n\
+punpcklbw %%xmm1, %%xmm4  #                 R7 G7 ... R1 G1 R0 G0         \n\
+punpcklbw %%xmm0, %%xmm3  #                 B7 00 ... B1 00 B0 00         \n\
+movdqa    %%xmm3, %%xmm5  #                 R7 00 ... R1 00 R0 00         \n\
+punpcklwd %%xmm4, %%xmm3  #                 R3 G3 ... R0 B0 G0 00         \n\
+movdqu    %%xmm3, (%3)    # Store RGBA3 ... RGBA0                         \n\
+punpckhwd %%xmm4, %%xmm5  #                 R7 G7 ... R4 G4 B4 00         \n\
+movdqu    %%xmm5, 16(%3)  # Store RGBA7 ... RGBA4                         \n\
+pxor      %%xmm6, %%xmm6  # zero mm6                                      \n\
+punpckhbw %%xmm1, %%xmm2  #                 RF GF ... R9 G9 R8 G8         \n\
+punpckhbw %%xmm0, %%xmm6  #                 BF 00 ... B9 00 B8 00         \n\
+movdqa    %%xmm6, %%xmm0  #                 BF 00 ... B9 00 B8 00         \n\
+punpcklwd %%xmm2, %%xmm6  #                 RB GB ... R8 G8 B8 00         \n\
+movdqu    %%xmm6, 32(%3)  # Store RGBA11 ... RGBA8                        \n\
+punpckhwd %%xmm2, %%xmm0  #                 RF GF ... RC GC BC 00         \n\
+movdqu    %%xmm0, 48(%3)  # Store RGBA15 ... RGBA12                       \n\
 "
 
-#define SSE2_UNPACK_32_BGRA_ALIGNED "                                       \n\
-pxor      %%xmm3, %%xmm3  # zero mm3                                        \n\
-movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 G4 G3 G2 G1 G0         \n\
-punpcklbw %%xmm0, %%xmm4  #                 B3 G3 B2 G2 B1 G1 B0 G0         \n\
-punpcklbw %%xmm1, %%xmm3  #                 R3 00 R2 00 R1 00 R0 00         \n\
-movdqa    %%xmm3, %%xmm5  #                 R3 00 R2 00 R1 00 R0 00         \n\
-punpcklwd %%xmm4, %%xmm3  #                 B1 G1 R1 00 B0 G0 R0 00         \n\
-movntdq   %%xmm3, (%3)    # Store BGRA3 BGRA2 BGRA1 BGRA0                   \n\
-punpckhwd %%xmm4, %%xmm5  #                 B3 G3 R3 00 B2 G2 R2 00         \n\
-movntdq   %%xmm5, 16(%3)  # Store BGRA7 BGRA6 BGRA5 BGRA4                   \n\
-pxor      %%xmm6, %%xmm6  # zero mm6                                        \n\
-punpckhbw %%xmm0, %%xmm2  #                 B7 G7 B6 G6 B5 G5 B4 G4         \n\
-punpckhbw %%xmm1, %%xmm6  #                 R7 00 R6 00 R5 00 R4 00         \n\
-movdqa    %%xmm6, %%xmm0  #                 R7 00 R6 00 R5 00 R4 00         \n\
-punpcklwd %%xmm2, %%xmm6  #                 B5 G5 R5 00 B4 G4 R4 00         \n\
-movntdq   %%xmm6, 32(%3)  # Store BGRA11 BGRA10 BGRA9 BGRA8                 \n\
-punpckhwd %%xmm2, %%xmm0  #                 B7 G7 R7 00 B6 G6 R6 00         \n\
-movntdq   %%xmm0, 48(%3)  # Store BGRA15 BGRA14 BGRA13 BGRA12               \n\
+#define SSE2_UNPACK_32_BGRA_ALIGNED "                                     \n\
+pxor      %%xmm3, %%xmm3  # zero mm3                                      \n\
+movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 ... G2 G1 G0         \n\
+punpcklbw %%xmm0, %%xmm4  #                 B7 G7 ... B1 G1 B0 G0         \n\
+punpcklbw %%xmm1, %%xmm3  #                 R7 00 ... R1 00 R0 00         \n\
+movdqa    %%xmm3, %%xmm5  #                 R7 00 ... R1 00 R0 00         \n\
+punpcklwd %%xmm4, %%xmm3  #                 B3 G3 ... B0 G0 R0 00         \n\
+movntdq   %%xmm3, (%3)    # Store BGRA3 ... BGRA0                         \n\
+punpckhwd %%xmm4, %%xmm5  #                 B7 G7 ... B4 G4 R4 00         \n\
+movntdq   %%xmm5, 16(%3)  # Store BGRA7 ... BGRA4                         \n\
+pxor      %%xmm6, %%xmm6  # zero mm6                                      \n\
+punpckhbw %%xmm0, %%xmm2  #                 BF GF ... B9 G9 B8 G8         \n\
+punpckhbw %%xmm1, %%xmm6  #                 RF 00 ... R9 00 R8 00         \n\
+movdqa    %%xmm6, %%xmm0  #                 RF 00 ... R9 00 R8 00         \n\
+punpcklwd %%xmm2, %%xmm6  #                 BB GB ... B8 G8 R8 00         \n\
+movntdq   %%xmm6, 32(%3)  # Store BGRA11 ... BGRA8                        \n\
+punpckhwd %%xmm2, %%xmm0  #                 BF GF ... BC GC RC 00         \n\
+movntdq   %%xmm0, 48(%3)  # Store BGRA15 ... BGRA12                       \n\
 "
 
-#define SSE2_UNPACK_32_BGRA_UNALIGNED "                                     \n\
-pxor      %%xmm3, %%xmm3  # zero mm3                                        \n\
-movdqa    %%xmm2, %%xmm4  #                 G7 G6 G5 G4 G3 G2 G1 G0         \n\
-punpcklbw %%xmm0, %%xmm4  #                 B3 G3 B2 G2 B1 G1 B0 G0         \n\
-punpcklbw %%xmm1, %%xmm3  #                 R3 00 R2 00 R1 00 R0 00         \n\
-movdqa    %%xmm3, %%xmm5  #                 R3 00 R2 00 R1 00 R0 00         \n\
-punpcklwd %%xmm4, %%xmm3  #                 B1 G1 R1 00 B0 G0 R0 00         \n\
-movdqu    %%xmm3, (%3)    # Store BGRA3 BGRA2 BGRA1 BGRA0                   \n\
-punpckhwd %%xmm4, %%xmm5  #                 B3 G3 R3 00 B2 G2 R2 00         \n\
-movdqu    %%xmm5, 16(%3)  # Store BGRA7 BGRA6 BGRA5 BGRA4                   \n\
-pxor      %%xmm6, %%xmm6  # zero mm6                                        \n\
-punpckhbw %%xmm0, %%xmm2  #                 B7 G7 B6 G6 B5 G5 B4 G4         \n\
-punpckhbw %%xmm1, %%xmm6  #                 R7 00 R6 00 R5 00 R4 00         \n\
-movdqa    %%xmm6, %%xmm0  #                 R7 00 R6 00 R5 00 R4 00         \n\
-punpcklwd %%xmm2, %%xmm6  #                 B5 G5 R5 00 B4 G4 R4 00         \n\
-movdqu    %%xmm6, 32(%3)  # Store BGRA11 BGRA10 BGRA9 BGRA8                 \n\
-punpckhwd %%xmm2, %%xmm0  #                 B7 G7 R7 00 B6 G6 R6 00         \n\
-movdqu    %%xmm0, 48(%3)  # Store BGRA15 BGRA14 BGRA13 BGRA12               \n\
+#define SSE2_UNPACK_32_BGRA_UNALIGNED "                                   \n\
+pxor      %%xmm3, %%xmm3  # zero mm3                                      \n\
+movdqa    %%xmm2, %%xmm4  #                 GF GE GD ... G2 G1 G0         \n\
+punpcklbw %%xmm0, %%xmm4  #                 B7 G7 ... B1 G1 B0 G0         \n\
+punpcklbw %%xmm1, %%xmm3  #                 R7 00 ... R1 00 R0 00         \n\
+movdqa    %%xmm3, %%xmm5  #                 R7 00 ... R1 00 R0 00         \n\
+punpcklwd %%xmm4, %%xmm3  #                 B3 G3 ... B0 G0 R0 00         \n\
+movdqu    %%xmm3, (%3)    # Store BGRA3 ... BGRA0                         \n\
+punpckhwd %%xmm4, %%xmm5  #                 B7 G7 ... B4 G4 R4 00         \n\
+movdqu    %%xmm5, 16(%3)  # Store BGRA7 ... BGRA4                         \n\
+pxor      %%xmm6, %%xmm6  # zero mm6                                      \n\
+punpckhbw %%xmm0, %%xmm2  #                 BC GC ... B9 G9 B8 G8         \n\
+punpckhbw %%xmm1, %%xmm6  #                 RC 00 ... R9 00 R8 00         \n\
+movdqa    %%xmm6, %%xmm0  #                 RC 00 ... R9 00 R8 00         \n\
+punpcklwd %%xmm2, %%xmm6  #                 BB GB ... B8 G8 R8 00         \n\
+movdqu    %%xmm6, 32(%3)  # Store BGRA11 ... BGRA8                        \n\
+punpckhwd %%xmm2, %%xmm0  #                 BF GF ... BC GC RC 00         \n\
+movdqu    %%xmm0, 48(%3)  # Store BGRA15 ... BGRA12                       \n\
 "
 
-#define SSE2_UNPACK_32_ABGR_ALIGNED "                                       \n\
-pxor      %%xmm3, %%xmm3  # zero mm3                                        \n\
-movdqa    %%xmm1, %%xmm4  #                 R7 R6 R5 R4 R3 R2 R1 R0         \n\
-punpcklbw %%xmm2, %%xmm4  #                 G3 R3 G2 R2 G1 R1 G0 R0         \n\
-movdqa    %%xmm0, %%xmm5  #                 B7 B6 B5 B4 B3 B2 B1 B0         \n\
-punpcklbw %%xmm3, %%xmm5  #                 00 B3 00 B2 00 B1 00 B0         \n\
-movdqa    %%xmm4, %%xmm6  #                 G3 R3 G2 R2 G1 R1 G0 R0         \n\
-punpcklwd %%xmm5, %%xmm4  #                 00 B1 G1 R1 00 B0 G0 R0         \n\
-movntdq   %%xmm4, (%3)    # Store ABGR3 ABGR2 ABGR1 ABGR0                   \n\
-punpckhwd %%xmm5, %%xmm6  #                 00 B3 G3 R3 00 B2 G2 R2         \n\
-movntdq   %%xmm6, 16(%3)  # Store ABGR7 ABGR6 ABGR5 ABGR4                   \n\
-punpckhbw %%xmm2, %%xmm1  #                 G7 R7 G6 R6 G5 R5 G4 R4         \n\
-punpckhbw %%xmm3, %%xmm0  #                 00 B7 00 B6 00 B5 00 B4         \n\
-movdqa    %%xmm1, %%xmm2  #                 G7 R7 G6 R6 G5 R5 G4 R4         \n\
-punpcklwd %%xmm0, %%xmm1  #                 00 B5 G5 R5 00 B4 G4 R4         \n\
-movntdq   %%xmm1, 32(%3)  # Store ABGR11 ABGR10 ABGR9 ABGR8                 \n\
-punpckhwd %%xmm0, %%xmm2  #                 B7 G7 R7 00 B6 G6 R6 00         \n\
-movntdq   %%xmm2, 48(%3)  # Store ABGR15 ABGR14 ABGR13 ABGR12               \n\
+#define SSE2_UNPACK_32_ABGR_ALIGNED "                                     \n\
+pxor      %%xmm3, %%xmm3  # zero mm3                                      \n\
+movdqa    %%xmm1, %%xmm4  #                 RF RE RD ... R2 R1 R0         \n\
+punpcklbw %%xmm2, %%xmm4  #                 G7 R7 ... G1 R1 G0 R0         \n\
+movdqa    %%xmm0, %%xmm5  #                 BF BE BD ... B2 B1 B0         \n\
+punpcklbw %%xmm3, %%xmm5  #                 00 B7 ... 00 B1 00 B0         \n\
+movdqa    %%xmm4, %%xmm6  #                 G7 R7 ... G1 R1 G0 R0         \n\
+punpcklwd %%xmm5, %%xmm4  #                 00 B3 ... 00 B0 G0 R0         \n\
+movntdq   %%xmm4, (%3)    # Store ABGR3 ... ABGR0                         \n\
+punpckhwd %%xmm5, %%xmm6  #                 00 B7 ... 00 B4 G4 R4         \n\
+movntdq   %%xmm6, 16(%3)  # Store ABGR7 ... ABGR4                         \n\
+punpckhbw %%xmm2, %%xmm1  #                 GF RF ... G9 R9 G8 R8         \n\
+punpckhbw %%xmm3, %%xmm0  #                 00 BF ... 00 B9 00 B8         \n\
+movdqa    %%xmm1, %%xmm2  #                 GF RF ... G9 R9 G8 R8         \n\
+punpcklwd %%xmm0, %%xmm1  #                 00 BB ... 00 B8 G8 R8         \n\
+movntdq   %%xmm1, 32(%3)  # Store ABGR11 ... ABGR8                        \n\
+punpckhwd %%xmm0, %%xmm2  #                 00 BF ... 00 BC GC RC         \n\
+movntdq   %%xmm2, 48(%3)  # Store ABGR15 ... ABGR12                       \n\
 "
 
-#define SSE2_UNPACK_32_ABGR_UNALIGNED "                                     \n\
-pxor      %%xmm3, %%xmm3  # zero mm3                                        \n\
-movdqa    %%xmm1, %%xmm4  #                 R7 R6 R5 R4 R3 R2 R1 R0         \n\
-punpcklbw %%xmm2, %%xmm4  #                 G3 R3 G2 R2 G1 R1 G0 R0         \n\
-movdqa    %%xmm0, %%xmm5  #                 B7 B6 B5 B4 B3 B2 B1 B0         \n\
-punpcklbw %%xmm3, %%xmm5  #                 00 B3 00 B2 00 B1 00 B0         \n\
-movdqa    %%xmm4, %%xmm6  #                 G3 R3 G2 R2 G1 R1 G0 R0         \n\
-punpcklwd %%xmm5, %%xmm4  #                 00 B1 G1 R1 00 B0 G0 R0         \n\
-movdqu    %%xmm4, (%3)    # Store ABGR3 ABGR2 ABGR1 ABGR0                   \n\
-punpckhwd %%xmm5, %%xmm6  #                 00 B3 G3 R3 00 B2 G2 R2         \n\
-movdqu    %%xmm6, 16(%3)  # Store ABGR7 ABGR6 ABGR5 ABGR4                   \n\
-punpckhbw %%xmm2, %%xmm1  #                 G7 R7 G6 R6 G5 R5 G4 R4         \n\
-punpckhbw %%xmm3, %%xmm0  #                 00 B7 00 B6 00 B5 00 B4         \n\
-movdqa    %%xmm1, %%xmm2  #                 R7 00 R6 00 R5 00 R4 00         \n\
-punpcklwd %%xmm0, %%xmm1  #                 00 B5 G5 R5 00 B4 G4 R4         \n\
-movdqu    %%xmm1, 32(%3)  # Store ABGR11 ABGR10 ABGR9 ABGR8                 \n\
-punpckhwd %%xmm0, %%xmm2  #                 B7 G7 R7 00 B6 G6 R6 00         \n\
-movdqu    %%xmm2, 48(%3)  # Store ABGR15 ABGR14 ABGR13 ABGR12               \n\
+#define SSE2_UNPACK_32_ABGR_UNALIGNED "                                   \n\
+pxor      %%xmm3, %%xmm3  # zero mm3                                      \n\
+movdqa    %%xmm1, %%xmm4  #                 RF RE RD ... R2 R1 R0         \n\
+punpcklbw %%xmm2, %%xmm4  #                 G7 R7 ... G1 R1 G0 R0         \n\
+movdqa    %%xmm0, %%xmm5  #                 BF BE BD ... B2 B1 B0         \n\
+punpcklbw %%xmm3, %%xmm5  #                 00 B7 ... 00 B1 00 B0         \n\
+movdqa    %%xmm4, %%xmm6  #                 G7 R7 ... G1 R1 G0 R0         \n\
+punpcklwd %%xmm5, %%xmm4  #                 00 B3 ... 00 B0 G0 R0         \n\
+movdqu    %%xmm4, (%3)    # Store ABGR3 ... ABGR0                         \n\
+punpckhwd %%xmm5, %%xmm6  #                 00 B7 ... 00 B4 G4 R4         \n\
+movdqu    %%xmm6, 16(%3)  # Store ABGR7 ... ABGR4                         \n\
+punpckhbw %%xmm2, %%xmm1  #                 GF RF ... G9 R9 G8 R8         \n\
+punpckhbw %%xmm3, %%xmm0  #                 00 BF ... 00 B9 00 B8         \n\
+movdqa    %%xmm1, %%xmm2  #                 GF RF ... G9 R9 G8 R8         \n\
+punpcklwd %%xmm0, %%xmm1  #                 00 BB ... 00 B8 G8 R8         \n\
+movdqu    %%xmm1, 32(%3)  # Store ABGR11 ... ABGR8                        \n\
+punpckhwd %%xmm0, %%xmm2  #                 00 BF ... 00 BC GC RC         \n\
+movdqu    %%xmm2, 48(%3)  # Store ABGR15 ... ABGR12                       \n\
 "
 
 #elif defined(HAVE_SSE2_INTRINSICS)
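
As an illustration of what the byte-layout comments above encode: every
SSE2_UNPACK_32_* variant is the same two-stage interleave (bytes into words,
then words into dwords), and the variants differ only in which plane feeds
which operand. Below is a minimal sketch of that pattern with SSE2
intrinsics, assuming the planes are plain arrays in memory and a B G R 0
byte order in the output; the names are illustrative, not VLC code, and in
the real macros the planes already sit in xmm0/xmm1/xmm2 and the aligned
variants use non-temporal movntdq stores.

    #include <emmintrin.h>
    #include <stdint.h>

    static void unpack_32_sketch(const uint8_t *bp, const uint8_t *gp,
                                 const uint8_t *rp, uint8_t *out)
    {
        __m128i b = _mm_loadu_si128((const __m128i *)bp); /* BF .. B1 B0 */
        __m128i g = _mm_loadu_si128((const __m128i *)gp); /* GF .. G1 G0 */
        __m128i r = _mm_loadu_si128((const __m128i *)rp); /* RF .. R1 R0 */
        __m128i zero = _mm_setzero_si128();

        __m128i bg_lo = _mm_unpacklo_epi8(b, g);    /* G7 B7 .. G1 B1 G0 B0 */
        __m128i bg_hi = _mm_unpackhi_epi8(b, g);    /* GF BF .. G9 B9 G8 B8 */
        __m128i r0_lo = _mm_unpacklo_epi8(r, zero); /* 00 R7 .. 00 R1 00 R0 */
        __m128i r0_hi = _mm_unpackhi_epi8(r, zero); /* 00 RF .. 00 R9 00 R8 */

        /* word interleave: 4 pixels per 16-byte store, 16 pixels total,
         * each pixel laid out in memory as B G R 00 */
        _mm_storeu_si128((__m128i *)(out +  0), _mm_unpacklo_epi16(bg_lo, r0_lo));
        _mm_storeu_si128((__m128i *)(out + 16), _mm_unpackhi_epi16(bg_lo, r0_lo));
        _mm_storeu_si128((__m128i *)(out + 32), _mm_unpacklo_epi16(bg_hi, r0_hi));
        _mm_storeu_si128((__m128i *)(out + 48), _mm_unpackhi_epi16(bg_hi, r0_hi));
    }
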
diff --git a/modules/video_chroma/i420_yuy2.h b/modules/video_chroma/i420_yuy2.h
index 98c4731565..736fcab90a 100644
--- a/modules/video_chroma/i420_yuy2.h
+++ b/modules/video_chroma/i420_yuy2.h
@@ -86,10 +86,10 @@ punpcklbw %%mm2, %%mm1  #                     v3 u3 v2 u2 v1 u1 v0 u0     \n\
 movq      %%mm1, %%mm2  #                     v3 u3 v2 u2 v1 u1 v0 u0     \n\
 punpcklbw %%mm0, %%mm2  #                     y3 v1 y2 u1 y1 v0 y0 u0     \n\
 movq      %%mm2, (%0)   # Store low UYVY                                  \n\
-movq      %%mm1, %%mm2  #                     u3 v3 u2 v2 u1 v1 u0 v0     \n\
-punpckhbw %%mm0, %%mm2  #                     y3 v1 y2 u1 y1 v0 y0 u0     \n\
+movq      %%mm1, %%mm2  #                     v3 u3 v2 u2 v1 u1 v0 u0     \n\
+punpckhbw %%mm0, %%mm2  #                     y7 v3 y6 u3 y5 v2 y4 u2     \n\
 movq      %%mm2, 8(%0)  # Store high UYVY                                 \n\
-movq      %%mm1, %%mm4  #                     u3 v3 u2 v2 u1 v1 u0 v0     \n\
+movq      %%mm1, %%mm4  #                     v3 u3 v2 u2 v1 u1 v0 u0     \n\
 punpcklbw %%mm3, %%mm4  #                     Y3 v1 Y2 u1 Y1 v0 Y0 u0     \n\
 movq      %%mm4, (%1)   # Store low UYVY                                  \n\
 punpckhbw %%mm3, %%mm1  #                     Y7 v3 Y6 u3 Y5 v2 Y4 u2     \n\
@@ -177,8 +177,8 @@ movq      %%mm1, 8(%1)  # Store high UYVY                                 \n\
     do {                                \
     __asm__ __volatile__(               \
         ".p2align 3 \n\t                \
-movq        (%0), %%xmm1  # Load 8 Cb         u7 u6 u5 u4 u3 u2 u1 u0     \n\
-movq        (%1), %%xmm2  # Load 8 Cr         v7 06 v5 v4 v3 v2 v1 v0     \n\
+movq        (%0), %%xmm1  # Load 8 Cb         00 00 00 ... u2 u1 u0   \n\
+movq        (%1), %%xmm2  # Load 8 Cr         00 00 00 ... v2 v1 v0   \n\
 " \
         :                               \
         : "r" (p_u),  "r" (p_v)         \
@@ -197,108 +197,108 @@ movq        (%1), %%xmm2  # Load 8 Cr         v7 06 v5 v4 v3 v2 v1 v0     \n\
 
 #define SSE2_END  __asm__ __volatile__ ( "sfence" ::: "memory" )
 
-#define SSE2_YUV420_YUYV_ALIGNED "                                        \n\
-movdqa      (%2), %%xmm0  # Load 16 Y         y15 y14 y13 .. y2 y1 y0     \n\
-movdqa      (%3), %%xmm3  # Load 16 Y         Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0     \n\
-punpcklbw %%xmm2, %%xmm1  #                   v7 u7 v6 u6 .. u1 v0 u0     \n\
-movdqa    %%xmm0, %%xmm2  #                   y15 y14 y13 .. y2 y1 y0     \n\
-punpcklbw %%xmm1, %%xmm2  #                   v3 y7 u3 .. v0 y1 u0 y0     \n\
-movntdq   %%xmm2, (%0)    # Store low YUYV                                \n\
-punpckhbw %%xmm1, %%xmm0  #                   v3 y7 u3 y6 v2 y5 u2 y4     \n\
-movntdq   %%xmm0, 16(%0)  # Store high YUYV                               \n\
-movdqa    %%xmm3, %%xmm4  #                   Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0     \n\
-punpcklbw %%xmm1, %%xmm4  #                   v1 Y3 u1 Y2 v0 Y1 u0 Y0     \n\
-movntdq   %%xmm4, (%1)    # Store low YUYV                                \n\
-punpckhbw %%xmm1, %%xmm3  #                   v3 Y7 u3 Y6 v2 Y5 u2 Y4     \n\
-movntdq   %%xmm3, 16(%1)  # Store high YUYV                               \n\
+#define SSE2_YUV420_YUYV_ALIGNED "                                     \n\
+movdqa      (%2), %%xmm0  # Load 16 Y          yF yE yD ... y2 y1 y0   \n\
+movdqa      (%3), %%xmm3  # Load 16 Y          YF YE YD ... Y2 Y1 Y0   \n\
+punpcklbw %%xmm2, %%xmm1  #                    00 00 ... v1 u1 v0 u0   \n\
+movdqa    %%xmm0, %%xmm2  #                    yF yE yD ... y2 y1 y0   \n\
+punpcklbw %%xmm1, %%xmm2  #                    v3 y7 ... v0 y1 u0 y0   \n\
+movntdq   %%xmm2, (%0)    # Store low YUYV                             \n\
+punpckhbw %%xmm1, %%xmm0  #                    v7 yF ... v4 y9 u4 y8   \n\
+movntdq   %%xmm0, 16(%0)  # Store high YUYV                            \n\
+movdqa    %%xmm3, %%xmm4  #                    YF YE YD ... Y2 Y1 Y0   \n\
+punpcklbw %%xmm1, %%xmm4  #                    v3 Y7 ... v0 Y1 u0 Y0   \n\
+movntdq   %%xmm4, (%1)    # Store low YUYV                             \n\
+punpckhbw %%xmm1, %%xmm3  #                    v7 YF ... v4 Y9 u4 Y8   \n\
+movntdq   %%xmm3, 16(%1)  # Store high YUYV                            \n\
 "
 
-#define SSE2_YUV420_YUYV_UNALIGNED "                                      \n\
-movdqu      (%2), %%xmm0  # Load 16 Y         y7 y6 y5 y4 y3 y2 y1 y0     \n\
-movdqu      (%3), %%xmm3  # Load 16 Y         Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0     \n\
-prefetchnta (%0)          # Tell CPU not to cache output YUYV data        \n\
-prefetchnta (%1)          # Tell CPU not to cache output YUYV data        \n\
-punpcklbw %%xmm2, %%xmm1  #                   v3 u3 v2 u2 v1 u1 v0 u0     \n\
-movdqa    %%xmm0, %%xmm2  #                   y7 y6 y5 y4 y3 y2 y1 y0     \n\
-punpcklbw %%xmm1, %%xmm2  #                   v1 y3 u1 y2 v0 y1 u0 y0     \n\
-movdqu    %%xmm2, (%0)    # Store low YUYV                                \n\
-punpckhbw %%xmm1, %%xmm0  #                   v3 y7 u3 y6 v2 y5 u2 y4     \n\
-movdqu    %%xmm0, 16(%0)  # Store high YUYV                               \n\
-movdqa    %%xmm3, %%xmm4  #                   Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0     \n\
-punpcklbw %%xmm1, %%xmm4  #                   v1 Y3 u1 Y2 v0 Y1 u0 Y0     \n\
-movdqu    %%xmm4, (%1)    # Store low YUYV                                \n\
-punpckhbw %%xmm1, %%xmm3  #                   v3 Y7 u3 Y6 v2 Y5 u2 Y4     \n\
-movdqu    %%xmm3, 16(%1)  # Store high YUYV                               \n\
+#define SSE2_YUV420_YUYV_UNALIGNED "                                   \n\
+movdqu      (%2), %%xmm0  # Load 16 Y          yF yE yD ... y2 y1 y0   \n\
+movdqu      (%3), %%xmm3  # Load 16 Y          YF YE YD ... Y2 Y1 Y0   \n\
+prefetchnta (%0)          # Tell CPU not to cache output YUYV data     \n\
+prefetchnta (%1)          # Tell CPU not to cache output YUYV data     \n\
+punpcklbw %%xmm2, %%xmm1  #                    00 00 ... v1 u1 v0 u0   \n\
+movdqa    %%xmm0, %%xmm2  #                    yF yE yD ... y2 y1 y0   \n\
+punpcklbw %%xmm1, %%xmm2  #                    v3 y7 ... v0 y1 u0 y0   \n\
+movdqu    %%xmm2, (%0)    # Store low YUYV                             \n\
+punpckhbw %%xmm1, %%xmm0  #                    v7 yF ... v4 y9 u4 y8   \n\
+movdqu    %%xmm0, 16(%0)  # Store high YUYV                            \n\
+movdqa    %%xmm3, %%xmm4  #                    YF YE YD ... Y2 Y1 Y0   \n\
+punpcklbw %%xmm1, %%xmm4  #                    v3 Y7 ... v0 Y1 u0 Y0   \n\
+movdqu    %%xmm4, (%1)    # Store low YUYV                             \n\
+punpckhbw %%xmm1, %%xmm3  #                    v7 YF ... v4 Y9 u4 Y8   \n\
+movdqu    %%xmm3, 16(%1)  # Store high YUYV                            \n\
 "
 
-#define SSE2_YUV420_YVYU_ALIGNED "                                        \n\
-movdqa      (%2), %%xmm0  # Load 16 Y           y7 y6 y5 y4 y3 y2 y1 y0   \n\
-movdqa      (%3), %%xmm3  # Load 16 Y           Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
-punpcklbw %%xmm1, %%xmm2  #                     u3 v3 u2 v2 u1 v1 u0 v0   \n\
-movdqa    %%xmm0, %%xmm1  #                     y7 y6 y5 y4 y3 y2 y1 y0   \n\
-punpcklbw %%xmm2, %%xmm1  #                     u1 y3 v1 y2 u0 y1 v0 y0   \n\
-movntdq   %%xmm1, (%0)    # Store low YUYV                                \n\
-punpckhbw %%xmm2, %%xmm0  #                     u3 y7 v3 y6 u2 y5 v2 y4   \n\
-movntdq   %%xmm0, 16(%0)  # Store high YUYV                               \n\
-movdqa    %%xmm3, %%xmm4  #                     Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
-punpcklbw %%xmm2, %%xmm4  #                     u1 Y3 v1 Y2 u0 Y1 v0 Y0   \n\
-movntdq   %%xmm4, (%1)    # Store low YUYV                                \n\
-punpckhbw %%xmm2, %%xmm3  #                     u3 Y7 v3 Y6 u2 Y5 v2 Y4   \n\
-movntdq   %%xmm3, 16(%1)  # Store high YUYV                               \n\
+#define SSE2_YUV420_YVYU_ALIGNED "                                     \n\
+movdqa      (%2), %%xmm0  # Load 16 Y          yF yE yD ... y2 y1 y0   \n\
+movdqa      (%3), %%xmm3  # Load 16 Y          YF YE YD ... Y2 Y1 Y0   \n\
+punpcklbw %%xmm1, %%xmm2  #                    u7 v7 ... u1 v1 u0 v0   \n\
+movdqa    %%xmm0, %%xmm1  #                    yF yE yD ... y2 y1 y0   \n\
+punpcklbw %%xmm2, %%xmm1  #                    u3 y7 ... u0 y1 v0 y0   \n\
+movntdq   %%xmm1, (%0)    # Store low YUYV                             \n\
+punpckhbw %%xmm2, %%xmm0  #                    u7 yF ... u4 y9 v4 y8   \n\
+movntdq   %%xmm0, 16(%0)  # Store high YUYV                            \n\
+movdqa    %%xmm3, %%xmm4  #                    YF YE YD ... Y2 Y1 Y0   \n\
+punpcklbw %%xmm2, %%xmm4  #                    u3 Y7 ... u0 Y1 v0 Y0   \n\
+movntdq   %%xmm4, (%1)    # Store low YUYV                             \n\
+punpckhbw %%xmm2, %%xmm3  #                    u7 YF ... u4 Y9 v4 Y8   \n\
+movntdq   %%xmm3, 16(%1)  # Store high YUYV                            \n\
 "
 
-#define SSE2_YUV420_YVYU_UNALIGNED "                                      \n\
-movdqu      (%2), %%xmm0  # Load 16 Y           y7 y6 y5 y4 y3 y2 y1 y0   \n\
-movdqu      (%3), %%xmm3  # Load 16 Y           Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
-prefetchnta (%0)          # Tell CPU not to cache output YVYU data        \n\
-prefetchnta (%1)          # Tell CPU not to cache output YVYU data        \n\
-punpcklbw %%xmm1, %%xmm2  #                     u3 v3 u2 v2 u1 v1 u0 v0   \n\
-movdqu    %%xmm0, %%xmm1  #                     y7 y6 y5 y4 y3 y2 y1 y0   \n\
-punpcklbw %%xmm2, %%xmm1  #                     u1 y3 v1 y2 u0 y1 v0 y0   \n\
-movdqu    %%xmm1, (%0)    # Store low YUYV                                \n\
-punpckhbw %%xmm2, %%xmm0  #                     u3 y7 v3 y6 u2 y5 v2 y4   \n\
-movdqu    %%xmm0, 16(%0)  # Store high YUYV                               \n\
-movdqu    %%xmm3, %%xmm4  #                     Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
-punpcklbw %%xmm2, %%xmm4  #                     u1 Y3 v1 Y2 u0 Y1 v0 Y0   \n\
-movdqu    %%xmm4, (%1)    # Store low YUYV                                \n\
-punpckhbw %%xmm2, %%xmm3  #                     u3 Y7 v3 Y6 u2 Y5 v2 Y4   \n\
-movdqu    %%xmm3, 16(%1)  # Store high YUYV                               \n\
+#define SSE2_YUV420_YVYU_UNALIGNED "                                    \n\
+movdqu      (%2), %%xmm0  # Load 16 Y           yF yE yD ... y2 y1 y0   \n\
+movdqu      (%3), %%xmm3  # Load 16 Y           YF YE YD ... Y2 Y1 Y0   \n\
+prefetchnta (%0)          # Tell CPU not to cache output YVYU data      \n\
+prefetchnta (%1)          # Tell CPU not to cache output YVYU data      \n\
+punpcklbw %%xmm1, %%xmm2  #                     u7 v7 ... u1 v1 u0 v0   \n\
+movdqu    %%xmm0, %%xmm1  #                     yF yE yD ... y2 y1 y0   \n\
+punpcklbw %%xmm2, %%xmm1  #                     u3 y7 ... u0 y1 v0 y0   \n\
+movdqu    %%xmm1, (%0)    # Store low YUYV                              \n\
+punpckhbw %%xmm2, %%xmm0  #                     u7 yF ... u4 y9 v4 y8   \n\
+movdqu    %%xmm0, 16(%0)  # Store high YUYV                             \n\
+movdqu    %%xmm3, %%xmm4  #                     YF YE YD ... Y2 Y1 Y0   \n\
+punpcklbw %%xmm2, %%xmm4  #                     u3 Y7 ... u0 Y1 v0 Y0   \n\
+movdqu    %%xmm4, (%1)    # Store low YUYV                              \n\
+punpckhbw %%xmm2, %%xmm3  #                     u7 YF ... u4 Y9 v4 Y8   \n\
+movdqu    %%xmm3, 16(%1)  # Store high YUYV                             \n\
 "
 
-#define SSE2_YUV420_UYVY_ALIGNED "                                        \n\
-movdqa      (%2), %%xmm0  # Load 16 Y           y7 y6 y5 y4 y3 y2 y1 y0   \n\
-movdqa      (%3), %%xmm3  # Load 16 Y           Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
-punpcklbw %%xmm2, %%xmm1  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
-movdqa    %%xmm1, %%xmm2  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
-punpcklbw %%xmm0, %%xmm2  #                     y3 v1 y2 u1 y1 v0 y0 u0   \n\
-movntdq   %%xmm2, (%0)    # Store low UYVY                                \n\
-movdqa    %%xmm1, %%xmm2  #                     u3 v3 u2 v2 u1 v1 u0 v0   \n\
-punpckhbw %%xmm0, %%xmm2  #                     y3 v1 y2 u1 y1 v0 y0 u0   \n\
-movntdq   %%xmm2, 16(%0)  # Store high UYVY                               \n\
-movdqa    %%xmm1, %%xmm4  #                     u3 v3 u2 v2 u1 v1 u0 v0   \n\
-punpcklbw %%xmm3, %%xmm4  #                     Y3 v1 Y2 u1 Y1 v0 Y0 u0   \n\
-movntdq   %%xmm4, (%1)    # Store low UYVY                                \n\
-punpckhbw %%xmm3, %%xmm1  #                     Y7 v3 Y6 u3 Y5 v2 Y4 u2   \n\
-movntdq   %%xmm1, 16(%1)  # Store high UYVY                               \n\
+#define SSE2_YUV420_UYVY_ALIGNED "                                      \n\
+movdqa      (%2), %%xmm0  # Load 16 Y           yF yE yD ... y2 y1 y0   \n\
+movdqa      (%3), %%xmm3  # Load 16 Y           YF YE YD ... Y2 Y1 Y0   \n\
+punpcklbw %%xmm2, %%xmm1  #                     v7 u7 ... v1 u1 v0 u0   \n\
+movdqa    %%xmm1, %%xmm2  #                     v7 u7 ... v1 u1 v0 u0   \n\
+punpcklbw %%xmm0, %%xmm2  #                     y7 v3 ... y1 v0 y0 u0   \n\
+movntdq   %%xmm2, (%0)    # Store low UYVY                              \n\
+movdqa    %%xmm1, %%xmm2  #                     v7 u7 ... v1 u1 v0 u0   \n\
+punpckhbw %%xmm0, %%xmm2  #                     yF v7 ... y9 v4 y8 u4   \n\
+movntdq   %%xmm2, 16(%0)  # Store high UYVY                             \n\
+movdqa    %%xmm1, %%xmm4  #                     v7 u7 ... v1 u1 v0 u0   \n\
+punpcklbw %%xmm3, %%xmm4  #                     Y7 v3 ... Y1 v0 Y0 u0   \n\
+movntdq   %%xmm4, (%1)    # Store low UYVY                              \n\
+punpckhbw %%xmm3, %%xmm1  #                     YF v7 ... Y9 v4 Y8 u4   \n\
+movntdq   %%xmm1, 16(%1)  # Store high UYVY                             \n\
 "
 
-#define SSE2_YUV420_UYVY_UNALIGNED "                                      \n\
-movdqu      (%2), %%xmm0  # Load 16 Y           y7 y6 y5 y4 y3 y2 y1 y0   \n\
-movdqu      (%3), %%xmm3  # Load 16 Y           Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0   \n\
-prefetchnta (%0)          # Tell CPU not to cache output UYVY data        \n\
-prefetchnta (%1)          # Tell CPU not to cache output UYVY data        \n\
-punpcklbw %%xmm2, %%xmm1  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
-movdqu    %%xmm1, %%xmm2  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
-punpcklbw %%xmm0, %%xmm2  #                     y3 v1 y2 u1 y1 v0 y0 u0   \n\
-movdqu    %%xmm2, (%0)    # Store low UYVY                                \n\
-movdqu    %%xmm1, %%xmm2  #                     u3 v3 u2 v2 u1 v1 u0 v0   \n\
-punpckhbw %%xmm0, %%xmm2  #                     y3 v1 y2 u1 y1 v0 y0 u0   \n\
-movdqu    %%xmm2, 16(%0)  # Store high UYVY                               \n\
-movdqu    %%xmm1, %%xmm4  #                     u3 v3 u2 v2 u1 v1 u0 v0   \n\
-punpcklbw %%xmm3, %%xmm4  #                     Y3 v1 Y2 u1 Y1 v0 Y0 u0   \n\
-movdqu    %%xmm4, (%1)    # Store low UYVY                                \n\
-punpckhbw %%xmm3, %%xmm1  #                     Y7 v3 Y6 u3 Y5 v2 Y4 u2   \n\
-movdqu    %%xmm1, 16(%1)  # Store high UYVY                               \n\
+#define SSE2_YUV420_UYVY_UNALIGNED "                                    \n\
+movdqu      (%2), %%xmm0  # Load 16 Y           yF yE yD ... y2 y1 y0   \n\
+movdqu      (%3), %%xmm3  # Load 16 Y           YF YE YD ... Y2 Y1 Y0   \n\
+prefetchnta (%0)          # Tell CPU not to cache output UYVY data      \n\
+prefetchnta (%1)          # Tell CPU not to cache output UYVY data      \n\
+punpcklbw %%xmm2, %%xmm1  #                     v7 u7 ... v1 u1 v0 u0   \n\
+movdqu    %%xmm1, %%xmm2  #                     v7 u7 ... v1 u1 v0 u0   \n\
+punpcklbw %%xmm0, %%xmm2  #                     y7 v3 ... y1 v0 y0 u0   \n\
+movdqu    %%xmm2, (%0)    # Store low UYVY                              \n\
+movdqu    %%xmm1, %%xmm2  #                     v7 u7 ... v1 u1 v0 u0   \n\
+punpckhbw %%xmm0, %%xmm2  #                     yF v7 ... y9 v4 y8 u4   \n\
+movdqu    %%xmm2, 16(%0)  # Store high UYVY                             \n\
+movdqu    %%xmm1, %%xmm4  #                     v7 u7 ... v1 u1 v0 u0   \n\
+punpcklbw %%xmm3, %%xmm4  #                     Y7 v3 ... Y1 v0 Y0 u0   \n\
+movdqu    %%xmm4, (%1)    # Store low UYVY                              \n\
+punpckhbw %%xmm3, %%xmm1  #                     YF v7 ... Y9 v4 Y8 u4   \n\
+movdqu    %%xmm1, 16(%1)  # Store high UYVY                             \n\
 "
 
 #elif defined(HAVE_SSE2_INTRINSICS)
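
The SSE2_YUV420_* macros above follow the same scheme: one row of Cb/Cr is
interleaved once and then shared by two rows of luma (4:2:0 subsampling).
A minimal sketch with SSE2 intrinsics, assuming plain pointers and the YUYV
ordering; names are illustrative, not VLC code, and the aligned variants
would use _mm_stream_si128 (movntdq) in place of the plain stores.

    #include <emmintrin.h>
    #include <stdint.h>

    static void i420_to_yuyv_16px_sketch(const uint8_t *y0, const uint8_t *y1,
                                         const uint8_t *u, const uint8_t *v,
                                         uint8_t *out0, uint8_t *out1)
    {
        __m128i cb = _mm_loadl_epi64((const __m128i *)u);  /* 00 00 .. u2 u1 u0 */
        __m128i cr = _mm_loadl_epi64((const __m128i *)v);  /* 00 00 .. v2 v1 v0 */
        __m128i uv = _mm_unpacklo_epi8(cb, cr);            /* v7 u7 .. v1 u1 v0 u0 */

        __m128i ya = _mm_loadu_si128((const __m128i *)y0); /* yF yE .. y1 y0 */
        __m128i yb = _mm_loadu_si128((const __m128i *)y1); /* YF YE .. Y1 Y0 */

        /* interleave luma with the shared chroma: y0 u0 y1 v0 y2 u1 y3 v1 .. */
        _mm_storeu_si128((__m128i *)(out0 +  0), _mm_unpacklo_epi8(ya, uv));
        _mm_storeu_si128((__m128i *)(out0 + 16), _mm_unpackhi_epi8(ya, uv));
        _mm_storeu_si128((__m128i *)(out1 +  0), _mm_unpacklo_epi8(yb, uv));
        _mm_storeu_si128((__m128i *)(out1 + 16), _mm_unpackhi_epi8(yb, uv));
    }
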
diff --git a/modules/video_chroma/i422_yuy2.h b/modules/video_chroma/i422_yuy2.h
index dfa4cb4b3e..542d7a615a 100644
--- a/modules/video_chroma/i422_yuy2.h
+++ b/modules/video_chroma/i422_yuy2.h
@@ -150,79 +150,79 @@ movq      %%mm1, 8(%0)  # Store high UYVY                                 \n\
 
 #define SSE2_END  __asm__ __volatile__ ( "sfence" ::: "memory" )
 
-#define SSE2_YUV422_YUYV_ALIGNED "                                        \n\
-movdqa      (%1), %%xmm0  # Load 8 Y            y7 y6 y5 y4 y3 y2 y1 y0   \n\
-movq        (%2), %%xmm1  # Load 4 Cb           00 00 00 00 u3 u2 u1 u0   \n\
-movq        (%3), %%xmm2  # Load 4 Cr           00 00 00 00 v3 v2 v1 v0   \n\
-punpcklbw %%xmm2, %%xmm1  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
-movdqa    %%xmm0, %%xmm2  #                     y7 y6 y5 y4 y3 y2 y1 y0   \n\
-punpcklbw %%xmm1, %%xmm2  #                     v1 y3 u1 y2 v0 y1 u0 y0   \n\
-movntdq   %%xmm2, (%0)    # Store low YUYV                                \n\
-punpckhbw %%xmm1, %%xmm0  #                     v3 y7 u3 y6 v2 y5 u2 y4   \n\
-movntdq   %%xmm0, 16(%0)  # Store high YUYV                               \n\
+#define SSE2_YUV422_YUYV_ALIGNED "                                      \n\
+movdqa      (%1), %%xmm0  # Load 16 Y           yF yE yD ... y2 y1 y0   \n\
+movq        (%2), %%xmm1  # Load 8 Cb           00 00 00 ... u2 u1 u0   \n\
+movq        (%3), %%xmm2  # Load 8 Cr           00 00 00 ... v2 v1 v0   \n\
+punpcklbw %%xmm2, %%xmm1  #                     v7 u7 ... v1 u1 v0 u0   \n\
+movdqa    %%xmm0, %%xmm2  #                     yF yE yD ... y2 y1 y0   \n\
+punpcklbw %%xmm1, %%xmm2  #                     v3 y7 ... v0 y1 u0 y0   \n\
+movntdq   %%xmm2, (%0)    # Store low YUYV                              \n\
+punpckhbw %%xmm1, %%xmm0  #                     v7 yF ... v4 y9 u4 y8   \n\
+movntdq   %%xmm0, 16(%0)  # Store high YUYV                             \n\
 "
 
-#define SSE2_YUV422_YUYV_UNALIGNED "                                      \n\
-movdqu      (%1), %%xmm0  # Load 8 Y            y7 y6 y5 y4 y3 y2 y1 y0   \n\
-movq        (%2), %%xmm1  # Load 4 Cb           00 00 00 00 u3 u2 u1 u0   \n\
-movq        (%3), %%xmm2  # Load 4 Cr           00 00 00 00 v3 v2 v1 v0   \n\
-prefetchnta (%0)          # Tell CPU not to cache output YUYV data        \n\
-punpcklbw %%xmm2, %%xmm1  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
-movdqa    %%xmm0, %%xmm2  #                     y7 y6 y5 y4 y3 y2 y1 y0   \n\
-punpcklbw %%xmm1, %%xmm2  #                     v1 y3 u1 y2 v0 y1 u0 y0   \n\
-movdqu    %%xmm2, (%0)    # Store low YUYV                                \n\
-punpckhbw %%xmm1, %%xmm0  #                     v3 y7 u3 y6 v2 y5 u2 y4   \n\
-movdqu    %%xmm0, 16(%0)  # Store high YUYV                               \n\
+#define SSE2_YUV422_YUYV_UNALIGNED "                                    \n\
+movdqu      (%1), %%xmm0  # Load 16 Y           yF yE yD ... y2 y1 y0   \n\
+movq        (%2), %%xmm1  # Load 8 Cb           00 00 00 ... u2 u1 u0   \n\
+movq        (%3), %%xmm2  # Load 8 Cr           00 00 00 ... v2 v1 v0   \n\
+prefetchnta (%0)          # Tell CPU not to cache output YUYV data      \n\
+punpcklbw %%xmm2, %%xmm1  #                     v7 u7 ... v1 u1 v0 u0   \n\
+movdqa    %%xmm0, %%xmm2  #                     yF yE yD ... y2 y1 y0   \n\
+punpcklbw %%xmm1, %%xmm2  #                     v3 y7 ... v0 y1 u0 y0   \n\
+movdqu    %%xmm2, (%0)    # Store low YUYV                              \n\
+punpckhbw %%xmm1, %%xmm0  #                     v7 yF ... v4 y9 u4 y8   \n\
+movdqu    %%xmm0, 16(%0)  # Store high YUYV                             \n\
 "
 
-#define SSE2_YUV422_YVYU_ALIGNED "                                        \n\
-movdqa      (%1), %%xmm0  # Load 8 Y            y7 y6 y5 y4 y3 y2 y1 y0   \n\
-movq        (%2), %%xmm2  # Load 4 Cb           00 00 00 00 u3 u2 u1 u0   \n\
-movq        (%3), %%xmm1  # Load 4 Cr           00 00 00 00 v3 v2 v1 v0   \n\
-punpcklbw %%xmm2, %%xmm1  #                     u3 v3 u2 v2 u1 v1 u0 v0   \n\
-movdqa    %%xmm0, %%xmm2  #                     y7 y6 y5 y4 y3 y2 y1 y0   \n\
-punpcklbw %%xmm1, %%xmm2  #                     u1 y3 v1 y2 u0 y1 v0 y0   \n\
-movntdq   %%xmm2, (%0)    # Store low YUYV                                \n\
-punpckhbw %%xmm1, %%xmm0  #                     u3 y7 v3 y6 u2 y5 v2 y4   \n\
-movntdq   %%xmm0, 16(%0)  # Store high YUYV                               \n\
+#define SSE2_YUV422_YVYU_ALIGNED "                                      \n\
+movdqa      (%1), %%xmm0  # Load 16 Y           yF yE yD ... y2 y1 y0   \n\
+movq        (%2), %%xmm2  # Load 8 Cb           00 00 00 ... u2 u1 u0   \n\
+movq        (%3), %%xmm1  # Load 8 Cr           00 00 00 ... v2 v1 v0   \n\
+punpcklbw %%xmm2, %%xmm1  #                     u7 v7 ... u1 v1 u0 v0   \n\
+movdqa    %%xmm0, %%xmm2  #                     yF yE yD ... y2 y1 y0   \n\
+punpcklbw %%xmm1, %%xmm2  #                     u3 y7 ... u0 y1 v0 y0   \n\
+movntdq   %%xmm2, (%0)    # Store low YUYV                              \n\
+punpckhbw %%xmm1, %%xmm0  #                     u7 yF ... u4 y9 v4 y8   \n\
+movntdq   %%xmm0, 16(%0)  # Store high YUYV                             \n\
 "
 
-#define SSE2_YUV422_YVYU_UNALIGNED "                                      \n\
-movdqu      (%1), %%xmm0  # Load 8 Y            y7 y6 y5 y4 y3 y2 y1 y0   \n\
-movq        (%2), %%xmm2  # Load 4 Cb           00 00 00 00 u3 u2 u1 u0   \n\
-movq        (%3), %%xmm1  # Load 4 Cr           00 00 00 00 v3 v2 v1 v0   \n\
-prefetchnta (%0)          # Tell CPU not to cache output YUYV data        \n\
-punpcklbw %%xmm2, %%xmm1  #                     u3 v3 u2 v2 u1 v1 u0 v0   \n\
-movdqa    %%xmm0, %%xmm2  #                     y7 y6 y5 y4 y3 y2 y1 y0   \n\
-punpcklbw %%xmm1, %%xmm2  #                     u1 y3 v1 y2 u0 y1 v0 y0   \n\
-movdqu    %%xmm2, (%0)    # Store low YUYV                                \n\
-punpckhbw %%xmm1, %%xmm0  #                     u3 y7 v3 y6 u2 y5 v2 y4   \n\
-movdqu    %%xmm0, 16(%0)  # Store high YUYV                               \n\
+#define SSE2_YUV422_YVYU_UNALIGNED "                                    \n\
+movdqu      (%1), %%xmm0  # Load 16 Y           yF yE yD ... y2 y1 y0   \n\
+movq        (%2), %%xmm2  # Load 8 Cb           00 00 00 ... u2 u1 u0   \n\
+movq        (%3), %%xmm1  # Load 8 Cr           00 00 00 ... v2 v1 v0   \n\
+prefetchnta (%0)          # Tell CPU not to cache output YUYV data      \n\
+punpcklbw %%xmm2, %%xmm1  #                     u7 v7 ... u1 v1 u0 v0   \n\
+movdqa    %%xmm0, %%xmm2  #                     yF yE yD ... y2 y1 y0   \n\
+punpcklbw %%xmm1, %%xmm2  #                     u3 y7 ... u0 y1 v0 y0   \n\
+movdqu    %%xmm2, (%0)    # Store low YUYV                              \n\
+punpckhbw %%xmm1, %%xmm0  #                     u7 yF ... u4 y9 v4 y8   \n\
+movdqu    %%xmm0, 16(%0)  # Store high YUYV                             \n\
 "
 
-#define SSE2_YUV422_UYVY_ALIGNED "                                        \n\
-movdqa      (%1), %%xmm0  # Load 8 Y            y7 y6 y5 y4 y3 y2 y1 y0   \n\
-movq        (%2), %%xmm1  # Load 4 Cb           00 00 00 00 u3 u2 u1 u0   \n\
-movq        (%3), %%xmm2  # Load 4 Cr           00 00 00 00 v3 v2 v1 v0   \n\
-punpcklbw %%xmm2, %%xmm1  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
-movdqa    %%xmm1, %%xmm2  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
-punpcklbw %%xmm0, %%xmm2  #                     y3 v1 y2 u1 y1 v0 y0 u0   \n\
-movntdq   %%xmm2, (%0)    # Store low UYVY                                \n\
-punpckhbw %%xmm0, %%xmm1  #                     y7 v3 y6 u3 y5 v2 y4 u2   \n\
-movntdq   %%xmm1, 16(%0)  # Store high UYVY                               \n\
+#define SSE2_YUV422_UYVY_ALIGNED "                                      \n\
+movdqa      (%1), %%xmm0  # Load 16 Y           yF yE yD ... y2 y1 y0   \n\
+movq        (%2), %%xmm1  # Load 8 Cb           00 00 00 ... u2 u1 u0   \n\
+movq        (%3), %%xmm2  # Load 8 Cr           00 00 00 ... v2 v1 v0   \n\
+punpcklbw %%xmm2, %%xmm1  #                     v7 u7 ... v1 u1 v0 u0   \n\
+movdqa    %%xmm1, %%xmm2  #                     v7 u7 ... v1 u1 v0 u0   \n\
+punpcklbw %%xmm0, %%xmm2  #                     y7 v3 ... y1 v0 y0 u0   \n\
+movntdq   %%xmm2, (%0)    # Store low UYVY                              \n\
+punpckhbw %%xmm0, %%xmm1  #                     yF v7 ... y9 v4 y8 u4   \n\
+movntdq   %%xmm1, 16(%0)  # Store high UYVY                             \n\
 "
 
-#define SSE2_YUV422_UYVY_UNALIGNED "                                      \n\
-movdqu      (%1), %%xmm0  # Load 8 Y            y7 y6 y5 y4 y3 y2 y1 y0   \n\
-movq        (%2), %%xmm1  # Load 4 Cb           00 00 00 00 u3 u2 u1 u0   \n\
-movq        (%3), %%xmm2  # Load 4 Cr           00 00 00 00 v3 v2 v1 v0   \n\
-prefetchnta (%0)          # Tell CPU not to cache output YUYV data        \n\
-punpcklbw %%xmm2, %%xmm1  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
-movdqa    %%xmm1, %%xmm2  #                     v3 u3 v2 u2 v1 u1 v0 u0   \n\
-punpcklbw %%xmm0, %%xmm2  #                     y3 v1 y2 u1 y1 v0 y0 u0   \n\
-movdqu    %%xmm2, (%0)    # Store low UYVY                                \n\
-punpckhbw %%xmm0, %%xmm1  #                     y7 v3 y6 u3 y5 v2 y4 u2   \n\
-movdqu    %%xmm1, 16(%0)  # Store high UYVY                               \n\
+#define SSE2_YUV422_UYVY_UNALIGNED "                                    \n\
+movdqu      (%1), %%xmm0  # Load 16 Y           yF yE yD ... y2 y1 y0   \n\
+movq        (%2), %%xmm1  # Load 8 Cb           00 00 00 ... u2 u1 u0   \n\
+movq        (%3), %%xmm2  # Load 8 Cr           00 00 00 ... v2 v1 v0   \n\
+prefetchnta (%0)          # Tell CPU not to cache output YUYV data      \n\
+punpcklbw %%xmm2, %%xmm1  #                     v7 u7 ... v1 u1 v0 u0   \n\
+movdqa    %%xmm1, %%xmm2  #                     v7 u7 ... v1 u1 v0 u0   \n\
+punpcklbw %%xmm0, %%xmm2  #                     y7 v3 ... y1 v0 y0 u0   \n\
+movdqu    %%xmm2, (%0)    # Store low UYVY                              \n\
+punpckhbw %%xmm0, %%xmm1  #                     yF v7 ... y9 v4 y8 u4   \n\
+movdqu    %%xmm1, 16(%0)  # Store high UYVY                             \n\
 "
 
 #elif defined(HAVE_SSE2_INTRINSICS)
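
For checking the I422 byte-layout comments against the output format, a
scalar reference of the UYVY case may help: one Cb/Cr pair covers two
horizontally adjacent luma samples (4:2:2), so each pair of pixels emits
four bytes in U Y V Y order. Illustrative code only, not VLC's.

    #include <stdint.h>

    static void i422_to_uyvy_ref(const uint8_t *y, const uint8_t *u,
                                 const uint8_t *v, uint8_t *out, int pixels)
    {
        for (int i = 0; i < pixels; i += 2) {
            *out++ = *u++; /* u0 */
            *out++ = *y++; /* y0 */
            *out++ = *v++; /* v0 */
            *out++ = *y++; /* y1 */
        }
    }
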



