[XviD-devel] improving data flow during interpolation
xvid-devel@xvid.org
xvid-devel@xvid.org
Wed, 14 Aug 2002 19:24:16 +0200
--/9DWx/yDrRhgMJTb
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline
Hi all,
here is a suggestion for B-frame interpolation in the decoder:
in decoder.c, around line 1017, two buffers (cur and refn[2])
are predicted and then subsequently mixed together with help
of interpolate8x8_c(). From what I measured, there is a somewhat
big gain in combining the hp-interpolate and final mixing
together, inside some functions that would for instance be
called interpolate8x8_halfpel_add_v/hv/etc...
As far as I-SSE is concerned (my preferred target), it's just
a matter of inserting a 'pavgb dst, val' before the final
'movq dst, val' storage. One instr only. It would save a
refn[2] memory consumption, as well as one final averaging
pass. The con is that it requires writing one additional
full round of interpolate8x8_halfpel_add_XXX functions.
Fortunately, I've already got'em written, so it's just a
matter of copy-pasting if you're interested. I attach
the code so you can have a look (I can do the integration
in xvid afterward). Note: don't be surprised the code
resembles a lot what's currently in xvid, it's almost
the one I sent Michael in replacement from the previous one
(straight from my own 'lab' toy codec). There is also
a 16x8 block-motion version (I *think* it improves
cache coherency for luma blocks, although I didn't
measure it...)
What do you think of it?
I'll be on holidays (yes Michael, again:) next
week, but afterward I'd really like to volunteer
for working on these coupled trans/quant
funcs and prediction...
take care,
Skal
--/9DWx/yDrRhgMJTb
Content-Type: text/plain; charset=us-ascii
Content-Disposition: attachment; filename="skl_mb_sse.asm"
;//////////////////////////////////////////////////////////////////////
;// Macro-block processing
;//////////////////////////////////////////////////////////////////////
; [BITS 32]
%include "../../include/skl_syst/skl_nasm.h"
globl Skl_Add_8_FF_SSE
globl Skl_Add_8_FH_SSE
globl Skl_Add_8_HF_SSE
globl Skl_Add_8_HH_SSE
globl Skl_Add_16_FF_SSE
globl Skl_Add_16_FH_SSE
globl Skl_Add_16_HF_SSE
globl Skl_Add_16_HH_SSE
globl Skl_Copy_8_FF_SSE
globl Skl_Copy_8_FH_SSE
globl Skl_Copy_8_HF_SSE
globl Skl_Copy_8_HH_SSE
globl Skl_Copy_16_FF_SSE
globl Skl_Copy_16_FH_SSE
globl Skl_Copy_16_HF_SSE
globl Skl_Copy_16_HH_SSE
DATA
align 16
; NOTE(review): Rounder1_SSE is never referenced in this file — possibly a
; leftover from the MMX (non-pavgb) version; confirm before removing.
Rounder1_SSE:
times 4 dw 1
; Mask1_SSE: 8 x 0x01 — lsb mask used by the round-down correction trick
; (i+j)/2 = (i+j+1)/2 - ((i^j)&1) in all the *_RND1 paths.
Mask1_SSE:
times 8 db 1
TEXT
; all funcs share the signature: (uint8_t *Dst, const uint8_t *Src,
; const uint32_t BpS, const uint32_t Rounding)
; even if 'Rounding' is not used (Copy_FF/Add_FF)
; PROLOG0: load the common args into registers (cdecl, args on stack).
; ecx=Dst, eax=Src, edx=BpS (bytes per scanline). Rounding not read.
%macro PROLOG0 0
emms ; NOTE(review): emms on entry looks redundant (EPILOG does it too) — confirm
mov ecx, [esp+ 4] ; Dst
mov eax, [esp+ 8] ; Src
mov edx, [esp+12] ; BpS
%endmacro
; PROLOG1: PROLOG0 plus a test of Rounding&1. Leaves ZF=0 when Rounding
; is odd, so a following 'jnz .Loop1' selects the round-down (RND1) path.
; MMX instructions don't touch EFLAGS, so callers may set up MMX regs
; between this test and the jnz.
%macro PROLOG1 0
PROLOG0
test dword [esp+16], 1; Rounding?
%endmacro
; EPILOG: clear MMX state for any following x87 code, then return.
%macro EPILOG 0
emms
ret
%endmacro
;//////////////////////////////////////////////////////////////////////
; Full-Full funcs
;//////////////////////////////////////////////////////////////////////
; ADD_FF %1,%2: two row offsets from the current pointers. For each,
; Dst = (Dst + Src + 1)/2. pavgb always rounds up, so no Rounding
; control is possible on this full-pel path.
%macro ADD_FF 2
movq mm0, [eax+%1]
movq mm1, [eax+%2]
pavgb mm0, [ecx+%1]
pavgb mm1, [ecx+%2]
movq [ecx+%1], mm0
movq [ecx+%2], mm1
%endmacro
align 16
;-----------------------------------------------------------------------
; Skl_Add_8_FF_SSE(Dst, Src, BpS, Rounding): 8x8 full-pel "add",
; Dst = (Dst + Src + 1)/2 over 8 rows of 8 bytes.
; Fix: use PROLOG0 instead of PROLOG1 — 'Rounding' is unusable here
; (pavgb always rounds up) and the flag PROLOG1 computed was never
; consumed by any branch. Matches Skl_Add_16_FF_SSE below.
;-----------------------------------------------------------------------
Skl_Add_8_FF_SSE: ; 23c
PROLOG0
ADD_FF 0, edx
lea eax,[eax+2*edx]
lea ecx,[ecx+2*edx]
ADD_FF 0, edx
lea eax,[eax+2*edx]
lea ecx,[ecx+2*edx]
ADD_FF 0, edx
lea eax,[eax+2*edx]
lea ecx,[ecx+2*edx]
ADD_FF 0, edx
EPILOG
align 16
;-----------------------------------------------------------------------
; Skl_Add_16_FF_SSE(Dst, Src, BpS, Rounding): 16x8 full-pel add,
; Dst = (Dst + Src + 1)/2 over 8 rows of 16 bytes.
; Rounding unused, hence PROLOG0 (doesn't read it).
;-----------------------------------------------------------------------
Skl_Add_16_FF_SSE: ; 44c
PROLOG0
ADD_FF 0, 8
add eax, edx
add ecx, edx
ADD_FF 0, 8
add eax, edx
add ecx, edx
ADD_FF 0, 8
add eax, edx
add ecx, edx
ADD_FF 0, 8
add eax, edx
add ecx, edx
ADD_FF 0, 8
add eax, edx
add ecx, edx
ADD_FF 0, 8
add eax, edx
add ecx, edx
ADD_FF 0, 8
add eax, edx
add ecx, edx
ADD_FF 0, 8
EPILOG
; COPY_FF_8 %1(phase): copy two 8-byte rows Src->Dst and advance both
; pointers by 2 rows — except on the last call (%1==1), which skips the
; pointer update.
%macro COPY_FF_8 1 ; %1:phase
movq mm0, [eax]
movq mm1, [eax+edx]
movq [ecx], mm0
%if (%1!=1)
lea eax, [eax+2*edx]
%endif
movq [ecx+edx], mm1
%if (%1!=1)
lea ecx, [ecx+2*edx]
%endif
%endmacro
align 16
;-----------------------------------------------------------------------
; Skl_Copy_8_FF_SSE(Dst, Src, BpS, Rounding): plain 8x8 copy, Dst = Src.
; Rounding unused.
;-----------------------------------------------------------------------
Skl_Copy_8_FF_SSE: ; 12c
PROLOG0
COPY_FF_8 0
COPY_FF_8 0
COPY_FF_8 0
COPY_FF_8 1
EPILOG
; COPY_FF_16 %1(phase): copy two 16-byte rows Src->Dst; advance the
; pointers by 2 rows unless %1==1 (last call).
%macro COPY_FF_16 1 ; %1:phase
movq mm0, [eax]
movq mm1, [eax+8]
movq mm2, [eax+edx]
movq mm3, [eax+edx+8]
%if (%1!=1)
lea eax, [eax+2*edx]
%endif
movq [ecx], mm0
movq [ecx+8], mm1
movq [ecx+edx], mm2
movq [ecx+edx+8], mm3
%if (%1!=1)
lea ecx, [ecx+2*edx]
%endif
%endmacro
align 16
;-----------------------------------------------------------------------
; Skl_Copy_16_FF_SSE(Dst, Src, BpS, Rounding): plain 16x8 copy.
; Rounding unused.
;-----------------------------------------------------------------------
Skl_Copy_16_FF_SSE: ; 26c
PROLOG0
COPY_FF_16 0
COPY_FF_16 0
COPY_FF_16 0
COPY_FF_16 1
EPILOG
;//////////////////////////////////////////////////////////////////////
; Full-Half funcs
;//////////////////////////////////////////////////////////////////////
; ADD_FH_RND0 %1,%2: horizontal half-pel with round-up,
; tmp = (s[i]+s[i+1]+1)/2, then averaged into Dst: Dst = (Dst+tmp+1)/2,
; for the two row offsets %1 and %2.
%macro ADD_FH_RND0 2
movq mm0, [eax+%1]
movq mm1, [eax+%2]
pavgb mm0, [eax+%1+1]
pavgb mm1, [eax+%2+1]
pavgb mm0, [ecx+%1]
pavgb mm1, [ecx+%2]
movq [ecx+%1],mm0
movq [ecx+%2],mm1
%endmacro
; ADD_FH_RND1 %1,%2: horizontal half-pel with round-DOWN, then averaged
; into Dst. Uses (i+j)/2 = (i+j+1)/2 - ((i^j)&1): pavgb rounds up, and
; the masked-lsb subtraction undoes the round-up where needed.
%macro ADD_FH_RND1 2
movq mm0, [eax+%1]
movq mm1, [eax+%2]
movq mm4, mm0
movq mm5, mm1
movq mm2, [eax+%1+1]
movq mm3, [eax+%2+1]
pavgb mm0, mm2
; mm2 becomes i^j for the lsb correction below
pxor mm2, mm4
pavgb mm1, mm3
pxor mm3, mm5
pand mm2, [Mask1_SSE]
pand mm3, [Mask1_SSE]
psubb mm0, mm2
psubb mm1, mm3
pavgb mm0, [ecx+%1]
pavgb mm1, [ecx+%2]
movq [ecx+%1],mm0
movq [ecx+%2],mm1
%endmacro
; COPY_FH_RND0 %1,%2: horizontal half-pel with round-up, stored to Dst:
; Dst = (s[i]+s[i+1]+1)/2 for the two row offsets.
%macro COPY_FH_RND0 2
movq mm0, [eax+%1]
movq mm1, [eax+%2]
pavgb mm0, [eax+%1+1]
pavgb mm1, [eax+%2+1]
movq [ecx+%1],mm0
movq [ecx+%2],mm1
%endmacro
; COPY_FH_RND1 %1,%2: horizontal half-pel with round-down, stored to
; Dst. Same lsb-correction trick as ADD_FH_RND1, without the final mix.
%macro COPY_FH_RND1 2
movq mm0, [eax+%1]
movq mm1, [eax+%2]
movq mm4, mm0
movq mm5, mm1
movq mm2, [eax+%1+1]
movq mm3, [eax+%2+1]
pavgb mm0, mm2
; mm2 becomes i^j for the lsb correction below
pxor mm2, mm4
pavgb mm1, mm3
; mm3 becomes i^j for the second row
pxor mm3, mm5
pand mm2, [Mask1_SSE]
pand mm3, [Mask1_SSE]
psubb mm0, mm2
psubb mm1, mm3
movq [ecx+%1],mm0
movq [ecx+%2],mm1
%endmacro
align 16
;-----------------------------------------------------------------------
; Skl_Add_8_FH_SSE(Dst, Src, BpS, Rounding): 8x8 horizontal half-pel,
; averaged into Dst. PROLOG1's test makes jnz select the round-down
; (.Loop1) path when Rounding is odd.
;-----------------------------------------------------------------------
Skl_Add_8_FH_SSE: ; 32c
PROLOG1
jnz near .Loop1
ADD_FH_RND0 0, edx
lea eax,[eax+2*edx]
lea ecx,[ecx+2*edx]
ADD_FH_RND0 0, edx
lea eax,[eax+2*edx]
lea ecx,[ecx+2*edx]
ADD_FH_RND0 0, edx
lea eax,[eax+2*edx]
lea ecx,[ecx+2*edx]
ADD_FH_RND0 0, edx
EPILOG
.Loop1
; we use: (i+j)/2 = ( i+j+1 )/2 - (i^j)&1
; movq mm7, [Mask1_SSE]
ADD_FH_RND1 0, edx
lea eax,[eax+2*edx]
lea ecx,[ecx+2*edx]
ADD_FH_RND1 0, edx
lea eax,[eax+2*edx]
lea ecx,[ecx+2*edx]
ADD_FH_RND1 0, edx
lea eax,[eax+2*edx]
lea ecx,[ecx+2*edx]
ADD_FH_RND1 0, edx
EPILOG
align 16
;-----------------------------------------------------------------------
; Skl_Add_16_FH_SSE(Dst, Src, BpS, Rounding): 16x8 horizontal half-pel,
; averaged into Dst. jnz (from PROLOG1) selects round-down path.
;-----------------------------------------------------------------------
Skl_Add_16_FH_SSE: ; 64c
PROLOG1
jnz near .Loop1
ADD_FH_RND0 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
ADD_FH_RND0 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
ADD_FH_RND0 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
ADD_FH_RND0 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
ADD_FH_RND0 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
ADD_FH_RND0 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
ADD_FH_RND0 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
ADD_FH_RND0 0, 8
EPILOG
.Loop1
ADD_FH_RND1 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
ADD_FH_RND1 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
ADD_FH_RND1 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
ADD_FH_RND1 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
ADD_FH_RND1 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
ADD_FH_RND1 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
ADD_FH_RND1 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
ADD_FH_RND1 0, 8
EPILOG
align 16
;-----------------------------------------------------------------------
; Skl_Copy_8_FH_SSE(Dst, Src, BpS, Rounding): 8x8 horizontal half-pel
; copy (no mixing with Dst). jnz selects the round-down path.
;-----------------------------------------------------------------------
Skl_Copy_8_FH_SSE:
PROLOG1
jnz .Loop1
COPY_FH_RND0 0, edx
lea eax,[eax+2*edx]
lea ecx,[ecx+2*edx]
COPY_FH_RND0 0, edx
lea eax,[eax+2*edx]
lea ecx,[ecx+2*edx]
COPY_FH_RND0 0, edx
lea eax,[eax+2*edx]
lea ecx,[ecx+2*edx]
COPY_FH_RND0 0, edx
EPILOG
.Loop1
COPY_FH_RND1 0, edx
lea eax,[eax+2*edx]
lea ecx,[ecx+2*edx]
COPY_FH_RND1 0, edx
lea eax,[eax+2*edx]
lea ecx,[ecx+2*edx]
COPY_FH_RND1 0, edx
lea eax,[eax+2*edx]
lea ecx,[ecx+2*edx]
COPY_FH_RND1 0, edx
EPILOG
align 16
;-----------------------------------------------------------------------
; Skl_Copy_16_FH_SSE(Dst, Src, BpS, Rounding): 16x8 horizontal half-pel
; copy. jnz selects the round-down path.
;-----------------------------------------------------------------------
Skl_Copy_16_FH_SSE:
PROLOG1
jnz near .Loop1
COPY_FH_RND0 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
COPY_FH_RND0 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
COPY_FH_RND0 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
COPY_FH_RND0 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
COPY_FH_RND0 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
COPY_FH_RND0 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
COPY_FH_RND0 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
COPY_FH_RND0 0, 8
EPILOG
.Loop1
COPY_FH_RND1 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
COPY_FH_RND1 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
COPY_FH_RND1 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
COPY_FH_RND1 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
COPY_FH_RND1 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
COPY_FH_RND1 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
COPY_FH_RND1 0, 8
lea eax,[eax+edx]
lea ecx,[ecx+edx]
COPY_FH_RND1 0, 8
EPILOG
;//////////////////////////////////////////////////////////////////////
; Half-Full funcs
;//////////////////////////////////////////////////////////////////////
; ADD_8_HF_RND0: vertical half-pel with round-up for two rows, each
; averaged into Dst; advances eax by 2 rows (caller advances ecx).
%macro ADD_8_HF_RND0 0
movq mm0, [eax]
movq mm1, [eax+edx]
pavgb mm0, mm1
pavgb mm1, [eax+2*edx]
lea eax,[eax+2*edx]
pavgb mm0, [ecx]
pavgb mm1, [ecx+edx]
movq [ecx],mm0
movq [ecx+edx],mm1
%endmacro
; ADD_8_HF_RND1: vertical half-pel with round-down for two rows,
; averaged into Dst. On entry mm0 = current top row (loop invariant) and
; mm7 = Mask1_SSE. Advances eax by 2 rows and leaves mm2 = bottom row,
; which the call site recycles with 'movq mm0, mm2'.
%macro ADD_8_HF_RND1 0
movq mm1, [eax+edx]
movq mm2, [eax+2*edx]
lea eax,[eax+2*edx]
movq mm4, mm0
movq mm5, mm1
pavgb mm0, mm1
pxor mm4, mm1
pavgb mm1, mm2
pxor mm5, mm2
pand mm4, mm7 ; lsb's of (i^j)...
pand mm5, mm7 ; lsb's of (i^j)...
psubb mm0, mm4 ; ...are subtracted from result of pavgb
pavgb mm0, [ecx]
movq [ecx], mm0
psubb mm1, mm5 ; ...are subtracted from result of pavgb
pavgb mm1, [ecx+edx]
movq [ecx+edx], mm1
%endmacro
align 16
;-----------------------------------------------------------------------
; Skl_Add_8_HF_SSE(Dst, Src, BpS, Rounding): 8x8 vertical half-pel,
; averaged into Dst. jnz (from PROLOG1) selects the round-down path.
;-----------------------------------------------------------------------
Skl_Add_8_HF_SSE:
PROLOG1
jnz near .Loop1
pxor mm7, mm7 ; dead store: mm7 is unused on this round-up path
ADD_8_HF_RND0
lea ecx,[ecx+2*edx]
ADD_8_HF_RND0
lea ecx,[ecx+2*edx]
ADD_8_HF_RND0
lea ecx,[ecx+2*edx]
ADD_8_HF_RND0
EPILOG
.Loop1
movq mm0, [eax] ; loop invariant
movq mm7, [Mask1_SSE]
ADD_8_HF_RND1
movq mm0, mm2
lea ecx,[ecx+2*edx]
ADD_8_HF_RND1
movq mm0, mm2
lea ecx,[ecx+2*edx]
ADD_8_HF_RND1
movq mm0, mm2
lea ecx,[ecx+2*edx]
ADD_8_HF_RND1
EPILOG
; COPY_8_HF_RND0: vertical half-pel with round-up for two rows, stored
; to Dst; advances eax by 2 rows. Self-contained (reloads mm0 itself).
%macro COPY_8_HF_RND0 0
movq mm0, [eax]
movq mm1, [eax+edx]
pavgb mm0, mm1
pavgb mm1, [eax+2*edx]
lea eax,[eax+2*edx]
movq [ecx],mm0
movq [ecx+edx],mm1
%endmacro
; COPY_8_HF_RND1: vertical half-pel with round-down for two rows, stored
; to Dst. On entry mm0 = current top row, mm7 = Mask1_SSE; leaves
; mm2 = bottom row for the caller's 'movq mm0, mm2'.
%macro COPY_8_HF_RND1 0
movq mm1, [eax+edx]
movq mm2, [eax+2*edx]
lea eax,[eax+2*edx]
movq mm4, mm0
movq mm5, mm1
pavgb mm0, mm1
pxor mm4, mm1
pavgb mm1, mm2
pxor mm5, mm2
pand mm4, mm7 ; lsb's of (i^j)...
pand mm5, mm7 ; lsb's of (i^j)...
psubb mm0, mm4 ; ...are subtracted from result of pavgb
movq [ecx], mm0
psubb mm1, mm5 ; ...are subtracted from result of pavgb
movq [ecx+edx], mm1
%endmacro
align 16
;-----------------------------------------------------------------------
; Skl_Copy_8_HF_SSE(Dst, Src, BpS, Rounding): 8x8 vertical half-pel
; copy. jnz selects the round-down path.
; Fix: dropped the three dead 'movq mm0, mm2' in the no-rounding path —
; copy/paste leftovers from the RND1 path: COPY_8_HF_RND0 reloads mm0
; itself and mm2 is never written on that path (the movq read an
; uninitialized MMX register and its result was always overwritten).
;-----------------------------------------------------------------------
Skl_Copy_8_HF_SSE:
PROLOG1
jnz near .Loop1
pxor mm7, mm7 ; dead store: mm7 is unused on this round-up path
COPY_8_HF_RND0
lea ecx,[ecx+2*edx]
COPY_8_HF_RND0
lea ecx,[ecx+2*edx]
COPY_8_HF_RND0
lea ecx,[ecx+2*edx]
COPY_8_HF_RND0
EPILOG
.Loop1
movq mm0, [eax] ; loop invariant
movq mm7, [Mask1_SSE]
COPY_8_HF_RND1
movq mm0, mm2
lea ecx,[ecx+2*edx]
COPY_8_HF_RND1
movq mm0, mm2
lea ecx,[ecx+2*edx]
COPY_8_HF_RND1
movq mm0, mm2
lea ecx,[ecx+2*edx]
COPY_8_HF_RND1
EPILOG
;//16x8 version ///////////////////////////////////////////////////////
; ADD_16_HF_RND0: vertical half-pel with round-up for two 16-byte rows
; (left half in mm0/mm1, right half in mm4/mm5), averaged into Dst;
; advances eax by 2 rows.
%macro ADD_16_HF_RND0 0
movq mm0, [eax]
movq mm4, [eax+8]
movq mm1, [eax+edx]
movq mm5, [eax+edx+8]
pavgb mm0, mm1
pavgb mm4, mm5
pavgb mm1, [eax+2*edx]
pavgb mm5, [eax+2*edx+8]
lea eax,[eax+2*edx]
pavgb mm0, [ecx]
pavgb mm4, [ecx+8]
pavgb mm1, [ecx+edx]
pavgb mm5, [ecx+edx+8]
movq [ecx],mm0
movq [ecx+8],mm4
movq [ecx+edx],mm1
movq [ecx+edx+8],mm5
%endmacro
; ADD_16_HF_RND1: round-down version for two 16-byte rows, averaged
; into Dst. On entry mm0 = top-left row, mm5 = top-right row (loop
; invariants). Leaves mm0 updated internally and mm2 = bottom-right row
; for the caller's 'movq mm5, mm2'. Advances eax by 2 rows.
%macro ADD_16_HF_RND1 0
movq mm1, [eax+edx]
movq mm2, [eax+2*edx]
movq mm3, mm0
movq mm4, mm1
pavgb mm0, mm1
pxor mm3, mm1
pavgb mm1, mm2
pxor mm4, mm2
pand mm3, [Mask1_SSE] ; lsb's of (i^j)...
pand mm4, [Mask1_SSE] ; lsb's of (i^j)...
psubb mm0, mm3 ; ...are subtracted from result of pavgb
pavgb mm0, [ecx]
movq [ecx], mm0
psubb mm1, mm4 ; ...are subtracted from result of pavgb
pavgb mm1, [ecx+edx]
movq [ecx+edx], mm1
movq mm0, mm2 ; preserved
movq mm1, [eax+edx+8]
movq mm2, [eax+2*edx+8]
lea eax,[eax+2*edx]
movq mm3, mm5
movq mm4, mm1
pavgb mm5, mm1
pxor mm3, mm1
pavgb mm1, mm2
pxor mm4, mm2
pand mm3, [Mask1_SSE] ; lsb's of (i^j)...
pand mm4, [Mask1_SSE] ; lsb's of (i^j)...
psubb mm5, mm3 ; ...are subtracted from result of pavgb
pavgb mm5, [ecx+8]
movq [ecx+8], mm5
psubb mm1, mm4 ; ...are subtracted from result of pavgb
pavgb mm1, [ecx+edx+8]
movq [ecx+edx+8], mm1
%endmacro
align 16
;-----------------------------------------------------------------------
; Skl_Add_16_HF_SSE(Dst, Src, BpS, Rounding): 16x8 vertical half-pel,
; averaged into Dst. jnz selects the round-down path.
;-----------------------------------------------------------------------
Skl_Add_16_HF_SSE:
PROLOG1
jnz near .Loop1
pxor mm7, mm7 ; dead store: mm7 is unused on this round-up path
ADD_16_HF_RND0
lea ecx,[ecx+2*edx]
ADD_16_HF_RND0
lea ecx,[ecx+2*edx]
ADD_16_HF_RND0
lea ecx,[ecx+2*edx]
ADD_16_HF_RND0
EPILOG
.Loop1
movq mm0, [eax] ; loop invariants
movq mm5, [eax+8] ; loop invariants
ADD_16_HF_RND1
movq mm5, mm2
lea ecx,[ecx+2*edx]
ADD_16_HF_RND1
movq mm5, mm2
lea ecx,[ecx+2*edx]
ADD_16_HF_RND1
movq mm5, mm2
lea ecx,[ecx+2*edx]
ADD_16_HF_RND1
EPILOG
; COPY_16_HF_RND0: vertical half-pel with round-up for two 16-byte rows,
; stored to Dst; advances eax by 2 rows.
%macro COPY_16_HF_RND0 0
movq mm0, [eax]
movq mm4, [eax+8]
movq mm1, [eax+edx]
movq mm5, [eax+edx+8]
pavgb mm0, mm1
pavgb mm4, mm5
pavgb mm1, [eax+2*edx]
pavgb mm5, [eax+2*edx+8]
lea eax,[eax+2*edx]
movq [ecx],mm0
movq [ecx+8],mm4
movq [ecx+edx],mm1
movq [ecx+edx+8],mm5
%endmacro
; COPY_16_HF_RND1: round-down version for two 16-byte rows, stored to
; Dst. On entry mm0 = top-left row, mm5 = top-right row; leaves
; mm2 = bottom-right row for the caller's 'movq mm5, mm2'.
%macro COPY_16_HF_RND1 0
movq mm1, [eax+edx]
movq mm2, [eax+2*edx]
movq mm3, mm0
movq mm4, mm1
pavgb mm0, mm1
pxor mm3, mm1
pavgb mm1, mm2
pxor mm4, mm2
pand mm3, [Mask1_SSE] ; lsb's of (i^j)...
pand mm4, [Mask1_SSE] ; lsb's of (i^j)...
psubb mm0, mm3 ; ...are subtracted from result of pavgb
movq [ecx], mm0
psubb mm1, mm4 ; ...are subtracted from result of pavgb
movq [ecx+edx], mm1
movq mm0, mm2 ; preserved
movq mm1, [eax+edx+8]
movq mm2, [eax+2*edx+8]
lea eax,[eax+2*edx]
movq mm3, mm5
movq mm4, mm1
pavgb mm5, mm1
pxor mm3, mm1
pavgb mm1, mm2
pxor mm4, mm2
pand mm3, [Mask1_SSE] ; lsb's of (i^j)...
pand mm4, [Mask1_SSE] ; lsb's of (i^j)...
psubb mm5, mm3 ; ...are subtracted from result of pavgb
movq [ecx+8], mm5
psubb mm1, mm4 ; ...are subtracted from result of pavgb
movq [ecx+edx+8], mm1
%endmacro
align 16
;-----------------------------------------------------------------------
; Skl_Copy_16_HF_SSE(Dst, Src, BpS, Rounding): 16x8 vertical half-pel
; copy. jnz selects the round-down path.
;-----------------------------------------------------------------------
Skl_Copy_16_HF_SSE:
PROLOG1
jnz near .Loop1
pxor mm7, mm7 ; dead store: mm7 is unused on this round-up path
COPY_16_HF_RND0
lea ecx,[ecx+2*edx]
COPY_16_HF_RND0
lea ecx,[ecx+2*edx]
COPY_16_HF_RND0
lea ecx,[ecx+2*edx]
COPY_16_HF_RND0
EPILOG
.Loop1
movq mm0, [eax] ; loop invariants
movq mm5, [eax+8] ; loop invariants
COPY_16_HF_RND1
movq mm5, mm2
lea ecx,[ecx+2*edx]
COPY_16_HF_RND1
movq mm5, mm2
lea ecx,[ecx+2*edx]
COPY_16_HF_RND1
movq mm5, mm2
lea ecx,[ecx+2*edx]
COPY_16_HF_RND1
EPILOG
;//////////////////////////////////////////////////////////////////////
; Half-Half funcs
;//////////////////////////////////////////////////////////////////////
; The trick is to correct the result of 'pavgb' with some combination of the
; lsb's of the 4 input values i,j,k,l, and their intermediate 'pavgb' (s and t).
; The boolean relations are:
; (i+j+k+l+3)/4 = (s+t+1)/2 - (ij&kl)&st
; (i+j+k+l+2)/4 = (s+t+1)/2 - (ij|kl)&st
; (i+j+k+l+1)/4 = (s+t+1)/2 - (ij&kl)|st
; (i+j+k+l+0)/4 = (s+t+1)/2 - (ij|kl)|st
; with s=(i+j+1)/2, t=(k+l+1)/2, ij = i^j, kl = k^l, st = s^t.
; Moreover, we process 2 lines at a time, for better overlapping (~15% faster).
; ADD_HH_RND0: two rows of the 4-tap half-pel average with rounding
; '(i+j+k+l+2)/4' (see formulae above), each averaged into Dst.
; Loop invariants on entry: mm2 = (i+j+1)/2 of the current row,
; mm3 = i^j, mm7 = Mask1_SSE. On exit mm2/mm3 hold the same invariants
; for the bottom row. Advances eax by 2 rows; advances ecx by 1 row
; internally (caller adds the other).
%macro ADD_HH_RND0 0
lea eax,[eax+edx]
movq mm0, [eax]
movq mm1, [eax+1]
movq mm6, mm0
pavgb mm0, mm1 ; mm0=(j+k+1)/2. preserved for next step
lea eax,[eax+edx]
pxor mm1, mm6 ; mm1=(j^k). preserved for next step
por mm3, mm1 ; ij |= jk
movq mm6, mm2
pxor mm6, mm0 ; mm6 = s^t
pand mm3, mm6 ; (ij|jk) &= st
pavgb mm2, mm0 ; mm2 = (s+t+1)/2
pand mm3, mm7 ; mask lsb
psubb mm2, mm3 ; apply.
pavgb mm2, [ecx]
movq [ecx], mm2
movq mm2, [eax]
movq mm3, [eax+1]
movq mm6, mm2
pavgb mm2, mm3 ; preserved for next iteration
lea ecx,[ecx+edx]
pxor mm3, mm6 ; preserved for next iteration
por mm1, mm3
movq mm6, mm0
pxor mm6, mm2
pand mm1, mm6
pavgb mm0, mm2
pand mm1, mm7
psubb mm0, mm1
pavgb mm0, [ecx]
movq [ecx], mm0
%endmacro
; ADD_HH_RND1: two rows of the 4-tap half-pel average with rounding
; '(i+j+k+l+1)/4' (pand/por swapped vs RND0 — see formulae above),
; each averaged into Dst. Same mm2/mm3/mm7 invariants and pointer
; advancement as ADD_HH_RND0.
%macro ADD_HH_RND1 0
lea eax,[eax+edx]
movq mm0, [eax]
movq mm1, [eax+1]
movq mm6, mm0
pavgb mm0, mm1 ; mm0=(j+k+1)/2. preserved for next step
lea eax,[eax+edx]
pxor mm1, mm6 ; mm1=(j^k). preserved for next step
pand mm3, mm1
movq mm6, mm2
pxor mm6, mm0
por mm3, mm6
pavgb mm2, mm0
pand mm3, mm7
psubb mm2, mm3
pavgb mm2, [ecx]
movq [ecx], mm2
movq mm2, [eax]
movq mm3, [eax+1]
movq mm6, mm2
pavgb mm2, mm3 ; preserved for next iteration
lea ecx,[ecx+edx]
pxor mm3, mm6 ; preserved for next iteration
pand mm1, mm3
movq mm6, mm0
pxor mm6, mm2
por mm1, mm6
pavgb mm0, mm2
pand mm1, mm7
psubb mm0, mm1
pavgb mm0, [ecx]
movq [ecx], mm0
%endmacro
align 16
;-----------------------------------------------------------------------
; Skl_Add_8_HH_SSE(Dst, Src, BpS, Rounding): 8x8 h+v half-pel, averaged
; into Dst. The MMX setup below does not modify EFLAGS, so the jnz
; still tests PROLOG1's 'Rounding & 1'.
;-----------------------------------------------------------------------
Skl_Add_8_HH_SSE:
PROLOG1
movq mm7, [Mask1_SSE]
; loop invariants: mm2=(i+j+1)/2 and mm3= i^j
movq mm2, [eax]
movq mm3, [eax+1]
movq mm6, mm2
pavgb mm2, mm3
pxor mm3, mm6 ; mm2/mm3 ready
jnz near .Loop1
ADD_HH_RND0
add ecx, edx
ADD_HH_RND0
add ecx, edx
ADD_HH_RND0
add ecx, edx
ADD_HH_RND0
EPILOG
.Loop1
ADD_HH_RND1
add ecx, edx
ADD_HH_RND1
add ecx, edx
ADD_HH_RND1
add ecx, edx
ADD_HH_RND1
EPILOG
; COPY_HH_RND0: same as ADD_HH_RND0 but stores the half-pel result to
; Dst instead of averaging with it. Same mm2/mm3/mm7 invariants.
%macro COPY_HH_RND0 0
lea eax,[eax+edx]
movq mm0, [eax]
movq mm1, [eax+1]
movq mm6, mm0
pavgb mm0, mm1 ; mm0=(j+k+1)/2. preserved for next step
lea eax,[eax+edx]
pxor mm1, mm6 ; mm1=(j^k). preserved for next step
por mm3, mm1 ; ij |= jk
movq mm6, mm2
pxor mm6, mm0 ; mm6 = s^t
pand mm3, mm6 ; (ij|jk) &= st
pavgb mm2, mm0 ; mm2 = (s+t+1)/2
pand mm3, mm7 ; mask lsb
psubb mm2, mm3 ; apply.
movq [ecx], mm2
movq mm2, [eax]
movq mm3, [eax+1]
movq mm6, mm2
pavgb mm2, mm3 ; preserved for next iteration
lea ecx,[ecx+edx]
pxor mm3, mm6 ; preserved for next iteration
por mm1, mm3
movq mm6, mm0
pxor mm6, mm2
pand mm1, mm6
pavgb mm0, mm2
pand mm1, mm7
psubb mm0, mm1
movq [ecx], mm0
%endmacro
; COPY_HH_RND1: same as ADD_HH_RND1 but stores the half-pel result to
; Dst instead of averaging with it. Same mm2/mm3/mm7 invariants.
%macro COPY_HH_RND1 0
lea eax,[eax+edx]
movq mm0, [eax]
movq mm1, [eax+1]
movq mm6, mm0
pavgb mm0, mm1 ; mm0=(j+k+1)/2. preserved for next step
lea eax,[eax+edx]
pxor mm1, mm6 ; mm1=(j^k). preserved for next step
pand mm3, mm1
movq mm6, mm2
pxor mm6, mm0
por mm3, mm6
pavgb mm2, mm0
pand mm3, mm7
psubb mm2, mm3
movq [ecx], mm2
movq mm2, [eax]
movq mm3, [eax+1]
movq mm6, mm2
pavgb mm2, mm3 ; preserved for next iteration
lea ecx,[ecx+edx]
pxor mm3, mm6 ; preserved for next iteration
pand mm1, mm3
movq mm6, mm0
pxor mm6, mm2
por mm1, mm6
pavgb mm0, mm2
pand mm1, mm7
psubb mm0, mm1
movq [ecx], mm0
%endmacro
align 16
;-----------------------------------------------------------------------
; Skl_Copy_8_HH_SSE(Dst, Src, BpS, Rounding): 8x8 h+v half-pel copy.
; MMX setup preserves EFLAGS, so the jnz still tests 'Rounding & 1'.
;-----------------------------------------------------------------------
Skl_Copy_8_HH_SSE:
PROLOG1
movq mm7, [Mask1_SSE]
; loop invariants: mm2=(i+j+1)/2 and mm3= i^j
movq mm2, [eax]
movq mm3, [eax+1]
movq mm6, mm2
pavgb mm2, mm3
pxor mm3, mm6 ; mm2/mm3 ready
jnz near .Loop1
COPY_HH_RND0
add ecx, edx
COPY_HH_RND0
add ecx, edx
COPY_HH_RND0
add ecx, edx
COPY_HH_RND0
EPILOG
.Loop1
COPY_HH_RND1
add ecx, edx
COPY_HH_RND1
add ecx, edx
COPY_HH_RND1
add ecx, edx
COPY_HH_RND1
EPILOG
;//////////////////////////////////////////////////////////////////////
align 16
;-----------------------------------------------------------------------
; Skl_Add_16_HH_SSE(Dst, Src, BpS, Rounding): h+v half-pel averaged
; into Dst, done as two 8-wide column passes (left half, then restart
; at Dst+8/Src+8 for the right half).
; NOTE(review): each pass runs 8 two-row macros = 16 rows, i.e. a 16x16
; area, whereas the other *_16_* funcs above cover 8 rows — confirm the
; intended block heights.
;-----------------------------------------------------------------------
Skl_Add_16_HH_SSE:
PROLOG1
movq mm7, [Mask1_SSE]
; loop invariants: mm2=(i+j+1)/2 and mm3= i^j
movq mm2, [eax]
movq mm3, [eax+1]
movq mm6, mm2
pavgb mm2, mm3
pxor mm3, mm6 ; mm2/mm3 ready
jnz near .Loop1
ADD_HH_RND0
lea ecx,[ecx+edx]
ADD_HH_RND0
lea ecx,[ecx+edx]
ADD_HH_RND0
lea ecx,[ecx+edx]
ADD_HH_RND0
lea ecx,[ecx+edx]
ADD_HH_RND0
lea ecx,[ecx+edx]
ADD_HH_RND0
lea ecx,[ecx+edx]
ADD_HH_RND0
lea ecx,[ecx+edx]
ADD_HH_RND0
; switch to the right 8-pixel column: restart from Dst+8 / Src+8
mov ecx, [esp+ 4] ; Dst
mov eax, [esp+ 8] ; Src
add ecx, 8
add eax, 8
; re-establish the mm2/mm3 invariants for the new column
movq mm2, [eax]
movq mm3, [eax+1]
movq mm6, mm2
pavgb mm2, mm3
pxor mm3, mm6 ; mm2/mm3 ready
ADD_HH_RND0
lea ecx,[ecx+edx]
ADD_HH_RND0
lea ecx,[ecx+edx]
ADD_HH_RND0
lea ecx,[ecx+edx]
ADD_HH_RND0
lea ecx,[ecx+edx]
ADD_HH_RND0
lea ecx,[ecx+edx]
ADD_HH_RND0
lea ecx,[ecx+edx]
ADD_HH_RND0
lea ecx,[ecx+edx]
ADD_HH_RND0
EPILOG
.Loop1
ADD_HH_RND1
lea ecx,[ecx+edx]
ADD_HH_RND1
lea ecx,[ecx+edx]
ADD_HH_RND1
lea ecx,[ecx+edx]
ADD_HH_RND1
lea ecx,[ecx+edx]
ADD_HH_RND1
lea ecx,[ecx+edx]
ADD_HH_RND1
lea ecx,[ecx+edx]
ADD_HH_RND1
lea ecx,[ecx+edx]
ADD_HH_RND1
; switch to the right 8-pixel column: restart from Dst+8 / Src+8
mov ecx, [esp+ 4] ; Dst
mov eax, [esp+ 8] ; Src
add ecx, 8
add eax, 8
; re-establish the mm2/mm3 invariants for the new column
movq mm2, [eax]
movq mm3, [eax+1]
movq mm6, mm2
pavgb mm2, mm3
pxor mm3, mm6 ; mm2/mm3 ready
ADD_HH_RND1
lea ecx,[ecx+edx]
ADD_HH_RND1
lea ecx,[ecx+edx]
ADD_HH_RND1
lea ecx,[ecx+edx]
ADD_HH_RND1
lea ecx,[ecx+edx]
ADD_HH_RND1
lea ecx,[ecx+edx]
ADD_HH_RND1
lea ecx,[ecx+edx]
ADD_HH_RND1
lea ecx,[ecx+edx]
ADD_HH_RND1
EPILOG
align 16
;-----------------------------------------------------------------------
; Skl_Copy_16_HH_SSE(Dst, Src, BpS, Rounding): h+v half-pel copy, done
; as two 8-wide column passes (left half, then Dst+8/Src+8).
; NOTE(review): each pass covers 16 rows (16x16 area) — see the height
; note on Skl_Add_16_HH_SSE; confirm intended geometry.
;-----------------------------------------------------------------------
Skl_Copy_16_HH_SSE:
PROLOG1
movq mm7, [Mask1_SSE]
; loop invariants: mm2=(i+j+1)/2 and mm3= i^j
movq mm2, [eax]
movq mm3, [eax+1]
movq mm6, mm2
pavgb mm2, mm3
pxor mm3, mm6 ; mm2/mm3 ready
jnz near .Loop1
COPY_HH_RND0
lea ecx,[ecx+edx]
COPY_HH_RND0
lea ecx,[ecx+edx]
COPY_HH_RND0
lea ecx,[ecx+edx]
COPY_HH_RND0
lea ecx,[ecx+edx]
COPY_HH_RND0
lea ecx,[ecx+edx]
COPY_HH_RND0
lea ecx,[ecx+edx]
COPY_HH_RND0
lea ecx,[ecx+edx]
COPY_HH_RND0
; switch to the right 8-pixel column: restart from Dst+8 / Src+8
mov ecx, [esp+ 4] ; Dst
mov eax, [esp+ 8] ; Src
add ecx, 8
add eax, 8
; re-establish the mm2/mm3 invariants for the new column
movq mm2, [eax]
movq mm3, [eax+1]
movq mm6, mm2
pavgb mm2, mm3
pxor mm3, mm6 ; mm2/mm3 ready
COPY_HH_RND0
lea ecx,[ecx+edx]
COPY_HH_RND0
lea ecx,[ecx+edx]
COPY_HH_RND0
lea ecx,[ecx+edx]
COPY_HH_RND0
lea ecx,[ecx+edx]
COPY_HH_RND0
lea ecx,[ecx+edx]
COPY_HH_RND0
lea ecx,[ecx+edx]
COPY_HH_RND0
lea ecx,[ecx+edx]
COPY_HH_RND0
EPILOG
.Loop1
COPY_HH_RND1
lea ecx,[ecx+edx]
COPY_HH_RND1
lea ecx,[ecx+edx]
COPY_HH_RND1
lea ecx,[ecx+edx]
COPY_HH_RND1
lea ecx,[ecx+edx]
COPY_HH_RND1
lea ecx,[ecx+edx]
COPY_HH_RND1
lea ecx,[ecx+edx]
COPY_HH_RND1
lea ecx,[ecx+edx]
COPY_HH_RND1
; switch to the right 8-pixel column: restart from Dst+8 / Src+8
mov ecx, [esp+ 4] ; Dst
mov eax, [esp+ 8] ; Src
add ecx, 8
add eax, 8
; re-establish the mm2/mm3 invariants for the new column
movq mm2, [eax]
movq mm3, [eax+1]
movq mm6, mm2
pavgb mm2, mm3
pxor mm3, mm6 ; mm2/mm3 ready
COPY_HH_RND1
lea ecx,[ecx+edx]
COPY_HH_RND1
lea ecx,[ecx+edx]
COPY_HH_RND1
lea ecx,[ecx+edx]
COPY_HH_RND1
lea ecx,[ecx+edx]
COPY_HH_RND1
lea ecx,[ecx+edx]
COPY_HH_RND1
lea ecx,[ecx+edx]
COPY_HH_RND1
lea ecx,[ecx+edx]
COPY_HH_RND1
EPILOG
;//////////////////////////////////////////////////////////////////////
--/9DWx/yDrRhgMJTb--