[XviD-devel] xvid_fdct_sse + qpel ASM

skal xvid-devel@xvid.org
20 Jan 2003 13:19:33 +0100





	Hi,


	oops, I almost forgot to post these old files.


	bye!


			Skal



PS: As is, the QPel stuff should be initialized
with:

xvid_QP_Funcs = &xvid_QP_Funcs_mmx;
xvid_QP_Add_Funcs = &xvid_QP_Add_Funcs_mmx;
xvid_Init_QP_mmx();

	until further integration (if any).
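
	(For reference: the plain-C fallback from qpel.c can be hooked
	up the same way, and needs no extra init since its FIR_Tab_*
	tables are static:

xvid_QP_Funcs = &xvid_QP_Funcs_C;
xvid_QP_Add_Funcs = &xvid_QP_Add_Funcs_C;
	)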

	

--=-dLq130355Ig4zdyzCliD
Content-Disposition: attachment; filename=fdct_sse.asm

;/*****************************************************************************
; *
; *  XVID MPEG-4 VIDEO CODEC
; *  - SSE forward discrete cosine transform -
; *  Copyright(C) 2002 Pascal Massimino <skal@planet-d.net>
; *
; *  This file is part of XviD, a free MPEG-4 video encoder/decoder
; *
; *  XviD is free software; you can redistribute it and/or modify it
; *  under the terms of the GNU General Public License as published by
; *  the Free Software Foundation; either version 2 of the License, or
; *  (at your option) any later version.
; *
; *  This program is distributed in the hope that it will be useful,
; *  but WITHOUT ANY WARRANTY; without even the implied warranty of
; *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
; *  GNU General Public License for more details.
; *
; *  You should have received a copy of the GNU General Public License
; *  along with this program; if not, write to the Free Software
; *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
; *
; *  Under section 8 of the GNU General Public License, the copyright
; *  holders of XVID explicitly forbid distribution in the following
; *  countries:
; *
; * - Japan
; * - United States of America
; *
; *  Linking XviD statically or dynamically with other modules is making a
; *  combined work based on XviD.  Thus, the terms and conditions of the
; *  GNU General Public License cover the whole combination.
; *
; *  As a special exception, the copyright holders of XviD give you
; *  permission to link XviD with independent modules that communicate with
; *  XviD solely through the VFW1.1 and DShow interfaces, regardless of the
; *  license terms of these independent modules, and to copy and distribute
; *  the resulting combined work under terms of your choice, provided that
; *  every copy of the combined work is accompanied by a complete copy of
; *  the source code of XviD (the version of XviD used to produce the
; *  combined work), being distributed under the terms of the GNU General
; *  Public License plus this exception.  An independent module is a module
; *  which is not derived from or based on XviD.
; *
; *  Note that people who make modified versions of XviD are not obligated
; *  to grant this special exception for their modified versions; it is
; *  their choice whether to do so.  The GNU General Public License gives
; *  permission to release a modified version without this exception; this
; *  exception also makes it possible to release a modified version which
; *  carries forward this exception.
; *
; * $Id: fdct_sse.asm,v 1.1.2.1 2002/12/08 05:34:16 suxen_drol Exp $
; *
; ****************************************************************************/

;/****************************************************************************
; *
; *	History:
; *
; * 01.10.2002  creation - Skal -
; *
; ****************************************************************************/

bits 32

%macro cglobal 1
	%ifdef PREFIX
		global _%1
		%define %1 _%1
	%else
		global %1
	%endif
%endmacro

cglobal xvid_fdct_sse

;//////////////////////////////////////////////////////////////////////
;
; Vertical pass is an implementation of the scheme:
;  Loeffler C., Ligtenberg A., and Moschytz C.S.:
;  Practical Fast 1D DCT Algorithm with Eleven Multiplications,
;  Proc. ICASSP 1989, 988-991.
;
; Horizontal pass is a double 4x4 vector/matrix multiplication,
; (see also Intel's Application Note 922:
;  http://developer.intel.com/vtune/cbts/strmsimd/922down.htm
;  Copyright (C) 1999 Intel Corporation)
;
; Notes:
;  * tan(3pi/16) is greater than 0.5, and would use the
;    sign bit when turned into 16b fixed-point precision. So,
;    we use the trick: x*tan3 = x*(tan3-1)+x
;    (see the sketch after these notes).
;
;  * There's only one SSE-specific instruction (pshufw).
;    Porting to SSE2 also seems straightforward.
;
;  * There are still 1 or 2 ticks to save in fLLM_PASS, but
;    I prefer keeping the code readable instead of tightly
;    scheduled...
;
;  * Quantization stage (as well as pre-transposition for the
;    idct way back) can be included in the fTab* constants
;    (with induced loss of precision, somehow)
;
;  * Some more details at: http://skal.planet-d.net/coding/dct.html
;
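;  A sketch of the tan3 trick in isolation (not part of the transform;
;  suppose mm0 holds x). pmulhw computes (a*b)>>16 on signed 16-bit
;  lanes, so a constant c only fits this encoding when |c| < 0.5;
;  tan(3pi/16) ~ 0.668 does not, hence [tan3] stores tan3-1 ~ -0.332
;  and x*tan3 is recovered as:
;
;      movq   mm1, mm0        ; keep a copy of x
;      pmulhw mm0, [tan3]     ; x*(tan3-1)
;      paddsw mm0, mm1        ; + x  => x*tan(3pi/16)
;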
;//////////////////////////////////////////////////////////////////////
;
;   idct-like IEEE errors:
;
;  =========================
;  Peak error:   1.0000
;  Peak MSE:     0.0365
;  Overall MSE:  0.0201
;  Peak ME:      0.0265
;  Overall ME:   0.0006
;
;  == Mean square errors ==
;   0.000 0.001 0.001 0.002 0.000 0.002 0.001 0.000    [0.001]
;   0.035 0.029 0.032 0.032 0.031 0.032 0.034 0.035    [0.032]
;   0.026 0.028 0.027 0.027 0.025 0.028 0.028 0.025    [0.027]
;   0.037 0.032 0.031 0.030 0.028 0.029 0.026 0.031    [0.030]
;   0.000 0.001 0.001 0.002 0.000 0.002 0.001 0.001    [0.001]
;   0.025 0.024 0.022 0.022 0.022 0.022 0.023 0.023    [0.023]
;   0.026 0.028 0.025 0.028 0.030 0.025 0.026 0.027    [0.027]
;   0.021 0.020 0.020 0.022 0.020 0.022 0.017 0.019    [0.020]
;
;  == Abs Mean errors ==
;   0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000    [0.000]
;   0.020 0.001 0.003 0.003 0.000 0.004 0.002 0.003    [0.002]
;   0.000 0.001 0.001 0.001 0.001 0.004 0.000 0.000    [0.000]
;   0.027 0.001 0.000 0.002 0.002 0.002 0.001 0.000    [0.003]
;   0.000 0.000 0.000 0.000 0.000 0.001 0.000 0.001    [-0.000]
;   0.001 0.003 0.001 0.001 0.002 0.001 0.000 0.000    [-0.000]
;   0.000 0.002 0.002 0.001 0.001 0.002 0.001 0.000    [-0.000]
;   0.000 0.002 0.001 0.002 0.001 0.002 0.001 0.001    [-0.000]
;
;//////////////////////////////////////////////////////////////////////

section .data

align 16
tan1:    dw  0x32ec,0x32ec,0x32ec,0x32ec    ; tan( pi/16)
tan2:    dw  0x6a0a,0x6a0a,0x6a0a,0x6a0a    ; tan(2pi/16)  (=sqrt(2)-1)
tan3:    dw  0xab0e,0xab0e,0xab0e,0xab0e    ; tan(3pi/16)-1
sqrt2:   dw  0x5a82,0x5a82,0x5a82,0x5a82    ; 0.5/sqrt(2)

;//////////////////////////////////////////////////////////////////////

align 16
fTab1:
  dw 0x4000, 0x4000, 0x58c5, 0x4b42,
  dw 0x4000, 0x4000, 0x3249, 0x11a8,
  dw 0x539f, 0x22a3, 0x4b42, 0xee58,
  dw 0xdd5d, 0xac61, 0xa73b, 0xcdb7,
  dw 0x4000, 0xc000, 0x3249, 0xa73b,
  dw 0xc000, 0x4000, 0x11a8, 0x4b42,
  dw 0x22a3, 0xac61, 0x11a8, 0xcdb7,
  dw 0x539f, 0xdd5d, 0x4b42, 0xa73b

fTab2:
  dw 0x58c5, 0x58c5, 0x7b21, 0x6862,
  dw 0x58c5, 0x58c5, 0x45bf, 0x187e,
  dw 0x73fc, 0x300b, 0x6862, 0xe782,
  dw 0xcff5, 0x8c04, 0x84df, 0xba41,
  dw 0x58c5, 0xa73b, 0x45bf, 0x84df,
  dw 0xa73b, 0x58c5, 0x187e, 0x6862,
  dw 0x300b, 0x8c04, 0x187e, 0xba41,
  dw 0x73fc, 0xcff5, 0x6862, 0x84df

fTab3:
  dw 0x539f, 0x539f, 0x73fc, 0x6254,
  dw 0x539f, 0x539f, 0x41b3, 0x1712,
  dw 0x6d41, 0x2d41, 0x6254, 0xe8ee,
  dw 0xd2bf, 0x92bf, 0x8c04, 0xbe4d,
  dw 0x539f, 0xac61, 0x41b3, 0x8c04,
  dw 0xac61, 0x539f, 0x1712, 0x6254,
  dw 0x2d41, 0x92bf, 0x1712, 0xbe4d,
  dw 0x6d41, 0xd2bf, 0x6254, 0x8c04

fTab4:
  dw 0x4b42, 0x4b42, 0x6862, 0x587e,
  dw 0x4b42, 0x4b42, 0x3b21, 0x14c3,
  dw 0x6254, 0x28ba, 0x587e, 0xeb3d,
  dw 0xd746, 0x9dac, 0x979e, 0xc4df,
  dw 0x4b42, 0xb4be, 0x3b21, 0x979e,
  dw 0xb4be, 0x4b42, 0x14c3, 0x587e,
  dw 0x28ba, 0x9dac, 0x14c3, 0xc4df,
  dw 0x6254, 0xd746, 0x587e, 0x979e

align 16
Fdct_Rnd0: dw  6,8,8,8
Fdct_Rnd1: dw  8,8,8,8
Fdct_Rnd2: dw 10,8,8,8
MMX_One:   dw  1,1,1,1

;//////////////////////////////////////////////////////////////////////

section .text

;//////////////////////////////////////////////////////////////////////
;// FDCT LLM vertical pass (~39c)
;//////////////////////////////////////////////////////////////////////

%macro fLLM_PASS 2  ; %1: src/dst, %2:Shift

  movq   mm0, [%1+0*16]   ; In0
  movq   mm2, [%1+2*16]   ; In2
  movq   mm3, mm0
  movq   mm4, mm2
  movq   mm7, [%1+7*16]   ; In7
  movq   mm5, [%1+5*16]   ; In5

  psubsw mm0, mm7         ; t7 = In0-In7
  paddsw mm7, mm3         ; t0 = In0+In7
  psubsw mm2, mm5         ; t5 = In2-In5
  paddsw mm5, mm4         ; t2 = In2+In5

  movq   mm3, [%1+3*16]   ; In3
  movq   mm4, [%1+4*16]   ; In4
  movq   mm1, mm3
  psubsw mm3, mm4         ; t4 = In3-In4
  paddsw mm4, mm1         ; t3 = In3+In4
  movq   mm6, [%1+6*16]   ; In6
  movq   mm1, [%1+1*16]   ; In1
  psubsw mm1, mm6         ; t6 = In1-In6
  paddsw mm6, [%1+1*16]   ; t1 = In1+In6

  psubsw mm7, mm4         ; tm03 = t0-t3
  psubsw mm6, mm5         ; tm12 = t1-t2
  paddsw mm4, mm4         ; 2.t3
  paddsw mm5, mm5         ; 2.t2
  paddsw mm4, mm7         ; tp03 = t0+t3
  paddsw mm5, mm6         ; tp12 = t1+t2

  psllw  mm2, %2+1        ; shift t5 (shift +1 to..
  psllw  mm1, %2+1        ; shift t6  ..compensate cos4/2)
  psllw  mm4, %2          ; shift t3
  psllw  mm5, %2          ; shift t2
  psllw  mm7, %2          ; shift t0
  psllw  mm6, %2          ; shift t1
  psllw  mm3, %2          ; shift t4
  psllw  mm0, %2          ; shift t7

  psubsw mm4, mm5         ; out4 = tp03-tp12
  psubsw mm1, mm2         ; mm1: t6-t5
  paddsw mm5, mm5
  paddsw mm2, mm2
  paddsw mm5, mm4         ; out0 = tp03+tp12
  movq   [%1+4*16], mm4   ; => out4
  paddsw mm2, mm1         ; mm2: t6+t5
  movq   [%1+0*16], mm5   ; => out0

  movq   mm4, [tan2]      ; mm4 <= tan2
  pmulhw mm4, mm7         ; tm03*tan2
  movq   mm5, [tan2]      ; mm5 <= tan2
  psubsw mm4, mm6         ; out6 = tm03*tan2 - tm12
  pmulhw mm5, mm6         ; tm12*tan2
  paddsw mm5, mm7         ; out2 = tm12*tan2 + tm03

  movq   mm6, [sqrt2]
  movq   mm7, [MMX_One]

  pmulhw mm2, mm6         ; mm2: tp65 = (t6 + t5)*cos4
  por    mm5, mm7         ; correct out2
  por    mm4, mm7         ; correct out6
  pmulhw mm1, mm6         ; mm1: tm65 = (t6 - t5)*cos4
  por    mm2, mm7         ; correct tp65

  movq   [%1+2*16], mm5   ; => out2
  movq   mm5, mm3         ; save t4
  movq   [%1+6*16], mm4   ; => out6
  movq   mm4, mm0         ; save t7

  psubsw mm3, mm1         ; mm3: tm465 = t4 - tm65
  psubsw mm0, mm2         ; mm0: tm765 = t7 - tp65
  paddsw mm2, mm4         ; mm2: tp765 = t7 + tp65
  paddsw mm1, mm5         ; mm1: tp465 = t4 + tm65

  movq   mm4, [tan3]      ; tan3 - 1
  movq   mm5, [tan1]      ; tan1

  movq   mm7, mm3         ; save tm465
  pmulhw mm3, mm4         ; tm465*(tan3-1)
  movq   mm6, mm1         ; save tp465
  pmulhw mm1, mm5         ; tp465*tan1

  paddsw mm3, mm7         ; tm465*tan3
  pmulhw mm4, mm0         ; tm765*(tan3-1)
  paddsw mm4, mm0         ; tm765*tan3
  pmulhw mm5, mm2         ; tp765*tan1

  paddsw mm1, mm2         ; out1 = tp765 + tp465*tan1
  psubsw mm0, mm3         ; out3 = tm765 - tm465*tan3
  paddsw mm7, mm4         ; out5 = tm465 + tm765*tan3
  psubsw mm5, mm6         ; out7 =-tp465 + tp765*tan1

  movq   [%1+1*16], mm1   ; => out1
  movq   [%1+3*16], mm0   ; => out3
  movq   [%1+5*16], mm7   ; => out5
  movq   [%1+7*16], mm5   ; => out7

%endmacro

;//////////////////////////////////////////////////////////////////////
;// fMTX_MULT (~20c)
;//////////////////////////////////////////////////////////////////////

%macro fMTX_MULT 4   ; %1=src, %2=Coeffs, %3/%4=rounders
  movq    mm0, [ecx+%1*16+0]  ; mm0 = [0123]

    ; the 'pshufw' below is the only SSE instruction.
    ; For MMX-only version, it should be emulated with
    ; some 'punpck' soup...
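    ;
    ; (a hedged sketch of one such emulation, reversing the four words
    ;  of a register without pshufw -- shifts instead of punpck:
    ;     movq  mm1, mm0
    ;     pslld mm1, 16
    ;     psrld mm0, 16
    ;     por   mm0, mm1   ; swap words within each dword
    ;     movq  mm1, mm0
    ;     psllq mm1, 32
    ;     psrlq mm0, 32
    ;     por   mm0, mm1   ; then swap the two dwords => [7654]
    ;  untested, just to illustrate the idea.)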

  pshufw  mm1, [ecx+%1*16+8], 00011011b ; mm1 = [7654]
  movq    mm7, mm0

  paddsw  mm0, mm1            ; mm0 = [a0 a1 a2 a3]
  psubsw  mm7, mm1            ; mm7 = [b0 b1 b2 b3]

  movq      mm1, mm0
  punpckldq mm0, mm7          ; mm0 = [a0 a1 b0 b1]
  punpckhdq mm1, mm7          ; mm1 = [b2 b3 a2 a3]

  movq    mm2, qword [%2+ 0]  ;  [   M00    M01      M16    M17]
  movq    mm3, qword [%2+ 8]  ;  [   M02    M03      M18    M19]
  pmaddwd mm2, mm0            ;  [a0.M00+a1.M01 | b0.M16+b1.M17]
  movq    mm4, qword [%2+16]  ;  [   M04    M05      M20    M21]
  pmaddwd mm3, mm1            ;  [a2.M02+a3.M03 | b2.M18+b3.M19]
  movq    mm5, qword [%2+24]  ;  [   M06    M07      M22    M23]
  pmaddwd mm4, mm0            ;  [a0.M04+a1.M05 | b0.M20+b1.M21]
  movq    mm6, qword [%2+32]  ;  [   M08    M09      M24    M25]
  pmaddwd mm5, mm1            ;  [a2.M06+a3.M07 | b2.M22+b3.M23]
  movq    mm7, qword [%2+40]  ;  [   M10    M11      M26    M27]
  pmaddwd mm6, mm0            ;  [a0.M08+a1.M09 | b0.M24+b1.M25]
  paddd   mm2, mm3            ;  [ out0 | out1 ]
  pmaddwd mm7, mm1            ;  [a0.M10+a1.M11 | b0.M26+b1.M27]
  psrad   mm2, 16
  pmaddwd mm0, qword [%2+48]  ;  [a0.M12+a1.M13 | b0.M28+b1.M29]
  paddd   mm4, mm5            ;  [ out2 | out3 ]
  pmaddwd mm1, qword [%2+56]  ;  [a0.M14+a1.M15 | b0.M30+b1.M31]
  psrad   mm4, 16

  paddd   mm6, mm7            ;  [ out4 | out5 ]
  psrad   mm6, 16
  paddd   mm0, mm1            ;  [ out6 | out7 ]
  psrad   mm0, 16

  packssdw mm2, mm4           ;  [ out0|out1|out2|out3 ]
  paddsw  mm2, [%3]           ;  Round
  packssdw mm6, mm0           ;  [ out4|out5|out6|out7 ]
  paddsw  mm6, [%4]           ;  Round

  psraw   mm2, 4               ; => [-2048, 2047]
  psraw   mm6, 4

  movq    [ecx+%1*16+0], mm2
  movq    [ecx+%1*16+8], mm6
%endmacro
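
; Hedged note: by analogy with xvid's other fdct implementations, the
; presumed C prototype is  void xvid_fdct_sse(int16_t block[64]);
; the 8x8 block of 16-bit coefficients is transformed in place.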

align 16
xvid_fdct_sse:    ; ~240c
  mov ecx, [esp+4]

  fLLM_PASS ecx+0, 3
  fLLM_PASS ecx+8, 3
  fMTX_MULT  0, fTab1, Fdct_Rnd0, Fdct_Rnd0
  fMTX_MULT  1, fTab2, Fdct_Rnd2, Fdct_Rnd1
  fMTX_MULT  2, fTab3, Fdct_Rnd1, Fdct_Rnd1
  fMTX_MULT  3, fTab4, Fdct_Rnd1, Fdct_Rnd1
  fMTX_MULT  4, fTab1, Fdct_Rnd0, Fdct_Rnd0
  fMTX_MULT  5, fTab4, Fdct_Rnd1, Fdct_Rnd1
  fMTX_MULT  6, fTab3, Fdct_Rnd1, Fdct_Rnd1
  fMTX_MULT  7, fTab2, Fdct_Rnd1, Fdct_Rnd1

  ret

;//////////////////////////////////////////////////////////////////////

--=-dLq130355Ig4zdyzCliD
Content-Disposition: attachment; filename=qpel.c

/**************************************************************************
 *
 *	XVID MPEG-4 VIDEO CODEC
 *  - QPel interpolation -
 *
 *	This program is free software; you can redistribute it and/or modify
 *	it under the terms of the GNU General Public License as published by
 *	the Free Software Foundation; either version 2 of the License, or
 *	(at your option) any later version.
 *
 *	This program is distributed in the hope that it will be useful,
 *	but WITHOUT ANY WARRANTY; without even the implied warranty of
 *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *	GNU General Public License for more details.
 *
 *	You should have received a copy of the GNU General Public License
 *	along with this program; if not, write to the Free Software
 *	Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *************************************************************************/

/**************************************************************************
 *
 *	History:
 *
 *  22.10.2002	initial coding  - Skal -
 *
 *************************************************************************/

#ifndef XVID_AUTO_INCLUDE

#include "../portab.h"
#include "./qpel.h"

//////////////////////////////////////////////////////////
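// (row i of these tables holds the weights input pixel i contributes
//  to each of the SIZE outputs; cf. the Sums[k] += TABLE[i][k]*Src[i]
//  loop in the FUNC_H/FUNC_V reference filters below)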

static const int32_t FIR_Tab_8[9][8] = {
  { 14, -3,  2, -1,  0,  0,  0,  0 }
, { 23, 19, -6,  3, -1,  0,  0,  0 }
, { -7, 20, 20, -6,  3, -1,  0,  0 }
, {  3, -6, 20, 20, -6,  3, -1,  0 }
, { -1,  3, -6, 20, 20, -6,  3, -1 }
, {  0, -1,  3, -6, 20, 20, -6,  3 }
, {  0,  0, -1,  3, -6, 20, 20, -7 }
, {  0,  0,  0, -1,  3, -6, 19, 23 }
, {  0,  0,  0,  0, -1,  2, -3, 14 }
};

static const int32_t FIR_Tab_16[17][16] = {
  { 14, -3,  2, -1,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 }
, { 23, 19, -6,  3, -1,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 }
, { -7, 20, 20, -6,  3, -1,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0 }
, {  3, -6, 20, 20, -6,  3, -1,  0,  0,  0,  0,  0,  0,  0,  0,  0 }
, { -1,  3, -6, 20, 20, -6,  3, -1,  0,  0,  0,  0,  0,  0,  0,  0 }
, {  0, -1,  3, -6, 20, 20, -6,  3, -1,  0,  0,  0,  0,  0,  0,  0 }
, {  0,  0, -1,  3, -6, 20, 20, -6,  3, -1,  0,  0,  0,  0,  0,  0 }
, {  0,  0,  0, -1,  3, -6, 20, 20, -6,  3, -1,  0,  0,  0,  0,  0 }
, {  0,  0,  0,  0, -1,  3, -6, 20, 20, -6,  3, -1,  0,  0,  0,  0 }
, {  0,  0,  0,  0,  0, -1,  3, -6, 20, 20, -6,  3, -1,  0,  0,  0 }
, {  0,  0,  0,  0,  0,  0, -1,  3, -6, 20, 20, -6,  3, -1,  0,  0 }
, {  0,  0,  0,  0,  0,  0,  0, -1,  3, -6, 20, 20, -6,  3, -1,  0 }
, {  0,  0,  0,  0,  0,  0,  0,  0, -1,  3, -6, 20, 20, -6,  3, -1 }
, {  0,  0,  0,  0,  0,  0,  0,  0,  0, -1,  3, -6, 20, 20, -6,  3 }
, {  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, -1,  3, -6, 20, 20, -7 }
, {  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, -1,  3, -6, 19, 23 }
, {  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0, -1,  2, -3, 14 }
};

//////////////////////////////////////////////////////////
// Implementation

#define XVID_AUTO_INCLUDE
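
  /* The #include __FILE__ lines below are a poor man's template
     mechanism: each inclusion re-expands the XVID_AUTO_INCLUDE part
     at the bottom of this file with the SIZE/TABLE/STORE/FUNC_*
     macros currently in effect, stamping out one family of filters
     per configuration. E.g. the first block below generates
       static void H_Pass_16_C(uint8_t *Dst, const uint8_t *Src,
                               int32_t H, int32_t BpS, int32_t Rnd)
     and friends, using FIR_Tab_16 and a plain-assignment STORE. */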

  // 16x? filters

#define SIZE  16
#define TABLE FIR_Tab_16

#define STORE(d,s)  (d) = (s)
#define FUNC_H      H_Pass_16_C
#define FUNC_V      V_Pass_16_C
#define FUNC_HA     H_Pass_Avrg_16_C
#define FUNC_VA     V_Pass_Avrg_16_C
#define FUNC_HA_UP  H_Pass_Avrg_Up_16_C
#define FUNC_VA_UP  V_Pass_Avrg_Up_16_C

#include __FILE__   /* self-include ourself */

  // note: B-frame always uses Rnd=0...
#define STORE(d,s)  (d) = ( (s)+(d)+1 ) >> 1
#define FUNC_H      H_Pass_16_Add_C
#define FUNC_V      V_Pass_16_Add_C
#define FUNC_HA     H_Pass_Avrg_16_Add_C
#define FUNC_VA     V_Pass_Avrg_16_Add_C
#define FUNC_HA_UP  H_Pass_Avrg_Up_16_Add_C
#define FUNC_VA_UP  V_Pass_Avrg_Up_16_Add_C

#include __FILE__   /* self-include ourself */

#undef SIZE
#undef TABLE

  // 8x? filters

#define SIZE  8
#define TABLE FIR_Tab_8

#define STORE(d,s)  (d) = (s)
#define FUNC_H      H_Pass_8_C
#define FUNC_V      V_Pass_8_C
#define FUNC_HA     H_Pass_Avrg_8_C
#define FUNC_VA     V_Pass_Avrg_8_C
#define FUNC_HA_UP  H_Pass_Avrg_Up_8_C
#define FUNC_VA_UP  V_Pass_Avrg_Up_8_C

#include __FILE__   /* self-include ourself */

  // note: B-frame always uses Rnd=0...
#define STORE(d,s)  (d) = ( (s)+(d)+1 ) >> 1
#define FUNC_H      H_Pass_8_Add_C
#define FUNC_V      V_Pass_8_Add_C
#define FUNC_HA     H_Pass_Avrg_8_Add_C
#define FUNC_VA     V_Pass_Avrg_8_Add_C
#define FUNC_HA_UP  H_Pass_Avrg_Up_8_Add_C
#define FUNC_VA_UP  V_Pass_Avrg_Up_8_Add_C

#include __FILE__   /* self-include ourself */

#undef SIZE
#undef TABLE

#undef XVID_AUTO_INCLUDE

//////////////////////////////////////////////////////////
// general-purpose hooks
// TODO: embed in enc/dec structure?

XVID_QP_FUNCS *xvid_QP_Funcs = 0;
XVID_QP_FUNCS *xvid_QP_Add_Funcs = 0;

//////////////////////////////////////////////////////////
// plain C impl. declaration
// TODO: should be declared elsewhere?

XVID_QP_FUNCS xvid_QP_Funcs_C = {
  H_Pass_16_C,  H_Pass_Avrg_16_C,  H_Pass_Avrg_Up_16_C
, V_Pass_16_C,  V_Pass_Avrg_16_C,  V_Pass_Avrg_Up_16_C

, H_Pass_8_C,H_Pass_Avrg_8_C,H_Pass_Avrg_Up_8_C
, V_Pass_8_C,V_Pass_Avrg_8_C,V_Pass_Avrg_Up_8_C
};

XVID_QP_FUNCS xvid_QP_Add_Funcs_C = {
  H_Pass_16_Add_C,  H_Pass_Avrg_16_Add_C,  H_Pass_Avrg_Up_16_Add_C
, V_Pass_16_Add_C,  V_Pass_Avrg_16_Add_C,  V_Pass_Avrg_Up_16_Add_C

, H_Pass_8_Add_C,H_Pass_Avrg_8_Add_C,H_Pass_Avrg_Up_8_Add_C
, V_Pass_8_Add_C,V_Pass_Avrg_8_Add_C,V_Pass_Avrg_Up_8_Add_C
};

//////////////////////////////////////////////////////////
// mmx impl. declaration (cf. qpel_mmx.asm)
// TODO: should be declared elsewhere?

extern XVID_QP_PASS_SIGNATURE(xvid_H_Pass_16_mmx);
extern XVID_QP_PASS_SIGNATURE(xvid_H_Pass_Avrg_16_mmx);
extern XVID_QP_PASS_SIGNATURE(xvid_H_Pass_Avrg_Up_16_mmx);
extern XVID_QP_PASS_SIGNATURE(xvid_V_Pass_16_mmx);
extern XVID_QP_PASS_SIGNATURE(xvid_V_Pass_Avrg_16_mmx);
extern XVID_QP_PASS_SIGNATURE(xvid_V_Pass_Avrg_Up_16_mmx);

extern XVID_QP_PASS_SIGNATURE(xvid_H_Pass_8_mmx);
extern XVID_QP_PASS_SIGNATURE(xvid_H_Pass_Avrg_8_mmx);
extern XVID_QP_PASS_SIGNATURE(xvid_H_Pass_Avrg_Up_8_mmx);
extern XVID_QP_PASS_SIGNATURE(xvid_V_Pass_8_mmx);
extern XVID_QP_PASS_SIGNATURE(xvid_V_Pass_Avrg_8_mmx);
extern XVID_QP_PASS_SIGNATURE(xvid_V_Pass_Avrg_Up_8_mmx);

XVID_QP_FUNCS xvid_QP_Funcs_mmx = {
  xvid_H_Pass_16_mmx,  xvid_H_Pass_Avrg_16_mmx,  xvid_H_Pass_Avrg_Up_16_mmx
, xvid_V_Pass_16_mmx,  xvid_V_Pass_Avrg_16_mmx,  xvid_V_Pass_Avrg_Up_16_mmx

, xvid_H_Pass_8_mmx,xvid_H_Pass_Avrg_8_mmx,xvid_H_Pass_Avrg_Up_8_mmx
, xvid_V_Pass_8_mmx,xvid_V_Pass_Avrg_8_mmx,xvid_V_Pass_Avrg_Up_8_mmx
};

extern XVID_QP_PASS_SIGNATURE(xvid_H_Pass_Add_16_mmx);
extern XVID_QP_PASS_SIGNATURE(xvid_H_Pass_Avrg_Add_16_mmx);
extern XVID_QP_PASS_SIGNATURE(xvid_H_Pass_Avrg_Up_Add_16_mmx);
extern XVID_QP_PASS_SIGNATURE(xvid_V_Pass_Add_16_mmx);
extern XVID_QP_PASS_SIGNATURE(xvid_V_Pass_Avrg_Add_16_mmx);
extern XVID_QP_PASS_SIGNATURE(xvid_V_Pass_Avrg_Up_Add_16_mmx);

extern XVID_QP_PASS_SIGNATURE(xvid_H_Pass_8_Add_mmx);
extern XVID_QP_PASS_SIGNATURE(xvid_H_Pass_Avrg_8_Add_mmx);
extern XVID_QP_PASS_SIGNATURE(xvid_H_Pass_Avrg_Up_8_Add_mmx);
extern XVID_QP_PASS_SIGNATURE(xvid_V_Pass_8_Add_mmx);
extern XVID_QP_PASS_SIGNATURE(xvid_V_Pass_Avrg_8_Add_mmx);
extern XVID_QP_PASS_SIGNATURE(xvid_V_Pass_Avrg_Up_8_Add_mmx);

XVID_QP_FUNCS xvid_QP_Add_Funcs_mmx = {
  xvid_H_Pass_Add_16_mmx,  xvid_H_Pass_Avrg_Add_16_mmx,  xvid_H_Pass_Avrg_Up_Add_16_mmx
, xvid_V_Pass_Add_16_mmx,  xvid_V_Pass_Avrg_Add_16_mmx,  xvid_V_Pass_Avrg_Up_Add_16_mmx

, xvid_H_Pass_8_Add_mmx,xvid_H_Pass_Avrg_8_Add_mmx,xvid_H_Pass_Avrg_Up_8_Add_mmx
, xvid_V_Pass_8_Add_mmx,xvid_V_Pass_Avrg_8_Add_mmx,xvid_V_Pass_Avrg_Up_8_Add_mmx
};

//////////////////////////////////////////////////////////
// tables for ASM

extern uint16_t xvid_Expand_mmx[256][4]; // 8b -> 64b expansion table

  // Alternate way of filtering (cf. USE_TABLES flag in qpel_mmx.asm)
  //
  // 17 tables, 2K each => 34K
  // Mirroring can be achieved by composing 11 basic tables
  // (for instance: (23,19,-6,3) = (20,20,-6,3) + (3,-1,0,0),
  //  since 20+3=23 and 20-1=19).
  // Using symmetries (and bswap) could further reduce
  // the memory to 7 tables (->14K).

extern int16_t xvid_FIR_1_0_0_0[256][4];
extern int16_t xvid_FIR_3_1_0_0[256][4];
extern int16_t xvid_FIR_6_3_1_0[256][4];
extern int16_t xvid_FIR_14_3_2_1[256][4];
extern int16_t xvid_FIR_20_6_3_1[256][4];
extern int16_t xvid_FIR_20_20_6_3[256][4];
extern int16_t xvid_FIR_23_19_6_3[256][4];
extern int16_t xvid_FIR_7_20_20_6[256][4];
extern int16_t xvid_FIR_6_20_20_6[256][4];
extern int16_t xvid_FIR_6_20_20_7[256][4];
extern int16_t xvid_FIR_3_6_20_20[256][4];
extern int16_t xvid_FIR_3_6_19_23[256][4];
extern int16_t xvid_FIR_1_3_6_20[256][4];
extern int16_t xvid_FIR_1_2_3_14[256][4];
extern int16_t xvid_FIR_0_1_3_6[256][4];
extern int16_t xvid_FIR_0_0_1_3[256][4];
extern int16_t xvid_FIR_0_0_0_1[256][4];

//////////////////////////////////////////////////////////

uint16_t xvid_Expand_mmx[256][4]; // 8b -> 64b expansion table

int16_t xvid_FIR_1_0_0_0[256][4];
int16_t xvid_FIR_3_1_0_0[256][4];
int16_t xvid_FIR_6_3_1_0[256][4];
int16_t xvid_FIR_14_3_2_1[256][4];
int16_t xvid_FIR_20_6_3_1[256][4];
int16_t xvid_FIR_20_20_6_3[256][4];
int16_t xvid_FIR_23_19_6_3[256][4];
int16_t xvid_FIR_7_20_20_6[256][4];
int16_t xvid_FIR_6_20_20_6[256][4];
int16_t xvid_FIR_6_20_20_7[256][4];
int16_t xvid_FIR_3_6_20_20[256][4];
int16_t xvid_FIR_3_6_19_23[256][4];
int16_t xvid_FIR_1_3_6_20[256][4];
int16_t xvid_FIR_1_2_3_14[256][4];
int16_t xvid_FIR_0_1_3_6[256][4];
int16_t xvid_FIR_0_0_1_3[256][4];
int16_t xvid_FIR_0_0_0_1[256][4];

static void Init_FIR_Table(int16_t Tab[][4],
                           int A, int B, int C, int D)
{
  int i;
  for(i=0; i<256; ++i) {
    Tab[i][0] = i*A;
    Tab[i][1] = i*B;
    Tab[i][2] = i*C;
    Tab[i][3] = i*D;
  }
}


void xvid_Init_QP_mmx()
{
  int i;
  for(i=0; i<256; ++i) {
    xvid_Expand_mmx[i][0] = i;
    xvid_Expand_mmx[i][1] = i;
    xvid_Expand_mmx[i][2] = i;
    xvid_Expand_mmx[i][3] = i;
  }

  // Alternate way of filtering (cf. USE_TABLES flag in qpel_mmx.asm)

  Init_FIR_Table(xvid_FIR_1_0_0_0,   -1,  0,  0,  0);
  Init_FIR_Table(xvid_FIR_3_1_0_0,    3, -1,  0,  0);
  Init_FIR_Table(xvid_FIR_6_3_1_0,   -6,  3, -1,  0);
  Init_FIR_Table(xvid_FIR_14_3_2_1,  14, -3,  2, -1);
  Init_FIR_Table(xvid_FIR_20_6_3_1,  20, -6,  3, -1);
  Init_FIR_Table(xvid_FIR_20_20_6_3, 20, 20, -6,  3);
  Init_FIR_Table(xvid_FIR_23_19_6_3, 23, 19, -6,  3);
  Init_FIR_Table(xvid_FIR_7_20_20_6, -7, 20, 20, -6);
  Init_FIR_Table(xvid_FIR_6_20_20_6, -6, 20, 20, -6);
  Init_FIR_Table(xvid_FIR_6_20_20_7, -6, 20, 20, -7);
  Init_FIR_Table(xvid_FIR_3_6_20_20,  3, -6, 20, 20);
  Init_FIR_Table(xvid_FIR_3_6_19_23,  3, -6, 19, 23);
  Init_FIR_Table(xvid_FIR_1_3_6_20,  -1,  3, -6, 20);
  Init_FIR_Table(xvid_FIR_1_2_3_14,  -1,  2, -3, 14);
  Init_FIR_Table(xvid_FIR_0_1_3_6,    0, -1,  3, -6);
  Init_FIR_Table(xvid_FIR_0_0_1_3,    0,  0, -1,  3);
  Init_FIR_Table(xvid_FIR_0_0_0_1,    0,  0,  0, -1);

}

#endif /* !XVID_AUTO_INCLUDE */

//////////////////////////////////////////////////////////
// "reference" filters impl. in plain C
//////////////////////////////////////////////////////////

#ifdef XVID_AUTO_INCLUDE

static
void FUNC_H(uint8_t *Dst, const uint8_t *Src, int32_t H, int32_t BpS, int32_t Rnd)
{
  while(H-->0) {
    int32_t i, k;
    int32_t Sums[SIZE] = { 0 };
    for(i=0; i<=SIZE; ++i)
      for(k=0; k<SIZE; ++k)
        Sums[k] += TABLE[i][k] * Src[i];

    for(i=0; i<SIZE; ++i) {
      int32_t C = ( Sums[i] + 16-Rnd ) >> 5;
      if (C<0) C = 0; else if (C>255) C = 255;
      STORE(Dst[i], C);
    }
    Src += BpS;
    Dst += BpS;
  }
}

static
void FUNC_V(uint8_t *Dst, const uint8_t *Src, int32_t W, int32_t BpS, int32_t Rnd)
{
  while(W-->0) {
    int32_t i, k;
    int32_t Sums[SIZE] = { 0 };
    const uint8_t *S = Src++;
    uint8_t *D = Dst++;
    for(i=0; i<=SIZE; ++i) {
      for(k=0; k<SIZE; ++k)
        Sums[k] += TABLE[i][k] * S[0];
      S += BpS;
    }

    for(i=0; i<SIZE; ++i) {
      int32_t C = ( Sums[i] + 16-Rnd )>>5;
      if (C<0) C = 0; else if (C>255) C = 255;
      STORE(D[0], C);
      D += BpS;
    }
  }
}

static
void FUNC_HA(uint8_t *Dst, const uint8_t *Src, int32_t H, int32_t BpS, int32_t Rnd)
{
  while(H-->0) {
    int32_t i, k;
    int32_t Sums[SIZE] = { 0 };
    for(i=0; i<=SIZE; ++i)
      for(k=0; k<SIZE; ++k)
        Sums[k] += TABLE[i][k] * Src[i];

    for(i=0; i<SIZE; ++i) {
      int32_t C = ( Sums[i] + 16-Rnd ) >> 5;
      if (C<0) C = 0; else if (C>255) C = 255;
      C = (C+Src[i]+1-Rnd) >> 1;
      STORE(Dst[i], C);
    }
    Src += BpS;
    Dst += BpS;
  }
}

static
void FUNC_HA_UP(uint8_t *Dst, const uint8_t *Src, int32_t H, int32_t BpS, int32_t Rnd)
{
  while(H-->0) {
    int32_t i, k;
    int32_t Sums[SIZE] = { 0 };
    for(i=0; i<=SIZE; ++i)
      for(k=0; k<SIZE; ++k)
        Sums[k] += TABLE[i][k] * Src[i];

    for(i=0; i<SIZE; ++i) {
      int32_t C = ( Sums[i] + 16-Rnd ) >> 5;
      if (C<0) C = 0; else if (C>255) C = 255;
      C = (C+Src[i+1]+1-Rnd) >> 1;
      STORE(Dst[i], C);
    }
    Src += BpS;
    Dst += BpS;
  }
}

static
void FUNC_VA(uint8_t *Dst, const uint8_t *Src, int32_t W, int32_t BpS, int32_t Rnd)
{
  while(W-->0) {
    int32_t i, k;
    int32_t Sums[SIZE] = { 0 };
    const uint8_t *S = Src;
    uint8_t *D = Dst;

    for(i=0; i<=SIZE; ++i) {
      for(k=0; k<SIZE; ++k)
        Sums[k] += TABLE[i][k] * S[0];
      S += BpS;
    }

    S = Src;
    for(i=0; i<SIZE; ++i) {
      int32_t C = ( Sums[i] + 16-Rnd )>>5;
      if (C<0) C = 0; else if (C>255) C = 255;
      C = ( C+S[0]+1-Rnd ) >> 1;
      STORE(D[0], C);
      D += BpS;
      S += BpS;
    Src++;
    Dst++;
  }
}

static
void FUNC_VA_UP(uint8_t *Dst, const uint8_t *Src, int32_t W, int32_t BpS, int32_t Rnd)
{
  while(W-->0) {
    int32_t i, k;
    int32_t Sums[SIZE] = { 0 };
    const uint8_t *S = Src;
    uint8_t *D = Dst;

    for(i=0; i<=SIZE; ++i) {
      for(k=0; k<SIZE; ++k)
        Sums[k] += TABLE[i][k] * S[0];
      S += BpS;
    }

    S = Src + BpS;
    for(i=0; i<SIZE; ++i) {
      int32_t C = ( Sums[i] + 16-Rnd )>>5;
      if (C<0) C = 0; else if (C>255) C = 255;
      C = ( C+S[0]+1-Rnd ) >> 1;
      STORE(D[0], C);
      D += BpS;
      S += BpS;
    }
    Dst++;
    Src++;
  }
}

#undef STORE
#undef FUNC_H
#undef FUNC_V
#undef FUNC_HA
#undef FUNC_VA
#undef FUNC_HA_UP
#undef FUNC_VA_UP

#endif /* XVID_AUTO_INCLUDE */

//////////////////////////////////////////////////////////

--=-dLq130355Ig4zdyzCliD
Content-Disposition: attachment; filename=qpel.h

/*****************************************************************************
*
*  XVID MPEG-4 VIDEO CODEC
*  - QPel interpolation -
*
*  This program is free software ; you can redistribute it and/or modify
*  it under the terms of the GNU General Public License as published by
*  the Free Software Foundation ; either version 2 of the License, or
*  (at your option) any later version.
*
*  This program is distributed in the hope that it will be useful,
*  but WITHOUT ANY WARRANTY ; without even the implied warranty of
*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
*  GNU General Public License for more details.
*
*  You should have received a copy of the GNU General Public License
*  along with this program ; if not, write to the Free Software
*  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
*
*****************************************************************************/

/**************************************************************************
 *
 *	History:
 *
 *  22.10.2002	initial coding  - Skal -
 *
 *************************************************************************/

#ifndef _XVID_QPEL_H_
#define _XVID_QPEL_H_

#include "../utils/mem_transfer.h"

/*****************************************************************************
 * Signatures
 ****************************************************************************/

#define XVID_QP_PASS_SIGNATURE(NAME)  \
  void (NAME)(uint8_t *dst, const uint8_t *src, int32_t length, int32_t BpS, int32_t rounding)

typedef  XVID_QP_PASS_SIGNATURE(XVID_QP_PASS);

    // We put everything in a single struct so it can easily be passed
    // to prediction functions as a whole...

struct XVID_QP_FUNCS {

    // filter for QPel 16x? prediction

  XVID_QP_PASS *H_Pass;
  XVID_QP_PASS *H_Pass_Avrg;
  XVID_QP_PASS *H_Pass_Avrg_Up;
  XVID_QP_PASS *V_Pass;
  XVID_QP_PASS *V_Pass_Avrg;
  XVID_QP_PASS *V_Pass_Avrg_Up;

    // filter for QPel 8x? prediction

  XVID_QP_PASS *H_Pass_8;
  XVID_QP_PASS *H_Pass_Avrg_8;
  XVID_QP_PASS *H_Pass_Avrg_Up_8;
  XVID_QP_PASS *V_Pass_8;
  XVID_QP_PASS *V_Pass_Avrg_8;
  XVID_QP_PASS *V_Pass_Avrg_Up_8;
};
typedef struct XVID_QP_FUNCS  XVID_QP_FUNCS;

/*****************************************************************************
 * fwd dcl
 ****************************************************************************/

extern XVID_QP_FUNCS xvid_QP_Funcs_C;       // for P-frames
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_C;   // for B-frames

extern XVID_QP_FUNCS xvid_QP_Funcs_mmx;
extern XVID_QP_FUNCS xvid_QP_Add_Funcs_mmx;
extern void xvid_Init_QP_mmx(); // should be called at mmx initialization

extern XVID_QP_FUNCS *xvid_QP_Funcs;      // <- main pointer for enc/dec structure
extern XVID_QP_FUNCS *xvid_QP_Add_Funcs;  // <- main pointer for enc/dec structure

/*****************************************************************************
 * macros
 ****************************************************************************/

/*****************************************************************************

    Passes to be performed

 case 0:         copy
 case 2:         h-pass
 case 1/3:       h-pass + h-avrg
 case 8:                           v-pass
 case 10:        h-pass          + v-pass
 case 9/11:      h-pass + h-avrg + v-pass
 case 4/12:                        v-pass + v-avrg
 case 6/14:      h-pass          + v-pass + v-avrg
 case 5/13/7/15: h-pass + h-avrg + v-pass + v-avrg

 ****************************************************************************/
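
/* Example: dx=3, dy=2 gives quads = (3&3) | ((2&3)<<2) = 3|8 = 11,
   i.e. h-pass + h-avrg (up) followed by v-pass -- case 11 below. */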

static __inline void new_interpolate16x16_quarterpel(
    uint8_t * const cur,
    uint8_t * const refn,
		uint8_t * const refh,
		uint8_t * const refv,
		uint8_t * const refhv,
		const uint32_t x, const uint32_t y,
		const int32_t dx,  const int dy,
		const uint32_t stride,
		const uint32_t rounding)
{
	const uint8_t *src;
	uint8_t *dst;
	uint8_t *tmp;
  int32_t quads;
  const XVID_QP_FUNCS *Ops;

  Ops = xvid_QP_Funcs; // TODO: pass as argument
  quads = (dx&3) | ((dy&3)<<2);
  dst = cur + y * stride + x;
  src = refn + (dy>>2) * stride + (dx>>2);
  tmp = refh; // we need at least a 16 x stride scratch block

  switch(quads) {
    case 0:
      transfer8x8_copy( dst, src, stride);
      transfer8x8_copy( dst+8, src+8, stride);
      transfer8x8_copy( dst+8*stride, src+8*stride, stride);
      transfer8x8_copy( dst+8*stride+8, src+8*stride+8, stride);
    break;
    case 1:
      Ops->H_Pass_Avrg(dst, src, 16, stride, rounding);
    break;
    case 2:
      Ops->H_Pass(dst, src, 16, stride, rounding);
    break;
    case 3:
      Ops->H_Pass_Avrg_Up(dst, src, 16, stride, rounding);
    break;
    case 4:
      Ops->V_Pass_Avrg(dst, src, 16, stride, rounding);
    break;
    case 5:
      Ops->H_Pass_Avrg(tmp, src, 17, stride, rounding);
      Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
    break;
    case 6:
      Ops->H_Pass(tmp, src,   17, stride, rounding);
      Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
    break;
    case 7:
      Ops->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
      Ops->V_Pass_Avrg(dst, tmp, 16, stride, rounding);
    break;
    case 8:
      Ops->V_Pass(dst, src, 16, stride, rounding);
    break;
    case 9:
      Ops->H_Pass_Avrg(tmp, src, 17, stride, rounding);
      Ops->V_Pass(dst, tmp, 16, stride, rounding);
    break;
    case 10:
      Ops->H_Pass(tmp, src, 17, stride, rounding);
      Ops->V_Pass(dst, tmp, 16, stride, rounding);
    break;
    case 11:
      Ops->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
      Ops->V_Pass(dst, tmp, 16, stride, rounding);
    break;
    case 12:
      Ops->V_Pass_Avrg_Up(dst, src, 16, stride, rounding);
    break;
    case 13:
      Ops->H_Pass_Avrg(tmp, src, 17, stride, rounding);
      Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
    break;
    case 14:
      Ops->H_Pass(tmp, src, 17, stride, rounding);
      Ops->V_Pass_Avrg_Up( dst, tmp, 16, stride, rounding);
    break;
    case 15:
      Ops->H_Pass_Avrg_Up(tmp, src, 17, stride, rounding);
      Ops->V_Pass_Avrg_Up(dst, tmp, 16, stride, rounding);
    break;
  }
}

static __inline void new_interpolate16x8_quarterpel(
    uint8_t * const cur,
    uint8_t * const refn,
		uint8_t * const refh,
		uint8_t * const refv,
		uint8_t * const refhv,
		const uint32_t x, const uint32_t y,
		const int32_t dx,  const int dy,
		const uint32_t stride,
		const uint32_t rounding)
{
	const uint8_t *src;
	uint8_t *dst;
	uint8_t *tmp;
  int32_t quads;
  const XVID_QP_FUNCS *Ops;

  Ops = xvid_QP_Funcs; // TODO: pass as argument
  quads = (dx&3) | ((dy&3)<<2);
  dst = cur + y * stride + x;
  src = refn + (dy>>2) * stride + (dx>>2);
  tmp = refh; // we need at least a 16 x stride scratch block

  switch(quads) {
    case 0:
      transfer8x8_copy( dst, src, stride);
      transfer8x8_copy( dst+8, src+8, stride);
    break;
    case 1:
      Ops->H_Pass_Avrg(dst, src, 8, stride, rounding);
    break;
    case 2:
      Ops->H_Pass(dst, src, 8, stride, rounding);
    break;
    case 3:
      Ops->H_Pass_Avrg_Up(dst, src, 8, stride, rounding);
    break;
    case 4:
      Ops->V_Pass_Avrg_8(dst, src, 16, stride, rounding);
    break;
    case 5:
      Ops->H_Pass_Avrg(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_8(dst, tmp, 16, stride, rounding);
    break;
    case 6:
      Ops->H_Pass(tmp, src,   9, stride, rounding);
      Ops->V_Pass_Avrg_8(dst, tmp, 16, stride, rounding);
    break;
    case 7:
      Ops->H_Pass_Avrg_Up(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_8(dst, tmp, 16, stride, rounding);
    break;
    case 8:
      Ops->V_Pass_8(dst, src, 16, stride, rounding);
    break;
    case 9:
      Ops->H_Pass_Avrg(tmp, src, 9, stride, rounding);
      Ops->V_Pass_8(dst, tmp, 16, stride, rounding);
    break;
    case 10:
      Ops->H_Pass(tmp, src, 9, stride, rounding);
      Ops->V_Pass_8(dst, tmp, 16, stride, rounding);
    break;
    case 11:
      Ops->H_Pass_Avrg_Up(tmp, src, 9, stride, rounding);
      Ops->V_Pass_8(dst, tmp, 16, stride, rounding);
    break;
    case 12:
      Ops->V_Pass_Avrg_Up_8(dst, src, 16, stride, rounding);
    break;
    case 13:
      Ops->H_Pass_Avrg(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_Up_8(dst, tmp, 16, stride, rounding);
    break;
    case 14:
      Ops->H_Pass(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_Up_8( dst, tmp, 16, stride, rounding);
    break;
    case 15:
      Ops->H_Pass_Avrg_Up(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_Up_8(dst, tmp, 16, stride, rounding);
    break;
  }
}

static __inline void new_interpolate8x8_quarterpel(
    uint8_t * const cur,
    uint8_t * const refn,
		uint8_t * const refh,
		uint8_t * const refv,
		uint8_t * const refhv,
		const uint32_t x, const uint32_t y,
		const int32_t dx,  const int dy,
		const uint32_t stride,
		const uint32_t rounding)
{
	const uint8_t *src;
	uint8_t *dst;
	uint8_t *tmp;
  int32_t quads;
  const XVID_QP_FUNCS *Ops;

  Ops = xvid_QP_Funcs; // TODO: pass as argument
  quads = (dx&3) | ((dy&3)<<2);
  dst = cur + y * stride + x;
  src = refn + (dy>>2) * stride + (dx>>2);
  tmp = refh; // we need at least a 16 x stride scratch block

  switch(quads) {
    case 0:
      transfer8x8_copy( dst, src, stride);
    break;
    case 1:
      Ops->H_Pass_Avrg_8(dst, src, 8, stride, rounding);
    break;
    case 2:
      Ops->H_Pass_8(dst, src, 8, stride, rounding);
    break;
    case 3:
      Ops->H_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
    break;
    case 4:
      Ops->V_Pass_Avrg_8(dst, src, 8, stride, rounding);
    break;
    case 5:
      Ops->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
    break;
    case 6:
      Ops->H_Pass_8(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
    break;
    case 7:
      Ops->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_8(dst, tmp, 8, stride, rounding);
    break;
    case 8:
      Ops->V_Pass_8(dst, src, 8, stride, rounding);
    break;
    case 9:
      Ops->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
      Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
    break;
    case 10:
      Ops->H_Pass_8(tmp, src, 9, stride, rounding);
      Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
    break;
    case 11:
      Ops->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
      Ops->V_Pass_8(dst, tmp, 8, stride, rounding);
    break;
    case 12:
      Ops->V_Pass_Avrg_Up_8(dst, src, 8, stride, rounding);
    break;
    case 13:
      Ops->H_Pass_Avrg_8(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
    break;
    case 14:
      Ops->H_Pass_8(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_Up_8( dst, tmp, 8, stride, rounding);
    break;
    case 15:
      Ops->H_Pass_Avrg_Up_8(tmp, src, 9, stride, rounding);
      Ops->V_Pass_Avrg_Up_8(dst, tmp, 8, stride, rounding);
    break;
  }
}
/*****************************************************************************/

#endif  /* _XVID_QPEL_H_ */

--=-dLq130355Ig4zdyzCliD
Content-Disposition: attachment; filename=qpel_mmx.asm

;/*****************************************************************************
; *
; *  XVID MPEG-4 VIDEO CODEC
; *  - Quarter-pixel interpolation -
; *  Copyright(C) 2002 Pascal Massimino <skal@planet-d.net>
; *
; *  This file is part of XviD, a free MPEG-4 video encoder/decoder
; *
; *  XviD is free software; you can redistribute it and/or modify it
; *  under the terms of the GNU General Public License as published by
; *  the Free Software Foundation; either version 2 of the License, or
; *  (at your option) any later version.
; *
; *  This program is distributed in the hope that it will be useful,
; *  but WITHOUT ANY WARRANTY; without even the implied warranty of
; *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
; *  GNU General Public License for more details.
; *
; *  You should have received a copy of the GNU General Public License
; *  along with this program; if not, write to the Free Software
; *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
; *
; *  Under section 8 of the GNU General Public License, the copyright
; *  holders of XVID explicitly forbid distribution in the following
; *  countries:
; *
; * - Japan
; * - United States of America
; *
; *  Linking XviD statically or dynamically with other modules is making a
; *  combined work based on XviD.  Thus, the terms and conditions of the
; *  GNU General Public License cover the whole combination.
; *
; *  As a special exception, the copyright holders of XviD give you
; *  permission to link XviD with independent modules that communicate with
; *  XviD solely through the VFW1.1 and DShow interfaces, regardless of the
; *  license terms of these independent modules, and to copy and distribute
; *  the resulting combined work under terms of your choice, provided that
; *  every copy of the combined work is accompanied by a complete copy of
; *  the source code of XviD (the version of XviD used to produce the
; *  combined work), being distributed under the terms of the GNU General
; *  Public License plus this exception.  An independent module is a module
; *  which is not derived from or based on XviD.
; *
; *  Note that people who make modified versions of XviD are not obligated
; *  to grant this special exception for their modified versions; it is
; *  their choice whether to do so.  The GNU General Public License gives
; *  permission to release a modified version without this exception; this
; *  exception also makes it possible to release a modified version which
; *  carries forward this exception.
; *
; * $Id: qpel_mmx.asm,v 1.1.2.1 2002/12/08 05:34:16 suxen_drol Exp $
; *
; ****************************************************************************/

;/****************************************************************************
; *
; *	History:
; *
; * 22.10.2002  initial coding. unoptimized 'proof of concept',
; *             just to heft the qpel filtering. - Skal -
; *
; ****************************************************************************/


%define USE_TABLES      ; in order to use xvid_FIR_x_x_x_x tables
                        ; instead of xvid_Expand_mmx...


bits 32

%macro cglobal 1
	%ifdef PREFIX
		global _%1
		%define %1 _%1
	%else
		global %1
	%endif
%endmacro
%macro cextern 1
	%ifdef PREFIX
		extern _%1
		%define %1 _%1
	%else
		extern %1
	%endif
%endmacro


;//////////////////////////////////////////////////////////////////////
;// Declarations
;//   all signatures are:
;// void XXX(uint8_t *dst, const uint8_t *src,
;//          int32_t length, int32_t stride, int32_t rounding)
;//////////////////////////////////////////////////////////////////////

cglobal xvid_H_Pass_16_mmx
cglobal xvid_H_Pass_Avrg_16_mmx
cglobal xvid_H_Pass_Avrg_Up_16_mmx
cglobal xvid_V_Pass_16_mmx
cglobal xvid_V_Pass_Avrg_16_mmx
cglobal xvid_V_Pass_Avrg_Up_16_mmx
cglobal xvid_H_Pass_8_mmx
cglobal xvid_H_Pass_Avrg_8_mmx
cglobal xvid_H_Pass_Avrg_Up_8_mmx
cglobal xvid_V_Pass_8_mmx
cglobal xvid_V_Pass_Avrg_8_mmx
cglobal xvid_V_Pass_Avrg_Up_8_mmx

cglobal xvid_H_Pass_Add_16_mmx
cglobal xvid_H_Pass_Avrg_Add_16_mmx
cglobal xvid_H_Pass_Avrg_Up_Add_16_mmx
cglobal xvid_V_Pass_Add_16_mmx
cglobal xvid_V_Pass_Avrg_Add_16_mmx
cglobal xvid_V_Pass_Avrg_Up_Add_16_mmx
cglobal xvid_H_Pass_8_Add_mmx
cglobal xvid_H_Pass_Avrg_8_Add_mmx
cglobal xvid_H_Pass_Avrg_Up_8_Add_mmx
cglobal xvid_V_Pass_8_Add_mmx
cglobal xvid_V_Pass_Avrg_8_Add_mmx
cglobal xvid_V_Pass_Avrg_Up_8_Add_mmx

cextern xvid_Expand_mmx

%ifdef USE_TABLES

cextern xvid_FIR_1_0_0_0
cextern xvid_FIR_3_1_0_0
cextern xvid_FIR_6_3_1_0
cextern xvid_FIR_14_3_2_1
cextern xvid_FIR_20_6_3_1
cextern xvid_FIR_20_20_6_3
cextern xvid_FIR_23_19_6_3
cextern xvid_FIR_7_20_20_6
cextern xvid_FIR_6_20_20_6
cextern xvid_FIR_6_20_20_7
cextern xvid_FIR_3_6_20_20
cextern xvid_FIR_3_6_19_23
cextern xvid_FIR_1_3_6_20
cextern xvid_FIR_1_2_3_14
cextern xvid_FIR_0_1_3_6
cextern xvid_FIR_0_0_1_3
cextern xvid_FIR_0_0_0_1

%endif

;//////////////////////////////////////////////////////////////////////

section .data

align 16
Rounder1_MMX:
times 4 dw 1
Rounder0_MMX:
times 4 dw 0

align 16
Rounder_QP_MMX:
times 4 dw 16
times 4 dw 15
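; (the 16/15 pairs above are the 16-Rnd rounders for the final >>5
;  normalization; cf. the C reference in qpel.c: (Sums[i]+16-Rnd)>>5)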

%ifndef USE_TABLES

align 16

  ; H-Pass table shared by 16x? and 8x? filters

FIR_R0:  dw 14, -3,  2, -1
align 16
FIR_R1:  dw 23, 19, -6,  3,   -1,  0,  0,  0

FIR_R2:  dw -7, 20, 20, -6,    3, -1,  0,  0

FIR_R3:  dw  3, -6, 20, 20,   -6,  3, -1,  0

FIR_R4:  dw -1,  3, -6, 20,   20, -6,  3, -1

FIR_R5:  dw  0, -1,  3, -6,   20, 20, -6,  3,   -1,  0,  0,  0
align 16
FIR_R6:  dw  0,  0, -1,  3,   -6, 20, 20, -6,    3, -1,  0,  0
align 16
FIR_R7:  dw  0,  0,  0, -1,    3, -6, 20, 20,   -6,  3, -1,  0
align 16
FIR_R8:  dw                   -1,  3, -6, 20,   20, -6,  3, -1

FIR_R9:  dw                    0, -1,  3, -6,   20, 20, -6,  3,   -1,  0,  0,  0
align 16
FIR_R10: dw                    0,  0, -1,  3,   -6, 20, 20, -6,    3, -1,  0,  0
align 16
FIR_R11: dw                    0,  0,  0, -1,    3, -6, 20, 20,   -6,  3, -1,  0
align 16
FIR_R12: dw                                     -1,  3, -6, 20,   20, -6,  3, -1

FIR_R13: dw                                      0, -1,  3, -6,   20, 20, -6,  3

FIR_R14: dw                                      0,  0, -1,  3,   -6, 20, 20, -7

FIR_R15: dw                                      0,  0,  0, -1,    3, -6, 19, 23

FIR_R16: dw                                                       -1,  2, -3, 14

%endif  ; !USE_TABLES

  ; V-Pass taps

align 16
FIR_Cm7: times 4 dw -7
FIR_Cm6: times 4 dw -6
FIR_Cm3: times 4 dw -3
FIR_Cm1: times 4 dw -1
FIR_C2:  times 4 dw  2
FIR_C3:  times 4 dw  3
FIR_C14: times 4 dw 14
FIR_C19: times 4 dw 19
FIR_C20: times 4 dw 20
FIR_C23: times 4 dw 23

section .text

;//////////////////////////////////////////////////////////////////////
;// Here we go with the Q-Pel mess.
;//  For horizontal passes, we process 4 *output* pixels in parallel.
;//  For vertical ones, we process 4 *input* pixels in parallel.
;//////////////////////////////////////////////////////////////////////

%macro PROLOG_NO_AVRG 0
  push esi
  push edi
  push ebp
  mov edi, [esp+16 + 0*4] ; Dst
  mov esi, [esp+16 + 1*4] ; Src
  mov ecx, [esp+16 + 2*4] ; Size
  mov ebp, [esp+16 + 3*4] ; BpS
  mov eax, [esp+16 + 4*4] ; Rnd
  and eax, 1
  movq mm7, [Rounder_QP_MMX+eax*8]  ; rounder
%endmacro

%macro EPILOG_NO_AVRG 0
  pop ebp
  pop edi
  pop esi
  ret
%endmacro

%macro PROLOG_AVRG 0
  push ebx
  push esi
  push edi
  push ebp
  mov edi, [esp+20 + 0*4] ; Dst
  mov esi, [esp+20 + 1*4] ; Src
  mov ecx, [esp+20 + 2*4] ; Size
  mov ebp, [esp+20 + 3*4] ; BpS
  mov eax, [esp+20 + 4*4] ; Rnd
  and eax, 1
  movq mm7, [Rounder_QP_MMX+eax*8]  ; rounder
  lea ebx, [Rounder1_MMX+eax*8]     ; *Rounder2
%endmacro

%macro EPILOG_AVRG 0
  pop ebp
  pop edi
  pop esi
  pop ebx
  ret
%endmacro

;//////////////////////////////////////////////////////////////////////
;//
;// All horizontal passes
;//
;//////////////////////////////////////////////////////////////////////

  ; macros for USE_TABLES

%macro TLOAD 2     ; %1,%2: src pixels
  movzx eax, byte [esi+%1]
  movzx edx, byte [esi+%2]
  movq mm0, [xvid_FIR_14_3_2_1 + eax*8 ]
  movq mm3, [xvid_FIR_1_2_3_14 + edx*8 ]
  paddw mm0, mm7
  paddw mm3, mm7
%endmacro

%macro TACCUM2 5   ;%1:src pixel/%2-%3:Taps tables/ %4-%5:dst regs
  movzx eax, byte [esi+%1]
  paddw %4, [%2 + eax*8]
  paddw %5, [%3 + eax*8]
%endmacro

%macro TACCUM3 7   ;%1:src pixel/%2-%4:Taps tables/%5-%7:dst regs
  movzx eax, byte [esi+%1]
  paddw %5, [%2 + eax*8]
  paddw %6, [%3 + eax*8]
  paddw %7, [%4 + eax*8]
%endmacro

;//////////////////////////////////////////////////////////////////////

  ; macros without USE_TABLES

%macro LOAD 2     ; %1,%2: src pixels
  movzx eax, byte [esi+%1]
  movzx edx, byte [esi+%2]
  movq mm0, [xvid_Expand_mmx + eax*8]
  movq mm3, [xvid_Expand_mmx + edx*8]
  pmullw mm0, [FIR_R0 ]
  pmullw mm3, [FIR_R16]
  paddw mm0, mm7
  paddw mm3, mm7
%endmacro

%macro ACCUM2 4   ;src pixel/Taps/dst regs #1-#2
  movzx eax, byte [esi+%1]
  movq mm4, [xvid_Expand_mmx + eax*8]
  movq mm5, mm4
  pmullw mm4, [%2]
  pmullw mm5, [%2+8]
  paddw %3, mm4
  paddw %4, mm5
%endmacro

%macro ACCUM3 5   ;src pixel/Taps/dst regs #1-#2-#3
  movzx eax, byte [esi+%1]
  movq mm4, [xvid_Expand_mmx + eax*8]
  movq mm5, mm4
  movq mm6, mm5
  pmullw mm4, [%2   ]
  pmullw mm5, [%2+ 8]
  pmullw mm6, [%2+16]
  paddw %3, mm4
  paddw %4, mm5
  paddw %5, mm6
%endmacro

;//////////////////////////////////////////////////////////////////////

%macro MIX 3   ; %1:reg, %2:src, %3:rounder
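  ; byte-wise average of %1 with [%2]: %1 = (%1 + [%2] + [%3]) >> 1
  ; per lane, where [%3] holds the rounder (1, or 1-Rnd via ebx)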
  pxor mm6, mm6
  movq mm4, [%2]
  movq mm1, %1
  movq mm5, mm4
  punpcklbw %1, mm6
  punpcklbw mm4, mm6
  punpckhbw mm1, mm6
  punpckhbw mm5, mm6
  movq mm6, [%3]   ; rounder #2
  paddusw %1, mm4
  paddusw mm1, mm5
  paddusw %1, mm6
  paddusw mm1, mm6
  psrlw %1, 1
  psrlw mm1, 1
  packuswb %1, mm1
%endmacro

;//////////////////////////////////////////////////////////////////////

%macro H_PASS_16  2   ; %1:src-op (0=NONE,1=AVRG,2=AVRG-UP), %2:dst-op (NONE/AVRG)

%if (%2==0) && (%1==0)
   PROLOG_NO_AVRG
%else
   PROLOG_AVRG
%endif

.Loop

    ;  mm0..mm3 serve as a 4x4 delay line

%ifndef USE_TABLES

  LOAD 0, 16  ; special case for first/last pixel
  movq mm1, mm7
  movq mm2, mm7

  ACCUM2 1,    FIR_R1, mm0, mm1
  ACCUM2 2,    FIR_R2, mm0, mm1
  ACCUM2 3,    FIR_R3, mm0, mm1
  ACCUM2 4,    FIR_R4, mm0, mm1

  ACCUM3 5,    FIR_R5, mm0, mm1, mm2
  ACCUM3 6,    FIR_R6, mm0, mm1, mm2
  ACCUM3 7,    FIR_R7, mm0, mm1, mm2
  ACCUM2 8,    FIR_R8, mm1, mm2
  ACCUM3 9,    FIR_R9, mm1, mm2, mm3
  ACCUM3 10,   FIR_R10,mm1, mm2, mm3
  ACCUM3 11,   FIR_R11,mm1, mm2, mm3

  ACCUM2 12,   FIR_R12, mm2, mm3
  ACCUM2 13,   FIR_R13, mm2, mm3
  ACCUM2 14,   FIR_R14, mm2, mm3
  ACCUM2 15,   FIR_R15, mm2, mm3

%else

  TLOAD 0, 16  ; special case for first/last pixel
  movq mm1, mm7
  movq mm2, mm7

  TACCUM2 1,    xvid_FIR_23_19_6_3, xvid_FIR_1_0_0_0 , mm0, mm1
  TACCUM2 2,    xvid_FIR_7_20_20_6, xvid_FIR_3_1_0_0 , mm0, mm1
  TACCUM2 3,    xvid_FIR_3_6_20_20, xvid_FIR_6_3_1_0 , mm0, mm1
  TACCUM2 4,    xvid_FIR_1_3_6_20 , xvid_FIR_20_6_3_1, mm0, mm1

  TACCUM3 5,    xvid_FIR_0_1_3_6  , xvid_FIR_20_20_6_3, xvid_FIR_1_0_0_0  , mm0, mm1, mm2
  TACCUM3 6,    xvid_FIR_0_0_1_3  , xvid_FIR_6_20_20_6, xvid_FIR_3_1_0_0  , mm0, mm1, mm2
  TACCUM3 7,    xvid_FIR_0_0_0_1  , xvid_FIR_3_6_20_20, xvid_FIR_6_3_1_0  , mm0, mm1, mm2

  TACCUM2 8,                       xvid_FIR_1_3_6_20 , xvid_FIR_20_6_3_1 ,      mm1, mm2

  TACCUM3 9,                       xvid_FIR_0_1_3_6  , xvid_FIR_20_20_6_3, xvid_FIR_1_0_0_0,  mm1, mm2, mm3
  TACCUM3 10,                      xvid_FIR_0_0_1_3  , xvid_FIR_6_20_20_6, xvid_FIR_3_1_0_0,  mm1, mm2, mm3
  TACCUM3 11,                      xvid_FIR_0_0_0_1  , xvid_FIR_3_6_20_20, xvid_FIR_6_3_1_0,  mm1, mm2, mm3

  TACCUM2 12,  xvid_FIR_1_3_6_20, xvid_FIR_20_6_3_1 , mm2, mm3
  TACCUM2 13,  xvid_FIR_0_1_3_6 , xvid_FIR_20_20_6_3, mm2, mm3
  TACCUM2 14,  xvid_FIR_0_0_1_3 , xvid_FIR_6_20_20_7, mm2, mm3
  TACCUM2 15,  xvid_FIR_0_0_0_1 , xvid_FIR_3_6_19_23, mm2, mm3

%endif

  psraw mm0, 5
  psraw mm1, 5
  psraw mm2, 5
  psraw mm3, 5
  packuswb mm0, mm1
  packuswb mm2, mm3

%if (%1==1)
  MIX mm0, esi, ebx
%elif (%1==2)
  MIX mm0, esi+1, ebx
%endif
%if (%2==1)
  MIX mm0, edi, Rounder1_MMX
%endif

%if (%1==1)
  MIX mm2, esi+8, ebx
%elif (%1==2)
  MIX mm2, esi+9, ebx
%endif
%if (%2==1)
  MIX mm2, edi+8, Rounder1_MMX
%endif

  lea esi, [esi+ebp]

  movq [edi+0], mm0
  movq [edi+8], mm2

  add edi, ebp
  dec ecx
  jg .Loop

%if (%2==0) && (%1==0)
  EPILOG_NO_AVRG
%else
  EPILOG_AVRG
%endif

%endmacro


;//////////////////////////////////////////////////////////////////////

%macro H_PASS_8  2   ; %1:src-op (0=NONE,1=AVRG,2=AVRG-UP), %2:dst-op (NONE/AVRG)

%if (%2==0) && (%1==0)
  PROLOG_NO_AVRG
%else
  PROLOG_AVRG
%endif

.Loop
    ;  mm0..mm3 serve as a 4x4 delay line

%ifndef USE_TABLES

  LOAD 0, 8  ; special case for first/last pixel
  ACCUM2 1,  FIR_R1,  mm0, mm3
  ACCUM2 2,  FIR_R2,  mm0, mm3
  ACCUM2 3,  FIR_R3,  mm0, mm3
  ACCUM2 4,  FIR_R4,  mm0, mm3

  ACCUM2 5,  FIR_R13,  mm0, mm3
  ACCUM2 6,  FIR_R14,  mm0, mm3
  ACCUM2 7,  FIR_R15,  mm0, mm3

%else

%if 0   ; test with no unrolling

  TLOAD 0, 8  ; special case for first/last pixel
  TACCUM2 1,  xvid_FIR_23_19_6_3, xvid_FIR_1_0_0_0  , mm0, mm3
  TACCUM2 2,  xvid_FIR_7_20_20_6, xvid_FIR_3_1_0_0  , mm0, mm3
  TACCUM2 3,  xvid_FIR_3_6_20_20, xvid_FIR_6_3_1_0  , mm0, mm3
  TACCUM2 4,  xvid_FIR_1_3_6_20 , xvid_FIR_20_6_3_1 , mm0, mm3
  TACCUM2 5,  xvid_FIR_0_1_3_6  , xvid_FIR_20_20_6_3, mm0, mm3
  TACCUM2 6,  xvid_FIR_0_0_1_3  , xvid_FIR_6_20_20_7, mm0, mm3
  TACCUM2 7,  xvid_FIR_0_0_0_1  , xvid_FIR_3_6_19_23, mm0, mm3

%else  ; test with unrolling (little faster, but not much)

  movzx eax, byte [esi]
  movzx edx, byte [esi+8]
  movq mm0, [xvid_FIR_14_3_2_1 + eax*8 ]
  movzx eax, byte [esi+1]
  movq mm3, [xvid_FIR_1_2_3_14 + edx*8 ]
  paddw mm0, mm7
  paddw mm3, mm7

  movzx edx, byte [esi+2]
  paddw mm0, [xvid_FIR_23_19_6_3 + eax*8]
  paddw mm3, [xvid_FIR_1_0_0_0 + eax*8]

  movzx eax, byte [esi+3]
  paddw mm0, [xvid_FIR_7_20_20_6 + edx*8]
  paddw mm3, [xvid_FIR_3_1_0_0 + edx*8]

  movzx edx, byte [esi+4]
  paddw mm0, [xvid_FIR_3_6_20_20 + eax*8]
  paddw mm3, [xvid_FIR_6_3_1_0 + eax*8]

  movzx eax, byte [esi+5]
  paddw mm0, [xvid_FIR_1_3_6_20 + edx*8]
  paddw mm3, [xvid_FIR_20_6_3_1 + edx*8]

  movzx edx, byte [esi+6]
  paddw mm0, [xvid_FIR_0_1_3_6 + eax*8]
  paddw mm3, [xvid_FIR_20_20_6_3 + eax*8]

  movzx eax, byte [esi+7]
  paddw mm0, [xvid_FIR_0_0_1_3 + edx*8]
  paddw mm3, [xvid_FIR_6_20_20_7 + edx*8]

  paddw mm0, [xvid_FIR_0_0_0_1 + eax*8]
  paddw mm3, [xvid_FIR_3_6_19_23 + eax*8]

%endif

%endif    ; !USE_TABLES

  psraw mm0, 5
  psraw mm3, 5
  packuswb mm0, mm3

%if (%1==1)
  MIX mm0, esi, ebx
%elif (%1==2)
  MIX mm0, esi+1, ebx
%endif
%if (%2==1)
  MIX mm0, edi, Rounder1_MMX
%endif

  movq [edi], mm0

  add edi, ebp
  add esi, ebp
  dec ecx
  jg .Loop

%if (%2==0) && (%1==0)
  EPILOG_NO_AVRG
%else
  EPILOG_AVRG
%endif

%endmacro

;//////////////////////////////////////////////////////////////////////
;// 16x? copy Functions

xvid_H_Pass_16_mmx:
  H_PASS_16 0, 0
xvid_H_Pass_Avrg_16_mmx:
  H_PASS_16 1, 0
xvid_H_Pass_Avrg_Up_16_mmx:
  H_PASS_16 2, 0

;//////////////////////////////////////////////////////////////////////
;// 8x? copy Functions

xvid_H_Pass_8_mmx:
  H_PASS_8 0, 0
xvid_H_Pass_Avrg_8_mmx:
  H_PASS_8 1, 0
xvid_H_Pass_Avrg_Up_8_mmx:
  H_PASS_8 2, 0

;//////////////////////////////////////////////////////////////////////
;// 16x? avrg Functions

xvid_H_Pass_Add_16_mmx:
  H_PASS_16 0, 1
xvid_H_Pass_Avrg_Add_16_mmx:
  H_PASS_16 1, 1
xvid_H_Pass_Avrg_Up_Add_16_mmx:
  H_PASS_16 2, 1

;//////////////////////////////////////////////////////////////////////
;// 8x? avrg Functions

xvid_H_Pass_8_Add_mmx:
  H_PASS_8 0, 1
xvid_H_Pass_Avrg_8_Add_mmx:
  H_PASS_8 1, 1
xvid_H_Pass_Avrg_Up_8_Add_mmx:
  H_PASS_8 2, 1



;//////////////////////////////////////////////////////////////////////
;//
;// All vertical passes
;//
;//////////////////////////////////////////////////////////////////////

%macro V_LOAD 1  ; %1=Last?
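  ; fetch 4 pixels of the current row and widen bytes to words;
  ; edx steps down one stride per call except on the last row (%1==1)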

  movd mm4, [edx]
  pxor mm6, mm6
%if (%1==0)
  add edx, ebp
%endif
  punpcklbw mm4, mm6

%endmacro
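
; the V_ACC* helpers multiply the 4 pixels just loaded in mm4 by one or
; more tap constants (each FIR_Cxx is assumed to hold its coefficient
; replicated in 4 words) and accumulate into the pending row
; accumulators; V_ACC2l is the last-use variant, clobbering mm4 instead
; of copying it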

%macro V_ACC1 2   ; %1:reg, %2:tap
  pmullw mm4, [%2]
  paddw %1, mm4
%endmacro

%macro V_ACC2 4   ; %1-%2: regs, %3-%4: taps
  movq mm5, mm4
  movq mm6, mm4
  pmullw mm5, [%3]
  pmullw mm6, [%4]
  paddw %1, mm5
  paddw %2, mm6
%endmacro

%macro V_ACC2l 4   ; %1-%2: regs, %3-%4: taps
  movq mm5, mm4
  pmullw mm5, [%3]
  pmullw mm4, [%4]
  paddw %1, mm5
  paddw %2, mm4
%endmacro

%macro V_ACC4 8   ; %1-%4: regs, %5-%8: taps
  V_ACC2 %1,%2, %5,%6
  V_ACC2l %3,%4, %7,%8
%endmacro


%macro V_MIX 3  ; %1:dst-reg, %2:src, %3: rounder
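  ; halfpel mix: widen both operands to words and compute
  ; (a + b + round) >> 1; %3 should point to 4 rounder words (0 or 1)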
  pxor mm6, mm6
  movq mm4, [%2]
  punpcklbw %1, mm6
  punpcklbw mm4, mm6
  paddusw %1, mm4
  paddusw %1, [%3]
  psrlw %1, 1
  packuswb %1, %1
%endmacro

%macro V_STORE 4    ; %1-%2: mix ops, %3: reg, %4:last?
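  ; scale the accumulated taps back to pixel range (>>5 removes the
  ; 32x filter gain), apply the optional src/dst mixes, store 4 bytes,
  ; and step down one row unless this is the last store (%4==1)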

  psraw %3, 5
  packuswb %3, %3

%if (%1==1)
  V_MIX %3, esi, ebx
  add esi, ebp
%elif (%1==2)
  add esi, ebp
  V_MIX %3, esi, ebx
%endif
%if (%2==1)
  V_MIX %3, edi, Rounder1_MMX
%endif

  movd eax, %3
  mov [edi], eax

%if (%4==0)
  add edi, ebp
%endif

%endmacro

;//////////////////////////////////////////////////////////////////////

%macro V_PASS_16  2   ; %1:src-op (0=NONE,1=AVRG,2=AVRG-UP), %2:dst-op (NONE/AVRG)

%if (%2==0) && (%1==0)
  PROLOG_NO_AVRG
%else
  PROLOG_AVRG
%endif

    ; we process one stripe of 4x16 pixels each time.
    ; the size (3rd argument) is meant to be a multiple of 4
    ;  mm0..mm3 serve as a 4x4 delay line
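    ; rows are filtered in 4-row batches; since the kernel reaches
    ; 3 rows above and 4 rows below each output row, consecutive
    ; batches re-read overlapping input ranges (e.g. rows [1..11]
    ; for output rows [4..7]), the border batches using folded taps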

.Loop

  push edi
  push esi      ; esi is preserved for src-mixing
  mov edx, esi

    ; output rows [0..3], from input rows [0..8]

  movq mm0, mm7
  movq mm1, mm7
  movq mm2, mm7
  movq mm3, mm7

  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C14, FIR_Cm3, FIR_C2,  FIR_Cm1
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C23, FIR_C19, FIR_Cm6, FIR_C3
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm7, FIR_C20, FIR_C20, FIR_Cm6
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C3,  FIR_Cm6, FIR_C20, FIR_C20
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm1, FIR_C3,  FIR_Cm6, FIR_C20
  V_STORE %1, %2, mm0, 0

  V_LOAD 0
  V_ACC2 mm1, mm2, FIR_Cm1,  FIR_C3
  V_ACC1 mm3, FIR_Cm6
  V_STORE %1, %2, mm1, 0

  V_LOAD 0
  V_ACC2l mm2, mm3, FIR_Cm1, FIR_C3
  V_STORE %1, %2, mm2, 0

  V_LOAD 1
  V_ACC1 mm3, FIR_Cm1
  V_STORE %1, %2, mm3, 0

    ; output rows [4..7], from input rows [1..11] (!!)

  mov esi, [esp]
  lea edx, [esi+ebp]

  lea esi, [esi+4*ebp]  ; for src-mixing
  push esi              ; this will be the new value for next round

  movq mm0, mm7
  movq mm1, mm7
  movq mm2, mm7
  movq mm3, mm7

  V_LOAD 0
  V_ACC1 mm0, FIR_Cm1

  V_LOAD 0
  V_ACC2l mm0, mm1, FIR_C3,  FIR_Cm1

  V_LOAD 0
  V_ACC2 mm0, mm1, FIR_Cm6,  FIR_C3
  V_ACC1 mm2, FIR_Cm1

  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C20, FIR_Cm6, FIR_C3, FIR_Cm1
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C20, FIR_C20, FIR_Cm6, FIR_C3
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm6, FIR_C20, FIR_C20, FIR_Cm6
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C3,  FIR_Cm6, FIR_C20, FIR_C20
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm1, FIR_C3,  FIR_Cm6, FIR_C20
  V_STORE %1, %2, mm0, 0

  V_LOAD 0
  V_ACC2 mm1, mm2, FIR_Cm1,  FIR_C3
  V_ACC1 mm3, FIR_Cm6
  V_STORE %1, %2, mm1, 0

  V_LOAD 0
  V_ACC2l mm2, mm3, FIR_Cm1, FIR_C3
  V_STORE %1, %2, mm2, 0

  V_LOAD 1
  V_ACC1 mm3, FIR_Cm1
  V_STORE %1, %2, mm3, 0

    ; output rows [8..11], from input rows [5..15]

  pop esi
  lea edx, [esi+ebp]

  lea esi, [esi+4*ebp]  ; for src-mixing
  push esi              ; this will be the new value for next round

  movq mm0, mm7
  movq mm1, mm7
  movq mm2, mm7
  movq mm3, mm7

  V_LOAD 0
  V_ACC1 mm0, FIR_Cm1

  V_LOAD 0
  V_ACC2l mm0, mm1, FIR_C3,  FIR_Cm1

  V_LOAD 0
  V_ACC2 mm0, mm1, FIR_Cm6,  FIR_C3
  V_ACC1 mm2, FIR_Cm1

  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C20, FIR_Cm6, FIR_C3, FIR_Cm1
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C20, FIR_C20, FIR_Cm6, FIR_C3
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm6, FIR_C20, FIR_C20, FIR_Cm6
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C3,  FIR_Cm6, FIR_C20, FIR_C20
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm1, FIR_C3,  FIR_Cm6, FIR_C20

  V_STORE %1, %2, mm0, 0

  V_LOAD 0
  V_ACC2 mm1, mm2, FIR_Cm1,  FIR_C3
  V_ACC1 mm3, FIR_Cm6
  V_STORE %1, %2, mm1, 0

  V_LOAD 0
  V_ACC2l mm2, mm3, FIR_Cm1, FIR_C3
  V_STORE %1, %2, mm2, 0

  V_LOAD 1
  V_ACC1 mm3, FIR_Cm1
  V_STORE %1, %2, mm3, 0


    ; output rows [12..15], from input rows [9..16]

  pop esi
  lea edx, [esi+ebp]

%if (%1!=0)
  lea esi, [esi+4*ebp]  ; for src-mixing
%endif

  movq mm0, mm7
  movq mm1, mm7
  movq mm2, mm7
  movq mm3, mm7

  V_LOAD 0
  V_ACC1 mm3, FIR_Cm1

  V_LOAD 0
  V_ACC2l mm2, mm3, FIR_Cm1,  FIR_C3

  V_LOAD 0
  V_ACC2 mm1, mm2, FIR_Cm1,  FIR_C3
  V_ACC1 mm3, FIR_Cm6

  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm1, FIR_C3,  FIR_Cm6, FIR_C20
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C3,  FIR_Cm6, FIR_C20, FIR_C20
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm7, FIR_C20, FIR_C20, FIR_Cm6
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C23, FIR_C19, FIR_Cm6, FIR_C3
  V_LOAD 1
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C14, FIR_Cm3, FIR_C2, FIR_Cm1

  V_STORE %1, %2, mm3, 0
  V_STORE %1, %2, mm2, 0
  V_STORE %1, %2, mm1, 0
  V_STORE %1, %2, mm0, 1

    ; ... next 4 columns

  pop esi
  pop edi
  add esi, 4
  add edi, 4
  sub ecx, 4
  jg .Loop

%if (%2==0) && (%1==0)
  EPILOG_NO_AVRG
%else
  EPILOG_AVRG
%endif

%endmacro

;//////////////////////////////////////////////////////////////////////

%macro V_PASS_8  2   ; %1:src-op (0=NONE,1=AVRG,2=AVRG-UP), %2:dst-op (NONE/AVRG)

%if (%2==0) && (%1==0)
  PROLOG_NO_AVRG
%else
  PROLOG_AVRG
%endif

    ; we process one stripe of 4x8 pixels each time
    ; the size (3rd argument) is meant to be a multiple of 4
    ;  mm0..mm3 serve as a 4x4 delay line
.Loop

  push edi
  push esi      ; esi is preserved for src-mixing
  mov edx, esi

    ; output rows [0..3], from input rows [0..8]

  movq mm0, mm7
  movq mm1, mm7
  movq mm2, mm7
  movq mm3, mm7

  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C14, FIR_Cm3, FIR_C2,  FIR_Cm1
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C23, FIR_C19, FIR_Cm6, FIR_C3
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm7, FIR_C20, FIR_C20, FIR_Cm6
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C3,  FIR_Cm6, FIR_C20, FIR_C20
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm1, FIR_C3,  FIR_Cm6, FIR_C20
  V_STORE %1, %2, mm0, 0

  V_LOAD 0
  V_ACC2 mm1, mm2, FIR_Cm1,  FIR_C3
  V_ACC1 mm3, FIR_Cm6

  V_STORE %1, %2, mm1, 0

  V_LOAD 0
  V_ACC2l mm2, mm3, FIR_Cm1,  FIR_C3
  V_STORE %1, %2, mm2, 0

  V_LOAD 1
  V_ACC1 mm3, FIR_Cm1
  V_STORE %1, %2, mm3, 0

    ; output rows [4..7], from input rows [1..9]

  mov esi, [esp]
  lea edx, [esi+ebp]

%if (%1!=0)
  lea esi, [esi+4*ebp]  ; for src-mixing
%endif

  movq mm0, mm7
  movq mm1, mm7
  movq mm2, mm7
  movq mm3, mm7

  V_LOAD 0
  V_ACC1 mm3, FIR_Cm1

  V_LOAD 0
  V_ACC2l mm2, mm3, FIR_Cm1,  FIR_C3

  V_LOAD 0
  V_ACC2 mm1, mm2, FIR_Cm1,  FIR_C3
  V_ACC1 mm3, FIR_Cm6

  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm1, FIR_C3,  FIR_Cm6, FIR_C20
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C3,  FIR_Cm6, FIR_C20, FIR_C20
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_Cm7, FIR_C20, FIR_C20, FIR_Cm6
  V_LOAD 0
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C23, FIR_C19, FIR_Cm6, FIR_C3
  V_LOAD 1
  V_ACC4  mm0, mm1, mm2, mm3, FIR_C14, FIR_Cm3, FIR_C2, FIR_Cm1

  V_STORE %1, %2, mm3, 0
  V_STORE %1, %2, mm2, 0
  V_STORE %1, %2, mm1, 0
  V_STORE %1, %2, mm0, 1

    ; ... next 4 columns

  pop esi
  pop edi
  add esi, 4
  add edi, 4
  sub ecx, 4
  jg .Loop

%if (%2==0) && (%1==0)
  EPILOG_NO_AVRG
%else
  EPILOG_AVRG
%endif

%endmacro


;//////////////////////////////////////////////////////////////////////
;// 16x? copy Functions

xvid_V_Pass_16_mmx:
  V_PASS_16 0, 0
xvid_V_Pass_Avrg_16_mmx:
  V_PASS_16 1, 0
xvid_V_Pass_Avrg_Up_16_mmx:
  V_PASS_16 2, 0

;//////////////////////////////////////////////////////////////////////
;// 8x? copy Functions

xvid_V_Pass_8_mmx:
  V_PASS_8 0, 0
xvid_V_Pass_Avrg_8_mmx:
  V_PASS_8 1, 0
xvid_V_Pass_Avrg_Up_8_mmx:
  V_PASS_8 2, 0

;//////////////////////////////////////////////////////////////////////
;// 16x? avrg Functions

xvid_V_Pass_Add_16_mmx:
  V_PASS_16 0, 1
xvid_V_Pass_Avrg_Add_16_mmx:
  V_PASS_16 1, 1
xvid_V_Pass_Avrg_Up_Add_16_mmx:
  V_PASS_16 2, 1

;//////////////////////////////////////////////////////////////////////
;// 8x? avrg Functions

xvid_V_Pass_8_Add_mmx:
  V_PASS_8 0, 1
xvid_V_Pass_Avrg_8_Add_mmx:
  V_PASS_8 1, 1
xvid_V_Pass_Avrg_Up_8_Add_mmx:
  V_PASS_8 2, 1

;//////////////////////////////////////////////////////////////////////

--=-dLq130355Ig4zdyzCliD--