dslinux/user/pixil/packages/dvdview/dvdview/src/video12/modules idct_arai.cc idct_arai.hh mcomp_sgl_mmx.cc mcomp_sgl_mmx.hh mcomp_sgl_simple.cc mcomp_sgl_simple.hh mmxidct.S

amadeus dslinux_amadeus@user.in-berlin.de
Tue Oct 3 13:26:04 CEST 2006


Update of /cvsroot/dslinux/dslinux/user/pixil/packages/dvdview/dvdview/src/video12/modules
In directory antilope:/tmp/cvs-serv11916/packages/dvdview/dvdview/src/video12/modules

Added Files:
	idct_arai.cc idct_arai.hh mcomp_sgl_mmx.cc mcomp_sgl_mmx.hh 
	mcomp_sgl_simple.cc mcomp_sgl_simple.hh mmxidct.S 
Log Message:
adding pristine copy of pixil to HEAD so I can branch from it

--- NEW FILE: mmxidct.S ---
/* This code is basically the Intel-DCT code, found in the
   LiViD source tree, slightly modified to fit into dvdview. */
	
/*
 * the input data is transposed and each 16 bit element in the 8x8 matrix
 * is left aligned:
 * for example in 11...1110000 format
 * If the iDCT is of an I macroblock then 0.5 needs to be added to the DC
 * component (element[0][0] of the matrix)
 */
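
/* In C terms the expected conditioning of the coefficient block is roughly
 * the following (a sketch; the disabled block below performs the same
 * left-alignment in MMX):
 *
 *	for (int i = 0; i < 64; i++)
 *		blk[i] <<= 4;		// left-align the 16 bit coefficients
 *	if (intra_block)
 *		blk[0] += 8;		// +0.5 in the 4 fractional bits
 *
 * The "+8" DC bias is an assumption derived from the comment above; the
 * caller may apply it elsewhere (e.g. during dequantization).
 */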

/* extrn re_matrix */

.data
	.align 16
	.type	 preSC,@object
preSC:  .short  16384,22725,21407,19266,16384,12873,8867,4520
        .short  22725,31521,29692,26722,22725,17855,12299,6270
        .short  21407,29692,27969,25172,21407,16819,11585,5906
        .short  19266,26722,25172,22654,19266,15137,10426,5315
        .short  16384,22725,21407,19266,16384,12873,8867,4520
        .short  12873,17855,16819,15137,25746,20228,13933,7103
        .short  17734,24598,23170,20853,17734,13933,9597,4892
        .short  18081,25080,23624,21261,18081,14206,9785,4988
	.size	 preSC,128
	.align 8
	.type	x0005000200010001,@object
	.size	x0005000200010001,8
x0005000200010001:
	.long	0x00010001,0x00050002
	.align 8
	.type	x0040000000000000,@object
	.size	x0040000000000000,8
x0040000000000000:
	.long	0, 0x00400000
	.align 8
	.type	x5a825a825a825a82,@object
	.size	x5a825a825a825a82,8
x5a825a825a825a82:
	.long	0x5a825a82, 0x5a825a82
	.align 8
	.type	x539f539f539f539f,@object
	.size	x539f539f539f539f,8
x539f539f539f539f:
	.long	0x539f539f,0x539f539f
	.align 8
	.type	x4546454645464546,@object
	.size	x4546454645464546,8
x4546454645464546:
	.long	0x45464546,0x45464546
	.align 8
	.type	x61f861f861f861f8,@object
	.size	x61f861f861f861f8,8
x61f861f861f861f8:
	.long	0x61f861f8,0x61f861f8
	.align 8
	.type	 scratch1,@object
	.size	 scratch1,8
scratch1:
	.long 0,0
	.align 8
	.type	 scratch3,@object
	.size	 scratch3,8
scratch3:
	.long 0,0
	.align 8
	.type	 scratch5,@object
	.size	 scratch5,8
scratch5:
	.long 0,0
	.align 8
	.type	 scratch7,@object
	.size	 scratch7,8
scratch7:
	.long 0,0
	.type	 x0,@object
	.size	 x0,8
x0:
	.long 0,0
	.align 8
.text
	.align 4
.globl IDCT_mmx
	.type	 IDCT_mmx,@function
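/* Apparent C prototype, judging from the argument loads below:
 *	void IDCT_mmx(short *dest, short *src);
 * where both arguments point to 8x8 blocks of 16 bit values.  Note that the
 * source matrix is also used as scratch space, so it is clobbered.
 */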
IDCT_mmx:
	pushl %ebp
	movl %esp,%ebp
	pushl %ebx
	pushl %ecx
	pushl %edx
	pushl %esi
	pushl %edi
        movl 8(%ebp),%edi		/* output XXX */
       	movl 12(%ebp),%esi		/* source matrix */
#if 0
	movq (%esi), %mm0
	movq 8(%esi), %mm1
	psllw $4, %mm0
	movq 16(%esi), %mm2
	psllw $4, %mm1
	movq 24(%esi), %mm3
	psllw $4, %mm2
	movq 32(%esi), %mm4
	psllw $4, %mm3
	movq 40(%esi), %mm5
	psllw $4, %mm4
	movq 48(%esi), %mm6
	psllw $4, %mm5
	movq 56(%esi), %mm7
	psllw $4, %mm6
	psllw $4, %mm7
	movq %mm0,  (%esi)
	movq %mm1, 8(%esi)
	movq %mm2,16(%esi)
	movq %mm3,24(%esi)
	movq %mm4,32(%esi)
	movq %mm5,40(%esi)
	movq %mm6,48(%esi)
	movq %mm7,56(%esi)
	movq 64(%esi), %mm0
	movq 72(%esi), %mm1
	psllw $4, %mm0
	movq 80(%esi), %mm2
	psllw $4, %mm1
	movq 88(%esi), %mm3
	psllw $4, %mm2
	movq 96(%esi), %mm4
	psllw $4, %mm3
	movq 104(%esi), %mm5
	psllw $4, %mm4
	movq 112(%esi), %mm6
	psllw $4, %mm5
	movq 120(%esi), %mm7
	psllw $4, %mm6
	psllw $4, %mm7
	movq %mm0,64(%esi)
	movq %mm1,72(%esi)
	movq %mm2,80(%esi)
	movq %mm3,88(%esi)
	movq %mm4,96(%esi)
	movq %mm5,104(%esi)
	movq %mm6,112(%esi)
	movq %mm7,120(%esi)
#endif
	leal preSC, %ecx
/* column 0: even part
 * use V4, V12, V0, V8 to produce V22..V25
 */
	movq 8*12(%ecx), %mm0	/* maybe the first mul can be done together */
				/* with the dequantization in iHuff module */
	pmulhw 8*12(%esi), %mm0		/* V12 */
	movq 8*4(%ecx), %mm1
	pmulhw 8*4(%esi), %mm1		/* V4 */
	movq (%ecx), %mm3
	psraw $1, %mm0			/* t64=t66 */
	pmulhw (%esi), %mm3		/* V0 */
	movq 8*8(%ecx), %mm5		/* duplicate V4 */
	movq %mm1, %mm2			/* added 11/1/96 */
	pmulhw 8*8(%esi),%mm5		/* V8 */
	psubsw %mm0, %mm1		/* V16 */
	pmulhw x5a825a825a825a82, %mm1	/* 23170 ->V18 */
	paddsw %mm0, %mm2		/* V17 */
	movq %mm2, %mm0			/* duplicate V17 */
	psraw $1, %mm2			/* t75=t82 */
	psraw $2, %mm0			/* t72 */
	movq %mm3, %mm4			/* duplicate V0 */
	paddsw %mm5, %mm3		/* V19 */
	psubsw %mm5, %mm4		/* V20 ;mm5 free */
/* moved from the block below */
	movq 8*10(%ecx), %mm7
	psraw $1, %mm3			/* t74=t81 */
	movq %mm3, %mm6			/* duplicate t74=t81 */
	psraw $2, %mm4			/* t77=t79 */
	psubsw %mm0, %mm1		/* V21 ; mm0 free */
	paddsw %mm2, %mm3		/* V22 */
	movq %mm1, %mm5			/* duplicate V21 */
	paddsw %mm4, %mm1		/* V23 */
	movq %mm3, 8*4(%esi)		/* V22 */
	psubsw %mm5, %mm4		/* V24; mm5 free */
	movq %mm1, 8*12(%esi)		/* V23 */
	psubsw %mm2, %mm6		/* V25; mm2 free */
	movq %mm4, (%esi)		/* V24 */
/* keep mm6 alive all along the next block */
	/* movq %mm6, 8*8(%esi) 	V25 */
/* column 0: odd part
 * use V2, V6, V10, V14 to produce V31, V39, V40, V41
 */
/* moved above: movq 8*10(%ecx), %mm7 */

	pmulhw 8*10(%esi), %mm7		/* V10 */
	movq 8*6(%ecx), %mm0
	pmulhw 8*6(%esi), %mm0		/* V6 */
	movq 8*2(%ecx), %mm5
	movq %mm7, %mm3			/* duplicate V10 */
	pmulhw 8*2(%esi), %mm5		/* V2 */
	movq 8*14(%ecx), %mm4
	psubsw %mm0, %mm7		/* V26 */
	pmulhw 8*14(%esi), %mm4		/* V14 */
	paddsw %mm0, %mm3		/* V29 ; free mm0 */
	movq %mm7, %mm1			/* duplicate V26 */
	psraw $1, %mm3			/* t91=t94 */
	pmulhw x539f539f539f539f,%mm7	/* V33 */
	psraw $1, %mm1			/* t96 */
	movq %mm5, %mm0			/* duplicate V2 */
	psraw $2, %mm4			/* t85=t87 */
	paddsw %mm4,%mm5		/* V27 */
	psubsw %mm4, %mm0		/* V28 ; free mm4 */
	movq %mm0, %mm2			/* duplicate V28 */
	psraw $1, %mm5			/* t90=t93 */
	pmulhw x4546454645464546,%mm0	/* V35 */
	psraw $1, %mm2			/* t97 */
	movq %mm5, %mm4			/* duplicate t90=t93 */
	psubsw %mm2, %mm1		/* V32 ; free mm2 */
	pmulhw x61f861f861f861f8,%mm1	/* V36 */
	psllw $1, %mm7			/* t107 */
	paddsw %mm3, %mm5		/* V31 */
	psubsw %mm3, %mm4		/* V30 ; free mm3 */
	pmulhw x5a825a825a825a82,%mm4	/* V34 */
	nop
	psubsw %mm1, %mm0		/* V38 */
	psubsw %mm7, %mm1		/* V37 ; free mm7 */
	psllw $1, %mm1			/* t114 */
/* move from the next block */
	movq %mm6, %mm3			/* duplicate V25 */
/* move from the next block */
	movq 8*4(%esi), %mm7		/* V22 */
	psllw $1, %mm0			/* t110 */
	psubsw %mm5, %mm0		/* V39 (mm5 needed for next block) */
	psllw $2, %mm4			/* t112 */
/* moved from the next block */
	movq 8*12(%esi), %mm2		/* V23 */
	psubsw %mm0, %mm4		/* V40 */
	paddsw %mm4, %mm1		/* V41; free mm0 */
/* moved from the next block */
	psllw $1, %mm2			/* t117=t125 */
/* column 0: output butterfly */
/* moved above:
 * movq %mm6, %mm3			duplicate V25
 * movq 8*4(%esi), %mm7			V22
 * movq 8*12(%esi), %mm2		V23
 * psllw $1, %mm2			t117=t125
 */
	psubsw %mm1, %mm6		/* tm6 */
	paddsw %mm1, %mm3		/* tm8; free mm1 */
	movq %mm7, %mm1			/* duplicate V22 */
	paddsw %mm5, %mm7		/* tm0 */
	movq %mm3, 8*8(%esi)		/* tm8; free mm3 */
	psubsw %mm5, %mm1		/* tm14; free mm5 */
	movq %mm6, 8*6(%esi)		/* tm6; free mm6 */
	movq %mm2, %mm3			/* duplicate t117=t125 */
	movq (%esi), %mm6		/* V24 */
	paddsw %mm0, %mm2		/* tm2 */
	movq %mm7, (%esi)		/* tm0; free mm7 */
	psubsw %mm0, %mm3		/* tm12; free mm0 */
	movq %mm1, 8*14(%esi)		/* tm14; free mm1 */
	psllw $1, %mm6			/* t119=t123 */
	movq %mm2, 8*2(%esi)		/* tm2; free mm2 */
	movq %mm6, %mm0			/* duplicate t119=t123 */
	movq %mm3, 8*12(%esi)		/* tm12; free mm3 */
	paddsw %mm4, %mm6		/* tm4 */
/* moved from next block */
	movq 8*5(%ecx), %mm1
	psubsw %mm4, %mm0		/* tm10; free mm4 */
/* moved from next block */
	pmulhw 8*5(%esi), %mm1		/* V5 */
	movq %mm6, 8*4(%esi)		/* tm4; free mm6 */
	movq %mm0, 8*10(%esi)		/* tm10; free mm0 */
/* column 1: even part
 * use V5, V13, V1, V9 to produce V56..V59
 */
/* moved to prev block:
 *	movq 8*5(%ecx), %mm1
 *	pmulhw 8*5(%esi), %mm1		 V5
 */
	movq 8*13(%ecx), %mm7
	psllw $1, %mm1			/* t128=t130 */
	pmulhw 8*13(%esi), %mm7		/* V13 */
	movq %mm1, %mm2			/* duplicate t128=t130 */
	movq 8(%ecx), %mm3
	pmulhw 8(%esi), %mm3		/* V1 */
	movq 8*9(%ecx), %mm5
	psubsw %mm7, %mm1		/* V50 */
	pmulhw 8*9(%esi), %mm5		/* V9 */
	paddsw %mm7, %mm2		/* V51 */
	pmulhw x5a825a825a825a82, %mm1	/* 23170 ->V52 */
	movq %mm2, %mm6			/* duplicate V51 */
	psraw $1, %mm2			/* t138=t144 */
	movq %mm3, %mm4			/* duplicate V1 */
	psraw $2, %mm6			/* t136 */
	paddsw %mm5, %mm3		/* V53 */
	psubsw %mm5, %mm4		/* V54 ;mm5 free */
	movq %mm3, %mm7			/* duplicate V53 */
/* moved from next block */
	movq 8*11(%ecx), %mm0
	psraw $1, %mm4			/* t140=t142 */
	psubsw %mm6, %mm1		/* V55 ; mm6 free */
	paddsw %mm2, %mm3		/* V56 */
	movq %mm4, %mm5			/* duplicate t140=t142 */
	paddsw %mm1, %mm4		/* V57 */
	movq %mm3, 8*5(%esi)		/* V56 */
	psubsw %mm1, %mm5		/* V58; mm1 free */
	movq %mm4, 8*13(%esi)		/* V57 */
	psubsw %mm2, %mm7		/* V59; mm2 free */
	movq %mm5, 8*9(%esi)		/* V58 */
/* keep mm7 alive all along the next block
 *	movq %mm7, 8(%esi)		V59
 * moved above
 *	movq 8*11(%ecx), %mm0
 */
	pmulhw 8*11(%esi), %mm0		/* V11 */
	movq 8*7(%ecx), %mm6
	pmulhw 8*7(%esi), %mm6		/* V7 */
	movq 8*15(%ecx), %mm4
	movq %mm0, %mm3			/* duplicate V11 */
	pmulhw 8*15(%esi), %mm4		/* V15 */
	movq 8*3(%ecx), %mm5
	psllw $1, %mm6			/* t146=t152 */
	pmulhw 8*3(%esi), %mm5		/* V3 */
	paddsw %mm6, %mm0		/* V63 */
/* note that V15 computation has a correction step: 
 * this is a 'magic' constant that rebiases the results to be closer to the
 * expected result.  this magic constant can be refined to reduce the error
 * even more by doing the correction step in a later stage when the number
 * is actually multiplied by 16
 */
	paddw x0005000200010001, %mm4
	psubsw %mm6, %mm3		/* V60 ; free mm6 */
	psraw $1, %mm0			/* t154=t156 */
	movq %mm3, %mm1			/* duplicate V60 */
	pmulhw x539f539f539f539f, %mm1	/* V67 */
	movq %mm5, %mm6			/* duplicate V3 */
	psraw $2, %mm4			/* t148=t150 */
	paddsw %mm4, %mm5		/* V61 */
	psubsw %mm4, %mm6		/* V62 ; free mm4 */
	movq %mm5, %mm4			/* duplicate V61 */
	psllw $1, %mm1			/* t169 */
	paddsw %mm0, %mm5		/* V65 -> result */
	psubsw %mm0, %mm4		/* V64 ; free mm0 */
	pmulhw x5a825a825a825a82, %mm4	/* V68 */
	psraw $1, %mm3			/* t158 */
	psubsw %mm6, %mm3		/* V66 */
	movq %mm5, %mm2			/* duplicate V65 */
	pmulhw x61f861f861f861f8, %mm3	/* V70 */
	psllw $1, %mm6			/* t165 */
	pmulhw x4546454645464546, %mm6	/* V69 */
	psraw $1, %mm2			/* t172 */
/* moved from next block */
	movq 8*5(%esi), %mm0		/* V56 */
	psllw $1, %mm4			/* t174 */
/* moved from next block */
	psraw $1, %mm0			/* t177=t188 */
	nop
	psubsw %mm3, %mm6		/* V72 */
	psubsw %mm1, %mm3		/* V71 ; free mm1 */
	psubsw %mm2, %mm6		/* V73 ; free mm2 */
/* moved from next block */
	psraw $1, %mm5			/* t178=t189 */
	psubsw %mm6, %mm4		/* V74 */
/* moved from next block */
	movq %mm0, %mm1			/* duplicate t177=t188 */
	paddsw %mm4, %mm3		/* V75 */
/* moved from next block */
	paddsw %mm5, %mm0		/* tm1 */
/* location
 *  5 - V56
 * 13 - V57
 *  9 - V58
 *  X - V59, mm7
 *  X - V65, mm5
 *  X - V73, mm6
 *  X - V74, mm4
 *  X - V75, mm3
 * free mm0, mm1 & mm2
 * moved above
 *	movq 8*5(%esi), %mm0		V56
 *	psllw $1, %mm0			t177=t188 ! new !!
 *	psllw $1, %mm5			t178=t189 ! new !!
 *	movq %mm0, %mm1			duplicate t177=t188
 *	paddsw %mm5, %mm0		tm1
 */
	movq 8*13(%esi), %mm2		/* V57 */
	psubsw %mm5, %mm1		/* tm15; free mm5 */
	movq %mm0, 8(%esi)		/* tm1; free mm0 */
	psraw $1, %mm7			/* t182=t184 ! new !! */
/* the store is omitted since the value is used directly in the transpose
 *	movq %mm1, 120(%esi)		tm15; free mm1
 */
	movq %mm7, %mm5			/* duplicate t182=t184 */
	psubsw %mm3, %mm7		/* tm7 */
	paddsw %mm3, %mm5		/* tm9; free mm3 */
	movq 8*9(%esi), %mm0		/* V58 */
	movq %mm2, %mm3			/* duplicate V57 */
	movq %mm7, 8*7(%esi)		/* tm7; free mm7 */
	psubsw %mm6, %mm3		/* tm13 */
	paddsw %mm6, %mm2		/* tm3 ; free mm6 */
/* moved up from the transpose */
	movq %mm3, %mm7
/* moved up from the transpose */
	punpcklwd %mm1, %mm3
	movq %mm0, %mm6			/* duplicate V58 */
	movq %mm2, 8*3(%esi)		/* tm3; free mm2 */
	paddsw %mm4, %mm0		/* tm5 */
	psubsw %mm4, %mm6		/* tm11; free mm4 */
/* moved up from the transpose */
	punpckhwd %mm1, %mm7
	movq %mm0, 8*5(%esi)		/* tm5; free mm0 */
/* moved up from the transpose */
	movq %mm5, %mm2
/* transpose - M4 part
 *  ---------       ---------
 * | M1 | M2 |     | M1'| M3'|
 *  ---------  -->  ---------
 * | M3 | M4 |     | M2'| M4'|
 *  ---------       ---------
 * Two alternatives: use full mmword approach so the following code can be
 * scheduled before the transpose is done without stores, or use the faster
 * half mmword stores (when possible)
 */
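/* The 4x4 word transposes here follow the usual MMX idiom: punpcklwd and
 * punpckhwd interleave the 16 bit elements of two rows, then punpckldq and
 * punpckhdq pair the resulting dwords into the transposed rows, i.e.
 * out[j][i] = in[i][j] for each 4x4 tile of the 8x8 block.
 */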
	movd %mm3, 8*9+4(%esi)		/* MS part of tmt9 */
	punpcklwd %mm6, %mm5
	movd %mm7, 8*13+4(%esi)		/* MS part of tmt13 */
	punpckhwd %mm6, %mm2
	movd %mm5, 8*9(%esi)		/* LS part of tmt9 */
	punpckhdq %mm3, %mm5		/* free mm3 */
	movd %mm2, 8*13(%esi)		/* LS part of tmt13 */
	punpckhdq %mm7, %mm2		/* free mm7 */
/* moved up from the M3 transpose */
	movq 8*8(%esi), %mm0
/* moved up from the M3 transpose */
	movq 8*10(%esi), %mm1
/* moved up from the M3 transpose */
	movq %mm0, %mm3
/* shuffle the rest of the data, and write it with 2 mmword writes */
	movq %mm5, 8*11(%esi)		/* tmt11 */
/* moved up from the M3 transpose */
	punpcklwd %mm1, %mm0
	movq %mm2, 8*15(%esi)		/* tmt15 */
/* moved up from the M3 transpose */
	punpckhwd %mm1, %mm3
/* transpose - M3 part
 * moved up to previous code section
 *	movq 8*8(%esi), %mm0
 *	movq 8*10(%esi), %mm1
 *	movq %mm0, %mm3
 *	punpcklwd %mm1, %mm0
 *	punpckhwd %mm1, %mm3
 */
	movq 8*12(%esi), %mm6
	movq 8*14(%esi), %mm4
	movq %mm6, %mm2
/* shuffle the data and write the lower parts of the transposed in 4 dwords */
	punpcklwd %mm4, %mm6
	movq %mm0, %mm1
	punpckhdq %mm6, %mm1
	movq %mm3, %mm7
	punpckhwd %mm4, %mm2		/* free mm4 */
	punpckldq %mm6, %mm0		/* free mm6 */
/* moved from next block */
	movq 8*13(%esi), %mm4		/* tmt13 */
	punpckldq %mm2, %mm3
	punpckhdq %mm2, %mm7		/* free mm2 */
/* moved from next block */
	movq %mm3, %mm5			/* duplicate tmt5 */
/* column 1: even part (after transpose)
* moved above
*	movq %mm3, %mm5			duplicate tmt5
*	movq 8*13(%esi), %mm4		tmt13
*/
	psubsw %mm4, %mm3		/* V134 */
	pmulhw x5a825a825a825a82, %mm3	/* 23170 ->V136 */
	movq 8*9(%esi), %mm6		/* tmt9 */
	paddsw %mm4, %mm5		/* V135 ; mm4 free */
	movq %mm0, %mm4			/* duplicate tmt1 */
	paddsw %mm6, %mm0		/* V137 */
	psubsw %mm6, %mm4		/* V138 ; mm6 free */
	psllw $2, %mm3			/* t290 */
	psubsw %mm5, %mm3		/* V139 */
	movq %mm0, %mm6			/* duplicate V137 */
	paddsw %mm5, %mm0		/* V140 */
	movq %mm4, %mm2			/* duplicate V138 */
	paddsw %mm3, %mm2		/* V141 */
	psubsw %mm3, %mm4		/* V142 ; mm3 free */
	movq %mm0, 8*9(%esi)		/* V140 */
	psubsw %mm5, %mm6		/* V143 ; mm5 free */
/* moved from next block */
	movq 8*11(%esi), %mm0		/* tmt11 */
	movq %mm2, 8*13(%esi)		/* V141 */
/* moved from next block */
	movq %mm0, %mm2			/* duplicate tmt11 */
/* column 1: odd part (after transpose) */
/* moved up to the prev block
 *	movq 8*11(%esi), %mm0		tmt11
 *	movq %mm0, %mm2			duplicate tmt11
 */
	movq 8*15(%esi), %mm5		/* tmt15 */
	psubsw %mm7, %mm0		/* V144 */
	movq %mm0, %mm3			/* duplicate V144 */
	paddsw %mm7, %mm2		/* V147 ; free mm7 */
	pmulhw x539f539f539f539f, %mm0	/* 21407-> V151 */
	movq %mm1, %mm7			/* duplicate tmt3 */
	paddsw %mm5, %mm7		/* V145 */
	psubsw %mm5, %mm1		/* V146 ; free mm5 */
	psubsw %mm1, %mm3		/* V150 */
	movq %mm7, %mm5			/* duplicate V145 */
	pmulhw x4546454645464546, %mm1	/* 17734-> V153 */
	psubsw %mm2, %mm5		/* V148 */
	pmulhw x61f861f861f861f8, %mm3	/* 25080-> V154 */
	psllw $2, %mm0			/* t311 */
	pmulhw x5a825a825a825a82, %mm5	/* 23170-> V152 */
	paddsw %mm2, %mm7		/* V149 ; free mm2 */
	psllw $1, %mm1			/* t313 */
	nop	/* without the nop - freeze here for one clock */
	movq %mm3, %mm2			/* duplicate V154 */
	psubsw %mm0, %mm3		/* V155 ; free mm0 */
	psubsw %mm2, %mm1		/* V156 ; free mm2 */
/* moved from the next block */
	movq %mm6, %mm2			/* duplicate V143 */
/* moved from the next block */
	movq 8*13(%esi), %mm0		/* V141 */
	psllw $1, %mm1			/* t315 */
	psubsw %mm7, %mm1		/* V157 (keep V149) */
	psllw $2, %mm5			/* t317 */
	psubsw %mm1, %mm5		/* V158 */
	psllw $1, %mm3			/* t319 */
	paddsw %mm5, %mm3		/* V159 */
/* column 1: output butterfly (after transform)
 * moved to the prev block
 *	movq %mm6, %mm2			duplicate V143
 *	movq 8*13(%esi), %mm0		V141
 */
	psubsw %mm3, %mm2		/* V163 */
	paddsw %mm3, %mm6		/* V164 ; free mm3 */
	movq %mm4, %mm3			/* duplicate V142 */
	psubsw %mm5, %mm4		/* V165 ; free mm5 */
	movq %mm2, scratch7		/* out7 */
	psraw $4, %mm6
	psraw $4, %mm4
	paddsw %mm5, %mm3		/* V162 */
	movq 8*9(%esi), %mm2		/* V140 */
	movq %mm0, %mm5			/* duplicate V141 */
/* in order not to percolate this line up,
 * we read 72(%esi) very near to this location
 */

        movq %mm6, 8*9(%edi)		/* out9 */
	paddsw %mm1, %mm0		/* V161 */
	movq %mm3, scratch5		/* out5 */
	psubsw %mm1, %mm5		/* V166 ; free mm1 */
	movq %mm4, 8*11(%edi)		/* out11 */
	psraw $4, %mm5
	movq %mm0, scratch3		/* out3 */
	movq %mm2, %mm4			/* duplicate V140 */
	movq %mm5, 8*13(%edi)		/* out13 */
	paddsw %mm7, %mm2		/* V160 */
/* moved from the next block */
	movq 8(%esi), %mm0
	psubsw %mm7, %mm4		/* V167 ; free mm7 */
/* moved from the next block */
	movq 8*3(%esi), %mm7
	psraw $4, %mm4
	movq %mm2, scratch1		/* out1 */
/* moved from the next block */
	movq %mm0, %mm1
	movq %mm4, 8*15(%edi)		/* out15 */
/* moved from the next block */
	punpcklwd %mm7, %mm0
/* transpose - M2 parts
 * moved up to the prev block
 *	movq 8(%esi), %mm0
 *	movq 8*3(%esi), %mm7
 *	movq %mm0, %mm1
 *	punpcklwd %mm7, %mm0
 */
	movq 8*5(%esi), %mm5
	punpckhwd %mm7, %mm1
	movq 8*7(%esi), %mm4
	movq %mm5, %mm3
/* shuffle the data and write the lower parts of the transposed in 4 dwords */
	movd %mm0, 8*8(%esi)		/* LS part of tmt8 */
	punpcklwd %mm4, %mm5
	movd %mm1, 8*12(%esi)		/* LS part of tmt12 */
	punpckhwd %mm4, %mm3
	movd %mm5, 8*8+4(%esi)		/* MS part of tmt8 */
	punpckhdq %mm5, %mm0		/* tmt10 */
	movd %mm3, 8*12+4(%esi)		/* MS part of tmt12 */
	punpckhdq %mm3, %mm1		/* tmt14 */
/* transpose - M1 parts */
	movq (%esi), %mm7
	movq 8*2(%esi), %mm2
	movq %mm7, %mm6
	movq 8*4(%esi), %mm5
	punpcklwd %mm2, %mm7
	movq 8*6(%esi), %mm4
	punpckhwd %mm2, %mm6		/* free mm2 */
	movq %mm5, %mm3
	punpcklwd %mm4, %mm5
	punpckhwd %mm4, %mm3		/* free mm4 */
	movq %mm7, %mm2
	movq %mm6, %mm4
	punpckldq %mm5, %mm7		/* tmt0 */
	punpckhdq %mm5, %mm2		/* tmt2 ; free mm5 */
/* shuffle the rest of the data, and write it with 2 mmword writes */
	punpckldq %mm3, %mm6		/* tmt4 */
/* moved from next block */
	movq %mm2, %mm5			/* duplicate tmt2 */
	punpckhdq %mm3, %mm4		/* tmt6 ; free mm3 */
/* moved from next block */
	movq %mm0, %mm3			/* duplicate tmt10 */
/* column 0: odd part (after transpose)
 *moved up to prev block
 *	movq %mm0, %mm3			duplicate tmt10
 *	movq %mm2, %mm5			duplicate tmt2
 */
	psubsw %mm4, %mm0		/* V110 */
	paddsw %mm4, %mm3		/* V113 ; free mm4 */
	movq %mm0, %mm4			/* duplicate V110 */
	paddsw %mm1, %mm2		/* V111 */
	pmulhw x539f539f539f539f, %mm0	/* 21407-> V117 */
	psubsw %mm1, %mm5		/* V112 ; free mm1 */
	psubsw %mm5, %mm4		/* V116 */
	movq %mm2, %mm1			/* duplicate V111 */
	pmulhw x4546454645464546, %mm5	/* 17734-> V119 */
	psubsw %mm3, %mm2		/* V114 */
	pmulhw x61f861f861f861f8, %mm4	/* 25080-> V120 */
	paddsw %mm3, %mm1		/* V115 ; free mm3 */
	pmulhw x5a825a825a825a82, %mm2	/* 23170-> V118 */
	psllw $2, %mm0			/* t266 */
	movq %mm1, (%esi)		/* save V115 */
	psllw $1, %mm5			/* t268 */
	psubsw %mm4, %mm5		/* V122 */
	psubsw %mm0, %mm4		/* V121 ; free mm0 */
	psllw $1, %mm5			/* t270 */
	psubsw %mm1, %mm5		/* V123 ; free mm1 */
	psllw $2, %mm2			/* t272 */
	psubsw %mm5, %mm2		/* V124 (keep V123) */
	psllw $1, %mm4			/* t274 */
	movq %mm5, 8*2(%esi)		/* save V123 ; free mm5 */
	paddsw %mm2, %mm4		/* V125 (keep V124) */
/* column 0: even part (after transpose) */
	movq 8*12(%esi), %mm0		/* tmt12 */
	movq %mm6, %mm3			/* duplicate tmt4 */
	psubsw %mm0, %mm6		/* V100 */
	paddsw %mm0, %mm3		/* V101 ; free mm0 */
	pmulhw x5a825a825a825a82, %mm6	/* 23170 ->V102 */
	movq %mm7, %mm5			/* duplicate tmt0 */
	movq 8*8(%esi), %mm1		/* tmt8 */
	paddsw %mm1, %mm7		/* V103 */
	psubsw %mm1, %mm5		/* V104 ; free mm1 */
	movq %mm7, %mm0			/* duplicate V103 */
	psllw $2, %mm6			/* t245 */
	paddsw %mm3, %mm7		/* V106 */
	movq %mm5, %mm1			/* duplicate V104 */
	psubsw %mm3, %mm6		/* V105 */
	psubsw %mm3, %mm0		/* V109; free mm3 */
	paddsw %mm6, %mm5		/* V107 */
	psubsw %mm6, %mm1		/* V108 ; free mm6 */
/* column 0: output butterfly (after transform) */
	movq %mm1, %mm3			/* duplicate V108 */
	paddsw %mm2, %mm1		/* out4 */
	psraw $4, %mm1
	psubsw %mm2, %mm3		/* out10 ; free mm2 */
	psraw $4, %mm3
	movq %mm0, %mm6			/* duplicate V109 */
	movq %mm1, 8*4(%edi)		/* out4 ; free mm1 */
	psubsw %mm4, %mm0		/* out6 */
	movq %mm3, 8*10(%edi)		/* out10 ; free mm3 */
	psraw $4, %mm0
	paddsw %mm4, %mm6		/* out8 ; free mm4 */
	movq %mm7, %mm1			/* duplicate V106 */
	movq %mm0, 8*6(%edi)		/* out6 ; free mm0 */
	psraw $4, %mm6
	movq (%esi), %mm4		/* V115 */
	movq %mm6, 8*8(%edi)		/* out8 ; free mm6 */
	movq %mm5, %mm2			/* duplicate V107 */
	movq 8*2(%esi), %mm3		/* V123 */
	paddsw %mm4, %mm7		/* out0 */
/* moved up from next block */
	movq scratch3, %mm0
	psraw $4, %mm7
/* moved up from next block */
	movq scratch5, %mm6 
	psubsw %mm4, %mm1		/* out14 ; free mm4 */
	paddsw %mm3, %mm5		/* out2 */
	psraw $4, %mm1
	movq %mm7, (%edi)		/* out0 ; free mm7 */
	psraw $4, %mm5
	movq %mm1, 8*14(%edi)		/* out14 ; free mm1 */
	psubsw %mm3, %mm2		/* out12 ; free mm3 */
	movq %mm5, 8*2(%edi)		/* out2 ; free mm5 */
	psraw $4, %mm2
/* moved up to the prev block */
	movq scratch7, %mm4
/* moved up to the prev block */
	psraw $4, %mm0
	movq %mm2, 8*12(%edi)		/* out12 ; free mm2 */
/* moved up to the prev block */
	psraw $4, %mm6
/* move back the data to its correct place
* moved up to the prev block
 *	movq scratch3, %mm0
 *	movq scratch5, %mm6
 *	movq scratch7, %mm4
 *	psraw $4, %mm0
 *	psraw $4, %mm6
*/
	movq scratch1, %mm1
	psraw $4, %mm4
	movq %mm0, 8*3(%edi)		/* out3 */
	psraw $4, %mm1
	movq %mm6, 8*5(%edi)		/* out5 */
	movq %mm4, 8*7(%edi)		/* out7 */
	movq %mm1, 8(%edi)		/* out1 */
	popl %edi
	popl %esi
	popl %edx
	popl %ecx
	popl %ebx
	movl %ebp,%esp
	popl %ebp
	ret
.Lfe1:
	.size	 IDCT_mmx,.Lfe1-IDCT_mmx




       	.align 8
.text
	.align 4
.globl IDCT_16bit8bit
	.type	 IDCT_16bit8bit,@function
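/* Apparent C prototype, judging from the argument loads below:
 *	void IDCT_16bit8bit(const short *src, unsigned char *dst, int skip);
 * Packs an 8x8 block of 16 bit values down to 8 bit pixels with unsigned
 * saturation (packuswb), writing one 8 pixel row per step and advancing the
 * destination pointer by 'skip' bytes after each row.
 */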
IDCT_16bit8bit:
	pushl %ebp
	movl %esp,%ebp
	pushl %ebx
	pushl %ecx
	pushl %edx
	pushl %esi
	pushl %edi
        movl 16(%ebp),%ebx		/* output skip */
        movl 12(%ebp),%edi		/* output XXX */
       	movl 8(%ebp),%esi		/* source matrix */

        movq 1*8(%esi),%mm0
        movq 0*8(%esi),%mm1
        packuswb %mm0,%mm1
        movq %mm1,(%edi)
        add %ebx,%edi

        movq 3*8(%esi),%mm0
        movq 2*8(%esi),%mm1
        packuswb %mm0,%mm1
        movq %mm1,(%edi)
        add %ebx,%edi

        movq 5*8(%esi),%mm0
        movq 4*8(%esi),%mm1
        packuswb %mm0,%mm1
        movq %mm1,(%edi)
        add %ebx,%edi

        movq 7*8(%esi),%mm0
        movq 6*8(%esi),%mm1
        packuswb %mm0,%mm1
        movq %mm1,(%edi)
        add %ebx,%edi

        movq 9*8(%esi),%mm0
        movq 8*8(%esi),%mm1
        packuswb %mm0,%mm1
        movq %mm1,(%edi)
        add %ebx,%edi

        movq 11*8(%esi),%mm0
        movq 10*8(%esi),%mm1
        packuswb %mm0,%mm1
        movq %mm1,(%edi)
        add %ebx,%edi

        movq 13*8(%esi),%mm0
        movq 12*8(%esi),%mm1
        packuswb %mm0,%mm1
        movq %mm1,(%edi)
        add %ebx,%edi

        movq 15*8(%esi),%mm0
        movq 14*8(%esi),%mm1
        packuswb %mm0,%mm1
        movq %mm1,(%edi)
        
        popl %edi
	popl %esi
	popl %edx
	popl %ecx
	popl %ebx
	movl %ebp,%esp
	popl %ebp
	ret





       	.align 8
.text
	.align 4
.globl IDCT_16bit8bit_add
	.type	 IDCT_16bit8bit_add,@function
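/* Apparent C prototype, judging from the argument loads below:
 *	void IDCT_16bit8bit_add(const short *src, unsigned char *dst, int skip);
 * Like IDCT_16bit8bit, but the existing destination pixels are unpacked to
 * 16 bit (punpcklbw/punpckhbw against zero), the 16 bit values are added,
 * and the sums are repacked with unsigned saturation.
 */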
IDCT_16bit8bit_add:
	pushl %ebp
	movl %esp,%ebp
	pushl %ebx
	pushl %ecx
	pushl %edx
	pushl %esi
	pushl %edi
        movl 16(%ebp),%ebx		/* output skip */
        movl 12(%ebp),%edi		/* output XXX */
       	movl 8(%ebp),%esi		/* source matrix */

        pxor %mm7,%mm7
        
        movq (%edi),%mm2
        movq 1*8(%esi),%mm0
        movq %mm2,%mm3
        movq 0*8(%esi),%mm1
        punpcklbw %mm7,%mm3
        punpckhbw %mm7,%mm2
        paddw %mm1,%mm3
        paddw %mm0,%mm2
        packuswb %mm2,%mm3
        movq %mm3,(%edi)
        add %ebx,%edi
        
        movq (%edi),%mm2
        movq 3*8(%esi),%mm0
        movq %mm2,%mm3
        movq 2*8(%esi),%mm1
        punpcklbw %mm7,%mm3
        punpckhbw %mm7,%mm2
        paddw %mm1,%mm3
        paddw %mm0,%mm2
        packuswb %mm2,%mm3
        movq %mm3,(%edi)
        add %ebx,%edi
        
        movq (%edi),%mm2
        movq 5*8(%esi),%mm0
        movq %mm2,%mm3
        movq 4*8(%esi),%mm1
        punpcklbw %mm7,%mm3
        punpckhbw %mm7,%mm2
        paddw %mm1,%mm3
        paddw %mm0,%mm2
        packuswb %mm2,%mm3
        movq %mm3,(%edi)
        add %ebx,%edi
        
        movq (%edi),%mm2
        movq 7*8(%esi),%mm0
        movq %mm2,%mm3
        movq 6*8(%esi),%mm1
        punpcklbw %mm7,%mm3
        punpckhbw %mm7,%mm2
        paddw %mm1,%mm3
        paddw %mm0,%mm2
        packuswb %mm2,%mm3
        movq %mm3,(%edi)
        add %ebx,%edi
        
        movq (%edi),%mm2
        movq 9*8(%esi),%mm0
        movq %mm2,%mm3
        movq 8*8(%esi),%mm1
        punpcklbw %mm7,%mm3
        punpckhbw %mm7,%mm2
        paddw %mm1,%mm3
        paddw %mm0,%mm2
        packuswb %mm2,%mm3
        movq %mm3,(%edi)
        add %ebx,%edi
        
        movq (%edi),%mm2
        movq 11*8(%esi),%mm0
        movq %mm2,%mm3
        movq 10*8(%esi),%mm1
        punpcklbw %mm7,%mm3
        punpckhbw %mm7,%mm2
        paddw %mm1,%mm3
        paddw %mm0,%mm2
        packuswb %mm2,%mm3
        movq %mm3,(%edi)
        add %ebx,%edi
        
        movq (%edi),%mm2
        movq 13*8(%esi),%mm0
        movq %mm2,%mm3
        movq 12*8(%esi),%mm1
        punpcklbw %mm7,%mm3
        punpckhbw %mm7,%mm2
        paddw %mm1,%mm3
        paddw %mm0,%mm2
        packuswb %mm2,%mm3
        movq %mm3,(%edi)
        add %ebx,%edi
        
        movq (%edi),%mm2
        movq 15*8(%esi),%mm0
        movq %mm2,%mm3
        movq 14*8(%esi),%mm1
        punpcklbw %mm7,%mm3
        punpckhbw %mm7,%mm2
        paddw %mm1,%mm3
        paddw %mm0,%mm2
        packuswb %mm2,%mm3
        movq %mm3,(%edi)
        
        popl %edi
	popl %esi
	popl %edx
	popl %ecx
	popl %ebx
	movl %ebp,%esp
	popl %ebp
	ret



--- NEW FILE: mcomp_sgl_simple.hh ---
/********************************************************************************
  video12/modules/mcomp_sgl_simple.hh

  purpose:
    Motion compensation for single macroblocks (plain C++ implementation).

  notes:

  to do:

  author(s):
   - Dirk Farin, farin@ti.uni-mannheim.de

  modifications:
   23/Oct/1999 - Dirk Farin
     - first revision
 ********************************************************************************
    Copyright (C) 1999  Dirk Farin

    This program is distributed under GNU Public License (GPL) as
    outlined in the COPYING file that comes with the source distribution.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 ********************************************************************************/

#ifndef DVDVIEW_VIDEO12_MODULES_MCOMP_SGL_SIMPLE_HH
#define DVDVIEW_VIDEO12_MODULES_MCOMP_SGL_SIMPLE_HH

#include "error.hh"
#include "video12/viddec_mods.hh"

class MotionCompensation_SglMB_Simple : public MotionCompensation_SglMB
{
public:
  MotionCompensation_SglMB_Simple();

  void DoMotionCompensationSglMB_SGL_H8 (MotionCompensation_SglMB::MCData*) { Assert(0); }
  void DoMotionCompensationSglMB_SGL_H16(MotionCompensation_SglMB::MCData*);
  void DoMotionCompensationSglMB_DBL_H8 (MotionCompensation_SglMB::MCData*) { Assert(0); }
  void DoMotionCompensationSglMB_DBL_H16(MotionCompensation_SglMB::MCData*) { Assert(0); }

  MCompFunc*const* AskMCompFunc_Sgl_Luma() const;  // Returns array of 4 func.pointers.
  MCompFunc*const* AskMCompFunc_Dbl_Luma() const;  // Returns array of 16 func.pointers.
  MCompFunc*const* AskMCompFunc_Sgl_Chroma(uint2 chroma) const;
  MCompFunc*const* AskMCompFunc_Dbl_Chroma(uint2 chroma) const;

private:
  MCompFunc* sglluma[4];
  MCompFunc* dblluma[16];
  MCompFunc* sglchroma420[4];
  MCompFunc* dblchroma420[16];
  MCompFunc* sglchroma422[4];
  MCompFunc* dblchroma422[16];
  MCompFunc* sglchroma444[4];
  MCompFunc* dblchroma444[16];
};

#endif

--- NEW FILE: idct_arai.hh ---
/*********************************************************************
  idct_arai.hh

  purpose:
    Perform the IDCT using the AAN algorithm.

  notes:

  to do:

  author(s):
   - Dirk Farin, Kapellenweg 15, 72070 Tuebingen, Germany,
     email: farindk@trick.informatik.uni-stuttgart.de

  modifications:
   30/Dec/98 - Dirk Farin
     - Extracted the main IDCT code from the encoder.
 *********************************************************************/

#ifndef IDCT_ARAI_HH
#define IDCT_ARAI_HH

#include "types.hh"

void IDCT_Int (const short* in[8], short* out[8]);
void IDCT_Int2(      short* in[8], short* out[8]);
void IDCT_Int2b(     short* in, Pixel* out[8],bool add);
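
// (The 'add' flag of IDCT_Int2b presumably selects between overwriting the
// output pixels and adding the IDCT result to them, mirroring the MMX pair
// IDCT_16bit8bit / IDCT_16bit8bit_add in mmxidct.S.)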

#endif

--- NEW FILE: mcomp_sgl_mmx.cc ---
/********************************************************************************
    Copyright (C) 1999  Dirk Farin

    This program is distributed under GNU Public License (GPL) as
    outlined in the COPYING file that comes with the source distribution.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 ********************************************************************************/

#include "video12/modules/mcomp_sgl_mmx.hh"

#include <iostream.h>
#include <iomanip.h>


static void InitMMXRegs()
{
  uint64 mask1 = 0x0101010101010101LL;
  uint64 mask2 = 0xFEFEFEFEFEFEFEFELL;

  __asm__
    (
     "movq %0,%%mm0\n\t"
     "movq %1,%%mm7\n\t"
     : : "m" (mask1), "m" (mask2)
     );
}
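
// The two masks implement byte-wise averaging with rounding while staying in
// 8 bits.  The inline assembly below relies on the identity (for bytes a,b):
//
//   (a + b + 1) >> 1  ==  ((a & 0xFE) >> 1) + ((b & 0xFE) >> 1) + ((a | b) & 1)
//
// mm7 (0xFE...) clears the per-byte LSB before shifting; mm0 (0x01...)
// extracts the rounding bit.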



// -------------------------- SINGLE PREDICTION ----------------------------------
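
// Naming convention (inferred from the code): Luma<H><V> / Chroma<H><V>,
// where the first letter gives the horizontal and the second the vertical
// sub-pel position -- F = full-pel copy, H = half-pel averaging.  The *add
// variants further below average the 'nextimg' prediction into the block
// already written from 'lastimg' (dual prediction).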

static void LumaFF(MotionCompensation_SglMB::MCData* mc)
{
  const Pixel* sp;
        Pixel* dp;

  sp = mc->lastimg.y;
  dp = mc->currimg.y;

  for (int y=mc->blkheight;y>0;y--)
    {
      const uint64* spd = (const uint64*)sp;
            uint64* dpd = (uint64*)dp;

      dpd[ 0]= spd[ 0];
      dpd[ 1]= spd[ 1];

      dp += mc->bytesperline_lum;
      sp += mc->bytesperline_lum;
    }
}

static void LumaHF(MotionCompensation_SglMB::MCData* mc)
{
  InitMMXRegs();

  const Pixel* sp;
        Pixel* dp;

  sp = mc->lastimg.y;
  dp = mc->currimg.y;

  uint64 xshift = (((uint32)sp) & 7);
  sp -= xshift;

  xshift <<= 3;               // left bits to ignore
  uint64 xshift2 = 64-xshift; // right bits to ignore
  uint64 xshift3 = 64-8-xshift;

  for (int y=mc->blkheight;y>0;y--)
    {
      __asm__
        (
         // read 24 pixels, aligned (mm1 mm2 mm3)
         "movq   (%0),%%mm1\n\t"
         "movq  8(%0),%%mm2\n\t"
         "movq 16(%0),%%mm3\n\t"

         "movq %%mm2,%%mm4\n\t"
         "movq %%mm3,%%mm6\n\t"  // fuer spaeter

         "psrlq %2,%%mm1\n\t"
         "psrlq %2,%%mm2\n\t"

         "psllq %3,%%mm4\n\t"
         "psllq %3,%%mm3\n\t"

         "por  %%mm4,%%mm1\n\t"
         "por  %%mm3,%%mm2\n\t"

         // the 16 pixels are now in (mm1,mm2)

         "movq %%mm1,%%mm3\n\t"
         "movq %%mm2,%%mm4\n\t"
         "movq %%mm2,%%mm5\n\t"

         "psrlq $8,%%mm3\n\t"
         "psrlq $8,%%mm4\n\t"

         "psllq $56,%%mm5\n\t"
         "psllq %4,%%mm6\n\t"

         "por  %%mm5,%%mm3\n\t"
         "por  %%mm6,%%mm4\n\t"

         // another 16 pixels (starting one pixel further right) are now in (mm3,mm4)

         // compute the rounding term (mm5,mm6)

         "movq %%mm1,%%mm5\n\t"
         "por  %%mm3,%%mm5\n\t"
         "pand %%mm0,%%mm5\n\t"
         "movq %%mm2,%%mm6\n\t"
         "por  %%mm4,%%mm6\n\t"
         "pand %%mm0,%%mm6\n\t"

         // add the pixel values

         "pand    %%mm7,%%mm1\n\t"
         "pand    %%mm7,%%mm2\n\t"
         "pand    %%mm7,%%mm3\n\t"
         "pand    %%mm7,%%mm4\n\t"
         "psrlq   $1,%%mm1\n\t"
         "psrlq   $1,%%mm2\n\t"
         "psrlq   $1,%%mm3\n\t"
         "psrlq   $1,%%mm4\n\t"
         "paddusb %%mm3,%%mm1\n\t"
         "paddusb %%mm4,%%mm2\n\t"

         // add the rounding

         "paddusb %%mm5,%%mm1\n\t"
         "paddusb %%mm6,%%mm2\n\t"

         // store the result

         "movq %%mm1, (%1)\n\t"
         "movq %%mm2,8(%1)\n\t"

         : : "r"(sp),"r"(dp),"m"(xshift),"m"(xshift2),"m"(xshift3)
         );

      sp += mc->bytesperline_lum;
      dp += mc->bytesperline_lum;
    }
}

static void LumaFH(MotionCompensation_SglMB::MCData* mc)
{
  const Pixel* sp;
        Pixel* dp;

  sp = mc->lastimg.y;
  dp = mc->currimg.y;

  for (int y=0;y<mc->blkheight;y++)
    {
      for (int x=0;x<16;x++)
        {
          *dp++ = (*sp + sp[mc->bytesperline_lum] + 1)>>1;
          sp++;
        }
      sp += mc->bytesperline_lum-16;
      dp += mc->bytesperline_lum-16;
    }
}

static void LumaHH(MotionCompensation_SglMB::MCData* mc)
{
  const Pixel* sp;
        Pixel* dp;

  sp = mc->lastimg.y;
  dp = mc->currimg.y;

  for (int y=0;y<mc->blkheight;y++)
    {
      int v1 = *sp + sp[mc->bytesperline_lum];

      for (int x=0;x<8;x++)
        {
          sp++;
          int v2 = *sp + sp[mc->bytesperline_lum];
          *dp++ = (v1 + v2 +2)>>2;

          sp++;
          v1 = *sp + sp[mc->bytesperline_lum];
          *dp++ = (v1 + v2 +2)>>2;
        }
      sp += mc->bytesperline_lum-16;
      dp += mc->bytesperline_lum-16;
    }
}

static void ChromaFF(MotionCompensation_SglMB::MCData* mc)
{
  const Pixel* sp = mc->lastimg.cr;
        Pixel* dp = mc->currimg.cr;

  int h = mc->blkheight_chr;

  for (int y=0;y<h;y++)
    {
      for (int x=0;x<8;x++)
        {
          *dp++ = *sp++;
        }
      sp += mc->bytesperline_chr-8;
      dp += mc->bytesperline_chr-8;
    }


  sp = mc->lastimg.cb;
  dp = mc->currimg.cb;

  for (int y=0;y<h;y++)
    {
      for (int x=0;x<8;x++)
        {
          *dp++ = *sp++;
        }
      sp += mc->bytesperline_chr-8;
      dp += mc->bytesperline_chr-8;
    }
}

static void ChromaFH(MotionCompensation_SglMB::MCData* mc)
{
  const Pixel* sp = mc->lastimg.cr;
        Pixel* dp = mc->currimg.cr;

  int h = mc->blkheight_chr;

  for (int y=0;y<h;y++)
    {
      for (int x=0;x<8;x++)
        {
          *dp++ = (*sp + sp[mc->bytesperline_chr]+1)>>1; sp++;
        }
      sp += mc->bytesperline_chr-8;
      dp += mc->bytesperline_chr-8;
    }


  sp = mc->lastimg.cb;
  dp = mc->currimg.cb;

  for (int y=0;y<h;y++)
    {
      for (int x=0;x<8;x++)
        {
          *dp++ = (*sp + sp[mc->bytesperline_chr]+1)>>1; sp++;
        }
      sp += mc->bytesperline_chr-8;
      dp += mc->bytesperline_chr-8;
    }
}

static void ChromaHF(MotionCompensation_SglMB::MCData* mc)
{
  const Pixel* sp = mc->lastimg.cr;
        Pixel* dp = mc->currimg.cr;

  int h = mc->blkheight_chr;

  for (int y=0;y<h;y++)
    {
      for (int x=0;x<8;x++)
        {
          *dp++ = (*sp + sp[1]+1)>>1; sp++;
        }
      sp += mc->bytesperline_chr-8;
      dp += mc->bytesperline_chr-8;
    }


  sp = mc->lastimg.cb;
  dp = mc->currimg.cb;

  for (int y=0;y<h;y++)
    {
      for (int x=0;x<8;x++)
        {
          *dp++ = (*sp + sp[1]+1)>>1; sp++;
        }
      sp += mc->bytesperline_chr-8;
      dp += mc->bytesperline_chr-8;
    }
}

static void ChromaHH(MotionCompensation_SglMB::MCData* mc)
{
  const Pixel* sp = mc->lastimg.cr;
        Pixel* dp = mc->currimg.cr;

  int h = mc->blkheight_chr;

  for (int y=0;y<h;y++)
    {
      int v1 = *sp + sp[mc->bytesperline_chr];

      for (int x=0;x<8;x++)
        {
          sp++;
          int v2 = *sp + sp[mc->bytesperline_chr];
          *dp++ = (v1 + v2+2)>>2;
          v1 = v2;
        }
      sp += mc->bytesperline_chr-8;
      dp += mc->bytesperline_chr-8;
    }


  sp = mc->lastimg.cb;
  dp = mc->currimg.cb;

  for (int y=0;y<h;y++)
    {
      int v1 = *sp + sp[mc->bytesperline_chr];

      for (int x=0;x<8;x++)
        {
          sp++;
          int v2 = *sp + sp[mc->bytesperline_chr];
          *dp++ = (v1 + v2+2)>>2;
          v1 = v2;
        }
      sp += mc->bytesperline_chr-8;
      dp += mc->bytesperline_chr-8;
    }
}





static void LumaFFadd(MotionCompensation_SglMB::MCData* mc)
{
  InitMMXRegs();

  const Pixel* sp;
        Pixel* dp;

  sp = mc->nextimg.y;
  dp = mc->currimg.y;

#if 1

  uint64 xshift = ((uint32)sp) & 7;

  if (xshift==0)
    {
      for (int y=mc->blkheight;y>0;y--)
        {
          // rp: mm1 mm4
          // dp: mm2 mm5
                  
          __asm__
            (
             "movq (%0),%%mm1\n\t"
             "movq (%1),%%mm2\n\t"
                     
             " movq %%mm1,%%mm3\n\t"
             "movq 8(%0),%%mm4\n\t"
             " por  %%mm2,%%mm3\n\t" // mm3 = rp[0] | dp[0]
             "movq 8(%1),%%mm5\n\t"
             " pand %%mm0,%%mm3\n\t" // mm3 = LSB of (rp[0] | dp[0]) = rounding[0]
                     
             "movq %%mm4,%%mm6\n\t"
             "por  %%mm5,%%mm6\n\t"
             "pand %%mm0,%%mm6\n\t"  // mm6 = LSB of (rp[1] | dp[1]) = rounding[1]
                     
             "pand %%mm7,%%mm1\n\t"     // mask out LSB
             "pand %%mm7,%%mm4\n\t"     // mask out LSB
             "psrlq $1,%%mm1\n\t"
             "pand %%mm7,%%mm2\n\t"     // mask out LSB
             "psrlq $1,%%mm2\n\t"
             "pand %%mm7,%%mm5\n\t"     // mask out LSB
             "psrlq $1,%%mm4\n\t"
             "paddusb  %%mm2, %%mm1\n\t"
             "psrlq $1,%%mm5\n\t"
             "paddusb  %%mm3, %%mm1\n\t"
             "movq   %%mm1,(%1)\n\t"      // dp[0] = (dp[0]>>1 + rp[0]>>1) + rounding[0]
                         
             "paddusb  %%mm5, %%mm4\n\t"
             "paddusb  %%mm6, %%mm4\n\t"
             "movq   %%mm4,8(%1)\n\t"     // dp[1] = (dp[1]>>1 + rp[1]>>1) + rounding[1]
                     
             : : "r"(sp),"r"(dp)
             );

          sp += mc->bytesperline_lum;
          dp += mc->bytesperline_lum;
        }
    }
  else
    {
      sp -= xshift;
      xshift <<= 3;               // left bits to ignore
      uint64 xshift2 = 64-xshift; // right bits to ignore
              
      for (int y=mc->blkheight;y>0;y--)
        {
          // rp: mm1 mm2 mm3   ->   mm1 mm3
          // dp:                    mm2 mm4
                  
          __asm__
            (
             // Read data aligned:
             // Read 3x 8bytes and shift data to correct position.
             // The 16 pixels end up in (mm1,mm3).

             "movq   (%0),%%mm1\n\t"  // rp[0] -> mm1
             "movq  8(%0),%%mm2\n\t"  // rp[1] -> mm2,mm4   (*1)
             " psrlq %2,%%mm1\n\t"    // note: shifting right because of little endian
             "movq 16(%0),%%mm3\n\t"  // rp[2] -> mm3
             " movq  %%mm2,%%mm4\n\t" // (*1)

             "psllq %3,%%mm4\n\t"
             "psrlq %2,%%mm2\n\t"
             " por   %%mm4,%%mm1\n\t" // combine first 8 pixels into mm1
             "psllq %3,%%mm3\n\t"
             "por   %%mm2,%%mm3\n\t"  // combine second 8 pixels into mm3


             "movq  (%1),%%mm2\n\t"   // dp[0]
             " movq %%mm1,%%mm5\n\t"

             "movq 8(%1),%%mm4\n\t"   // dp[1]
             " por  %%mm2,%%mm5\n\t"

             "pand %%mm0,%%mm5\n\t"   // mm5: rounding[0]
             " movq %%mm3,%%mm6\n\t"

             "por  %%mm4,%%mm6\n\t"
             " pand %%mm7,%%mm1\n\t"  // mask out LSB of rp[0]
             "pand %%mm0,%%mm6\n\t"   // mm6: rounding[1]
             " pand %%mm7,%%mm3\n\t"  // mask out LSB of rp[1]

             // calc mean

             "psrlq $1,%%mm1\n\t"
             " pand %%mm7,%%mm2\n\t"
             "psrlq $1,%%mm3\n\t"
             " pand %%mm7,%%mm4\n\t"
             "psrlq $1,%%mm2\n\t"
             " paddusb  %%mm5, %%mm1\n\t"   // rp[0] += rounding[0]
             "psrlq $1,%%mm4\n\t"
             " paddusb  %%mm6, %%mm3\n\t"   // rp[1] += rounding[1]
             "paddusb  %%mm2, %%mm1\n\t"    // rp[0] += dp[0]
             "movq   %%mm1, (%1)\n\t"
             " paddusb  %%mm4, %%mm3\n\t"   // rp[1] += dp[1]
             "movq   %%mm3,8(%1)\n\t"
                     
             : : "r"(sp),"r"(dp),"m"(xshift),"m"(xshift2)
             );

          sp += mc->bytesperline_lum;
          dp += mc->bytesperline_lum;
        }
    }

#else

  for (int y=0;y<mc->blkheight;y++)
    {
      for (int x=0;x<16;x++)
        {
          *dp = (*dp + *sp + 1)>>1;
          dp++;
          sp++;
        }
      sp += mc->bytesperline_lum-16;
      dp += mc->bytesperline_lum-16;
    }

#endif
}

static void LumaHFadd(MotionCompensation_SglMB::MCData* mc)
{
  const Pixel* sp;
        Pixel* dp;

  sp = mc->nextimg.y;
  dp = mc->currimg.y;

  for (int y=0;y<mc->blkheight;y++)
    {
      for (int x=0;x<16;x++)
        {
          *dp = (*dp + ((*sp + sp[1] + 1)>>1) + 1)>>1;
          dp++;
          sp++;
        }
      sp += mc->bytesperline_lum-16;
      dp += mc->bytesperline_lum-16;
    }
}

static void LumaFHadd(MotionCompensation_SglMB::MCData* mc)
{
  const Pixel* sp;
        Pixel* dp;

  sp = mc->nextimg.y;
  dp = mc->currimg.y;

  for (int y=0;y<mc->blkheight;y++)
    {
      for (int x=0;x<16;x++)
        {
          *dp = (*dp + ((*sp + sp[mc->bytesperline_lum] + 1)>>1) + 1)>>1;
          dp++;
          sp++;
        }
      sp += mc->bytesperline_lum-16;
      dp += mc->bytesperline_lum-16;
    }
}

static void LumaHHadd(MotionCompensation_SglMB::MCData* mc)
{
  const Pixel* sp;
        Pixel* dp;

  sp = mc->nextimg.y;
  dp = mc->currimg.y;

  for (int y=0;y<mc->blkheight;y++)
    {
      for (int x=0;x<16;x++)
        {
          *dp = (*dp + ((*sp + sp[1] + sp[mc->bytesperline_lum] + sp[mc->bytesperline_lum+1] +2)>>2)+1)>>1;
          dp++;
          sp++;
        }
      sp += mc->bytesperline_lum-16;
      dp += mc->bytesperline_lum-16;
    }
}

static void ChromaFFadd(MotionCompensation_SglMB::MCData* mc)
{
  const Pixel* sp = mc->nextimg.cr;
        Pixel* dp = mc->currimg.cr;

  int h = mc->blkheight_chr;

  for (int y=0;y<h;y++)
    {
      for (int x=0;x<8;x++)
        {
          *dp = (*dp + *sp + 1)>>1;
          sp++;
          dp++;
        }
      sp += mc->bytesperline_chr-8;
      dp += mc->bytesperline_chr-8;
    }


  sp = mc->nextimg.cb;
  dp = mc->currimg.cb;

  for (int y=0;y<h;y++)
    {
      for (int x=0;x<8;x++)
        {
          *dp = (*dp + *sp + 1)>>1;
          sp++;
          dp++;
        }
      sp += mc->bytesperline_chr-8;
      dp += mc->bytesperline_chr-8;
    }
}

static void ChromaFHadd(MotionCompensation_SglMB::MCData* mc)
{
  const Pixel* sp = mc->nextimg.cr;
        Pixel* dp = mc->currimg.cr;

  int h = mc->blkheight_chr;

  for (int y=0;y<h;y++)
    {
      for (int x=0;x<8;x++)
        {
          *dp = (*dp + ((*sp + sp[mc->bytesperline_chr]+1)>>1) + 1)>>1;
          dp++;
          sp++;
        }
      sp += mc->bytesperline_chr-8;
      dp += mc->bytesperline_chr-8;
    }


  sp = mc->nextimg.cb;
  dp = mc->currimg.cb;

  for (int y=0;y<h;y++)
    {
      for (int x=0;x<8;x++)
        {
          *dp = (*dp + ((*sp + sp[mc->bytesperline_chr]+1)>>1) + 1)>>1;
          dp++;
          sp++;
        }
      sp += mc->bytesperline_chr-8;
      dp += mc->bytesperline_chr-8;
    }
}

static void ChromaHFadd(MotionCompensation_SglMB::MCData* mc)
{
  const Pixel* sp = mc->nextimg.cr;
        Pixel* dp = mc->currimg.cr;

  int h = mc->blkheight_chr;

  for (int y=0;y<h;y++)
    {
      for (int x=0;x<8;x++)
        {
          *dp = (*dp + ((*sp + sp[1]+1)>>1) + 1)>>1; dp++; sp++;
        }
      sp += mc->bytesperline_chr-8;
      dp += mc->bytesperline_chr-8;
    }


  sp = mc->nextimg.cb;
  dp = mc->currimg.cb;

  for (int y=0;y<h;y++)
    {
      for (int x=0;x<8;x++)
        {
          *dp = (*dp + ((*sp + sp[1]+1)>>1) + 1)>>1; dp++; sp++;
        }
      sp += mc->bytesperline_chr-8;
      dp += mc->bytesperline_chr-8;
    }
}

static void ChromaHHadd(MotionCompensation_SglMB::MCData* mc)
{
  const Pixel* sp = mc->nextimg.cr;
        Pixel* dp = mc->currimg.cr;

  int h = mc->blkheight_chr;

  for (int y=0;y<h;y++)
    {
      for (int x=0;x<8;x++)
        {
          *dp = (*dp + ((*sp + sp[1] + sp[mc->bytesperline_chr] + sp[mc->bytesperline_chr+1]+2)>>2)+1)>>1;
          sp++; dp++;
        }
      sp += mc->bytesperline_chr-8;
      dp += mc->bytesperline_chr-8;
    }


  sp = mc->nextimg.cb;
  dp = mc->currimg.cb;

  for (int y=0;y<h;y++)
    {
      for (int x=0;x<8;x++)
        {
          *dp = (*dp + ((*sp + sp[1] + sp[mc->bytesperline_chr] + sp[mc->bytesperline_chr+1]+2)>>2)+1)>>1;
          sp++; dp++;
        }
      sp += mc->bytesperline_chr-8;
      dp += mc->bytesperline_chr-8;
    }
}


static void MC_420_DBL_FFFF_Luma(MotionCompensation_SglMB::MCData* mc) { LumaFF(mc); LumaFFadd(mc); }
static void MC_420_DBL_FHFF_Luma(MotionCompensation_SglMB::MCData* mc) { LumaFF(mc); LumaFHadd(mc); }
static void MC_420_DBL_HFFF_Luma(MotionCompensation_SglMB::MCData* mc) { LumaFF(mc); LumaHFadd(mc); }
static void MC_420_DBL_HHFF_Luma(MotionCompensation_SglMB::MCData* mc) { LumaFF(mc); LumaHHadd(mc); }
static void MC_420_DBL_FFFH_Luma(MotionCompensation_SglMB::MCData* mc) { LumaFH(mc); LumaFFadd(mc); }
static void MC_420_DBL_FHFH_Luma(MotionCompensation_SglMB::MCData* mc) { LumaFH(mc); LumaFHadd(mc); }
static void MC_420_DBL_HFFH_Luma(MotionCompensation_SglMB::MCData* mc) { LumaFH(mc); LumaHFadd(mc); }
static void MC_420_DBL_HHFH_Luma(MotionCompensation_SglMB::MCData* mc) { LumaFH(mc); LumaHHadd(mc); }
static void MC_420_DBL_FFHF_Luma(MotionCompensation_SglMB::MCData* mc) { LumaHF(mc); LumaFFadd(mc); }
static void MC_420_DBL_FHHF_Luma(MotionCompensation_SglMB::MCData* mc) { LumaHF(mc); LumaFHadd(mc); }
static void MC_420_DBL_HFHF_Luma(MotionCompensation_SglMB::MCData* mc) { LumaHF(mc); LumaHFadd(mc); }
static void MC_420_DBL_HHHF_Luma(MotionCompensation_SglMB::MCData* mc) { LumaHF(mc); LumaHHadd(mc); }
static void MC_420_DBL_FFHH_Luma(MotionCompensation_SglMB::MCData* mc) { LumaHH(mc); LumaFFadd(mc); }
static void MC_420_DBL_FHHH_Luma(MotionCompensation_SglMB::MCData* mc) { LumaHH(mc); LumaFHadd(mc); }
static void MC_420_DBL_HFHH_Luma(MotionCompensation_SglMB::MCData* mc) { LumaHH(mc); LumaHFadd(mc); }
static void MC_420_DBL_HHHH_Luma(MotionCompensation_SglMB::MCData* mc) { LumaHH(mc); LumaHHadd(mc); }
              
static void MC_420_DBL_FFFF_Chroma(MotionCompensation_SglMB::MCData* mc) { ChromaFF(mc); ChromaFFadd(mc); }
static void MC_420_DBL_FHFF_Chroma(MotionCompensation_SglMB::MCData* mc) { ChromaFF(mc); ChromaFHadd(mc); }
static void MC_420_DBL_HFFF_Chroma(MotionCompensation_SglMB::MCData* mc) { ChromaFF(mc); ChromaHFadd(mc); }
static void MC_420_DBL_HHFF_Chroma(MotionCompensation_SglMB::MCData* mc) { ChromaFF(mc); ChromaHHadd(mc); }
static void MC_420_DBL_FFFH_Chroma(MotionCompensation_SglMB::MCData* mc) { ChromaFH(mc); ChromaFFadd(mc); }
static void MC_420_DBL_FHFH_Chroma(MotionCompensation_SglMB::MCData* mc) { ChromaFH(mc); ChromaFHadd(mc); }
static void MC_420_DBL_HFFH_Chroma(MotionCompensation_SglMB::MCData* mc) { ChromaFH(mc); ChromaHFadd(mc); }
static void MC_420_DBL_HHFH_Chroma(MotionCompensation_SglMB::MCData* mc) { ChromaFH(mc); ChromaHHadd(mc); }
static void MC_420_DBL_FFHF_Chroma(MotionCompensation_SglMB::MCData* mc) { ChromaHF(mc); ChromaFFadd(mc); }
static void MC_420_DBL_FHHF_Chroma(MotionCompensation_SglMB::MCData* mc) { ChromaHF(mc); ChromaFHadd(mc); }
static void MC_420_DBL_HFHF_Chroma(MotionCompensation_SglMB::MCData* mc) { ChromaHF(mc); ChromaHFadd(mc); }
static void MC_420_DBL_HHHF_Chroma(MotionCompensation_SglMB::MCData* mc) { ChromaHF(mc); ChromaHHadd(mc); }
static void MC_420_DBL_FFHH_Chroma(MotionCompensation_SglMB::MCData* mc) { ChromaHH(mc); ChromaFFadd(mc); }
static void MC_420_DBL_FHHH_Chroma(MotionCompensation_SglMB::MCData* mc) { ChromaHH(mc); ChromaFHadd(mc); }
static void MC_420_DBL_HFHH_Chroma(MotionCompensation_SglMB::MCData* mc) { ChromaHH(mc); ChromaHFadd(mc); }
static void MC_420_DBL_HHHH_Chroma(MotionCompensation_SglMB::MCData* mc) { ChromaHH(mc); ChromaHHadd(mc); }



MotionCompensation_SglMB_MMX::MotionCompensation_SglMB_MMX()
{
  sglluma[0]   = LumaFF;
  sglluma[MC_Last_HalfH] = LumaHF;
  sglluma[MC_Last_HalfV] = LumaFH;
  sglluma[MC_Last_HalfH | MC_Last_HalfV] = LumaHH;

  sglchroma420[0] = ChromaFF;
  sglchroma420[MC_Last_HalfH] = ChromaHF;
  sglchroma420[MC_Last_HalfV] = ChromaFH;
  sglchroma420[MC_Last_HalfH | MC_Last_HalfV] = ChromaHH;

  for (int i=0;i<4;i++)
    {
      sglchroma422[i] = ChromaFF;
      sglchroma444[i] = ChromaFF;
    }
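
  // Index layout of the 16-entry tables, inferred from the initializers
  // below: bit 0 = vertical and bit 1 = horizontal half-pel flag of the
  // 'last' prediction; bits 2 and 3 are the same flags for 'next'.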

  dblluma[ 0] = MC_420_DBL_FFFF_Luma;
  dblluma[ 1] = MC_420_DBL_FFFH_Luma;
  dblluma[ 2] = MC_420_DBL_FFHF_Luma;
  dblluma[ 3] = MC_420_DBL_FFHH_Luma;
  dblluma[ 4] = MC_420_DBL_FHFF_Luma;
  dblluma[ 5] = MC_420_DBL_FHFH_Luma;
  dblluma[ 6] = MC_420_DBL_FHHF_Luma;
  dblluma[ 7] = MC_420_DBL_FHHH_Luma;
  dblluma[ 8] = MC_420_DBL_HFFF_Luma;
  dblluma[ 9] = MC_420_DBL_HFFH_Luma;
  dblluma[10] = MC_420_DBL_HFHF_Luma;
  dblluma[11] = MC_420_DBL_HFHH_Luma;
  dblluma[12] = MC_420_DBL_HHFF_Luma;
  dblluma[13] = MC_420_DBL_HHFH_Luma;
  dblluma[14] = MC_420_DBL_HHHF_Luma;
  dblluma[15] = MC_420_DBL_HHHH_Luma;

  dblchroma420[ 0] = MC_420_DBL_FFFF_Chroma;
  dblchroma420[ 1] = MC_420_DBL_FFFH_Chroma;
  dblchroma420[ 2] = MC_420_DBL_FFHF_Chroma;
  dblchroma420[ 3] = MC_420_DBL_FFHH_Chroma;
  dblchroma420[ 4] = MC_420_DBL_FHFF_Chroma;
  dblchroma420[ 5] = MC_420_DBL_FHFH_Chroma;
  dblchroma420[ 6] = MC_420_DBL_FHHF_Chroma;
  dblchroma420[ 7] = MC_420_DBL_FHHH_Chroma;
  dblchroma420[ 8] = MC_420_DBL_HFFF_Chroma;
  dblchroma420[ 9] = MC_420_DBL_HFFH_Chroma;
  dblchroma420[10] = MC_420_DBL_HFHF_Chroma;
  dblchroma420[11] = MC_420_DBL_HFHH_Chroma;
  dblchroma420[12] = MC_420_DBL_HHFF_Chroma;
  dblchroma420[13] = MC_420_DBL_HHFH_Chroma;
  dblchroma420[14] = MC_420_DBL_HHHF_Chroma;
  dblchroma420[15] = MC_420_DBL_HHHH_Chroma;
}

MotionCompensation_SglMB::MCompFunc*const*
MotionCompensation_SglMB_MMX::AskMCompFunc_Sgl_Luma() const
{
  return sglluma;
}

MotionCompensation_SglMB::MCompFunc*const*
MotionCompensation_SglMB_MMX::AskMCompFunc_Dbl_Luma() const
{
  return dblluma;
}

MotionCompensation_SglMB::MCompFunc*const*
MotionCompensation_SglMB_MMX::AskMCompFunc_Sgl_Chroma(uint2 chroma) const
{
  return sglchroma420;
}

MotionCompensation_SglMB::MCompFunc*const*
MotionCompensation_SglMB_MMX::AskMCompFunc_Dbl_Chroma(uint2 chroma) const
{
  return dblchroma420;
}


--- NEW FILE: mcomp_sgl_mmx.hh ---
/********************************************************************************
  video12/modules/mcomp_sgl_mmx.hh

  purpose:
    Motion compensation for single macroblocks (MMX-accelerated).

  notes:

  to do:

  author(s):
   - Dirk Farin, farin@ti.uni-mannheim.de

  modifications:
   22/Nov/1999 - Dirk Farin
     - first revision
 ********************************************************************************
    Copyright (C) 1999  Dirk Farin

    This program is distributed under GNU Public License (GPL) as
    outlined in the COPYING file that comes with the source distribution.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 ********************************************************************************/

#ifndef DVDVIEW_VIDEO12_MODULES_MCOMP_SGL_MMX_HH
#define DVDVIEW_VIDEO12_MODULES_MCOMP_SGL_MMX_HH

#include "error.hh"
#include "video12/viddec_mods.hh"


class MotionCompensation_SglMB_MMX : public MotionCompensation_SglMB
{
public:
  MotionCompensation_SglMB_MMX();

  void Init();

  void DoMotionCompensationSglMB_SGL_H8 (MotionCompensation_SglMB::MCData*) { Assert(0); }
  void DoMotionCompensationSglMB_SGL_H16(MotionCompensation_SglMB::MCData*);
  void DoMotionCompensationSglMB_DBL_H8 (MotionCompensation_SglMB::MCData*) { Assert(0); }
  void DoMotionCompensationSglMB_DBL_H16(MotionCompensation_SglMB::MCData*) { Assert(0); }

  MCompFunc*const* AskMCompFunc_Sgl_Luma() const;  // Returns array of 4 func.pointers.
  MCompFunc*const* AskMCompFunc_Dbl_Luma() const;  // Returns array of 16 func.pointers.
  MCompFunc*const* AskMCompFunc_Sgl_Chroma(uint2 chroma) const;
  MCompFunc*const* AskMCompFunc_Dbl_Chroma(uint2 chroma) const;

private:
  MCompFunc* sglluma[4];
  MCompFunc* dblluma[16];
  MCompFunc* sglchroma420[4];
  MCompFunc* dblchroma420[16];
  MCompFunc* sglchroma422[4];
  MCompFunc* dblchroma422[16];
  MCompFunc* sglchroma444[4];
  MCompFunc* dblchroma444[16];
};

#endif

--- NEW FILE: idct_arai.cc ---

#include "video12/modules/idct_arai.hh"

#include <math.h>
#include <assert.h>
#include <iostream.h>
#include <iomanip.h>

static Pixel* clip2_0_255;
static Pixel  clip2_0_255_data[1024];

static class InitCLIP_DCT
{
public:
  InitCLIP_DCT()
    {
      clip2_0_255 = &clip2_0_255_data[512];

      for (int i=-512;i<512;i++)
        {
          int val=i;
          if (val<0) val=0;
          if (val>255) val=255;
          clip2_0_255[i]=val;
        }
    }
} dummyclass_weiour;


/* private data */
static short iclip[1024]; /* clipping table */
static short *iclp;

// static int c1,c2;

const int FDCT_M1Scale      = 30;
const int FDCT_PostM1_VK    = 12;
const int FDCT_PostM1_NK    = 16;
const int FDCT_PostM1_Shift = 14;

const int FDCT_M2Scale      = 30;
const int FDCT_PostM2_VK    = 16;
const int FDCT_PostM2_NK    = 15;
const int FDCT_PostM2_Shift = 31;

const int FDCT_M3Scale      = 30;
const int FDCT_PostM3_VK    = 17;
const int FDCT_PostM3_NK    = 15;
const int FDCT_PostM3_Shift = 30;



const int IDCT_M3Scale      = 33;  // DOCS !
const int IDCT_PostM3_VK    = 10;
const int IDCT_PostM3_NK    = 20;
const int IDCT_PostM3_Shift = 13;

const int IDCT_M2Scale      = 28;
const int IDCT_PostM2_VK    = 12;
const int IDCT_PostM2_NK    = 16;
const int IDCT_PostM2_Shift = 32;

const int IDCT_M1Scale      = 28;
const int IDCT_PostM1_VK    = 16;
const int IDCT_PostM1_NK    = 14;
const int IDCT_PostM1_Shift = 30;


typedef int FDCTInt;
typedef int IDCTInt;



#define SC(x,u,v) { D(x); ibuf[u][v] = ((x) >> (FDCTScale-FDCT_STAGE1_NK)); }

#define D(x,s) { cout << setbase(16) << setfill('0') << setw(16) << (x) << setbase(10) << " "; \
  ibuf[c1][c2]=x; c2++; if (c2==8) { c1++; c2=0; } cout << setfill(' '); }

//#define DESCALE(x,f)   ((x<0) ? ((x - ((1LL<<(f-1)))  ) >> (f)) : ((x + ((1LL<<(f-1)))  ) >> (f)))
#define DESCALE(x,f)    ((x + ((1LL<<(f)) -1 )  ) >> (f))

//#define DESCALE(x,f)      (x >> (f))
//#define DESCALE(x,f)   ((x<0) ? ((x - (1LL<<(f))/2  ) >> (f)) : ((x + (1LL<<(f))/2  ) >> (f)))
//#define DESCALE(x,f)    ((x + ((1LL<<(f))/2 )  ) >> (f))

#define IDESCALE(x,f)    ((x + ((1LL<<(f-1)) )  ) >> (f))
#define IDESCALENeg(x,f) ((x - ((1LL<<(f-1)) )  ) >> (f))

/* Perform a 32x32 -> 64-bit multiplication */
#define MUL32(a,b) (((long long)(a))*((long)(b)))

const long long FDCT_M1ScaleFact = (1LL<<FDCT_M1Scale);
const long long FDCT_M2ScaleFact = (1LL<<FDCT_M2Scale);
const long long FDCT_M3ScaleFact = (1LL<<FDCT_M3Scale);

const long long IDCT_M3ScaleFact = (1LL<<IDCT_M3Scale);
const long long IDCT_M2ScaleFact = (1LL<<IDCT_M2Scale);
const long long IDCT_M1ScaleFact = (1LL<<IDCT_M1Scale);

#ifndef PI
#define PI 3.1415926535
#endif
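
/* Illustrative note: MUL32 widens one operand so the product is computed in
   64 bits, and IDESCALE adds half of 2^f before shifting, i.e. it rounds to
   nearest instead of truncating.  A small self-check (hypothetical, not
   dvdview code; assert.h is already included above): */

static void DescaleSelfCheck()
{
  // sqrt(2) in 16-bit fixed point: round(sqrt(2) * 65536) = 92682.
  long long prod = MUL32(92682, 1000);   // = 92682000, exact in 64 bits
  assert(IDESCALE(prod, 16) == 1414);    // 92682000 / 65536 = 1414.23 -> 1414
}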

typedef long long DCTInt2;

static bool IsInitialized = false;

static DCTInt2  A1;
static DCTInt2 MA2;
static DCTInt2  A3;
static DCTInt2  A4;
static DCTInt2  A5;

static DCTInt2  A1b;
static DCTInt2 MA2b;
static DCTInt2  A3b;
static DCTInt2  A4b;
static DCTInt2  A5b;

static IDCTInt C2;
static IDCTInt C4;
static IDCTInt C6;
static IDCTInt Q;
static IDCTInt R;

static IDCTInt C2b;
static IDCTInt C4b;
static IDCTInt C6b;
static IDCTInt Qb;
static IDCTInt Rb;

static double OutputScaleFactor[8];  // for the FDCT
static double InputScaleFactor[8];   // for the IDCT

static long long d_fact[8][8];

static void Init()
{
  if (IsInitialized)
    return;

  /* Note: every value is assigned exactly once and is never modified
     afterwards, which makes this code multithread-safe. */

  // FDCT

  A1 = (DCTInt2)(sqrt(0.5)*FDCT_M1ScaleFact+0.5);
  MA2 = (DCTInt2)((-(cos(PI/8.0) - cos(3.0*PI/8.0)))*FDCT_M1ScaleFact-0.5);
  A3 = (DCTInt2)(sqrt(0.5)*FDCT_M1ScaleFact+0.5);
  A4 = (DCTInt2)((cos(PI/8.0) + cos(3.0*PI/8.0))*FDCT_M1ScaleFact+0.5);
  A5 = (DCTInt2)(cos(3.0*PI/8.0)*FDCT_M1ScaleFact+0.5);

  A1b = (DCTInt2)(sqrt(0.5)*FDCT_M2ScaleFact+0.5);
  MA2b = (DCTInt2)((-(cos(PI/8.0) - cos(3.0*PI/8.0)))*FDCT_M2ScaleFact-0.5);
  A3b = (DCTInt2)(sqrt(0.5)*FDCT_M2ScaleFact+0.5);
  A4b = (DCTInt2)((cos(PI/8.0) + cos(3.0*PI/8.0))*FDCT_M2ScaleFact+0.5);
  A5b = (DCTInt2)(cos(3.0*PI/8.0)*FDCT_M2ScaleFact+0.5);

  OutputScaleFactor[0] = sqrt(0.5)/2.0;
  {for (int i=1;i<8;i++) OutputScaleFactor[i] = 0.25/cos(PI*(double)i/16.0);}


  // IDCT

  C2  = (DCTInt2)(2.0*cos(PI/8.0)*IDCT_M2ScaleFact+0.5);
  C4  = (DCTInt2)(sqrt(2.0)*IDCT_M2ScaleFact+0.5);
  C6  = (DCTInt2)(2.0*cos(3.0*PI/8.0)*IDCT_M2ScaleFact+0.5);
  Q   = (DCTInt2)((-2.0*cos(PI/8.0) + 2.0*cos(3.0*PI/8.0))*IDCT_M2ScaleFact-0.5);
  R   = (DCTInt2)(( 2.0*cos(PI/8.0) + 2.0*cos(3.0*PI/8.0))*IDCT_M2ScaleFact+0.5);
  /*
    cout << "C2 " << 2.0*cos(PI/8.0) << endl;
    cout << "C4 " << sqrt(2.0) << endl;
    cout << "C6 " << 2.0*cos(3.0*PI/8.0) << endl;
    cout << "Q  " << (-2.0*cos(PI/8.0) + 2.0*cos(3.0*PI/8.0)) << endl;
    cout << "R  " << ( 2.0*cos(PI/8.0) + 2.0*cos(3.0*PI/8.0)) << endl;
  */
  C2b = (DCTInt2)(2.0*cos(PI/8.0)*IDCT_M1ScaleFact+0.5);
  C4b = (DCTInt2)(sqrt(2.0)*IDCT_M1ScaleFact+0.5);
  C6b = (DCTInt2)(2.0*cos(3.0*PI/8.0)*IDCT_M1ScaleFact+0.5);
  Qb  = (DCTInt2)((-2.0*cos(PI/8.0) + 2.0*cos(3.0*PI/8.0))*IDCT_M1ScaleFact-0.5);
  Rb  = (DCTInt2)(( 2.0*cos(PI/8.0) + 2.0*cos(3.0*PI/8.0))*IDCT_M1ScaleFact+0.5);
  
  InputScaleFactor[0] = 0.5*0.5*sqrt(2);
  {for (int i=1;i<8;i++) InputScaleFactor[i] = 0.5*cos(PI*((double)i)/16.0);}

  IsInitialized=true;


#if 1
  for (int u=0;u<8;u++)
    for (int v=0;v<8;v++)
      {
	d_fact[v][u] = (DCTInt2)(InputScaleFactor[u]*InputScaleFactor[v]*IDCT_M3ScaleFact+0.5);
      }
#else
  for (int u=0;u<8;u++)
    for (int v=0;v<8;v++)
      {
	d_fact[v][u] = (DCTInt2)(InputScaleFactor[u]*InputScaleFactor[v] * fact[v][u] *
                                 IDCT_M3ScaleFact+0.5);
      }
#endif





  {
    int i;
    
    iclp = iclip+512;
    for (i= -512; i<512; i++)
      iclp[i] = (i<-256) ? -256 : ((i>255) ? 255 : i);
  }
}


static struct DummyInit
{
  DummyInit() { Init(); }
} dummyinit;



void IDCT_Int(const short* in[8], short* out[8])
{
  IDCTInt fin[8][8];
  IDCTInt tmp[8][8];

  IDCTInt a0,a1,a2,a3,a4,a5,a6,a7;
  IDCTInt       b2,   b4,b5,b6   ;
  IDCTInt n0,n1,n2,n3            ;
  IDCTInt          m3,m4,m5,m6,m7;
   DCTInt2 tmp1,tmp2,tmp3,tmp4;


  // Row transforms

  {for (int u=0;u<8;u++)
    {
      if (             !in[1][u] && !in[2][u] && !in[3][u] &&
          !in[4][u] && !in[5][u] && !in[6][u] && !in[7][u])
        {
          tmp[0][u] = tmp[1][u] = tmp[2][u] = tmp[3][u] = 
          tmp[4][u] = tmp[5][u] = tmp[6][u] = tmp[7][u] = in[0][u]<<13;
        }
      else
        {
          {for (int v=0;v<8;v++)
            fin[v][u] = IDESCALE(MUL32(d_fact[v][u] , in[v][u]) , IDCT_PostM3_Shift);}

          a0 = fin[0][u];
          a1 = fin[4][u];
          a2 = fin[2][u]-fin[6][u];
          a3 = fin[2][u]+fin[6][u];
          a4 = fin[5][u]-fin[3][u];
          tmp1 = fin[1][u]+fin[7][u];
          tmp2 = fin[3][u]+fin[5][u];
          a5 = tmp1 - tmp2;
          a6 = fin[1][u]-fin[7][u];
          a7 = tmp1 + tmp2;
          
          tmp4 = MUL32(C6,(a4+a6));
          b2 = IDESCALE(MUL32(C4 , a2)       , IDCT_PostM2_Shift);
          b4 = IDESCALE(MUL32(Q  , a4) -tmp4 , IDCT_PostM2_Shift);
          b5 = IDESCALE(MUL32(C4 , a5)       , IDCT_PostM2_Shift);
          b6 = IDESCALE(MUL32(R  , a6) -tmp4 , IDCT_PostM2_Shift);

          a0 >>= -(IDCT_M2Scale - IDCT_PostM2_Shift);
          a1 >>= -(IDCT_M2Scale - IDCT_PostM2_Shift);
          a3 >>= -(IDCT_M2Scale - IDCT_PostM2_Shift);
          a7 >>= -(IDCT_M2Scale - IDCT_PostM2_Shift);
          
          tmp3 = b6-a7;
          n0 = tmp3-b5;
          n1 = a0-a1;
          n2 = b2-a3;
          n3 = a0+a1;
          
          m3 = n1+n2;
          m4 = n3+a3;
          m5 = n1-n2;
          m6 = n3-a3;
          m7 = b4-n0;
          
          tmp[0][u] = m4+a7;
          tmp[1][u] = m3+tmp3;
          tmp[2][u] = m5-n0;
          tmp[3][u] = m6-m7;
          tmp[4][u] = m6+m7;
          tmp[5][u] = m5+n0;
          tmp[6][u] = m3-tmp3;
          tmp[7][u] = m4-a7;
        }
    }}


  // Column transforms

  {for (int v=0;v<8;v++)
    {
      a0 = tmp[v][0];
      a1 = tmp[v][4];
      a2 = tmp[v][2]-tmp[v][6];
      a3 = tmp[v][2]+tmp[v][6];
      a4 = tmp[v][5]-tmp[v][3];
      tmp1 = tmp[v][1]+tmp[v][7];
      tmp2 = tmp[v][3]+tmp[v][5];
      a5 = tmp1 - tmp2;
      a6 = tmp[v][1]-tmp[v][7];
      a7 = tmp1 + tmp2;

      tmp4 = MUL32(C6,(a4+a6));
      b2 = IDESCALE(MUL32(C4 , a2)       , IDCT_PostM1_Shift);
      b4 = IDESCALE(MUL32(Q  , a4) -tmp4 , IDCT_PostM1_Shift);
      b5 = IDESCALE(MUL32(C4 , a5)       , IDCT_PostM1_Shift);
      b6 = IDESCALE(MUL32(R  , a6) -tmp4 , IDCT_PostM1_Shift);

      a0 >>= -(IDCT_M1Scale - IDCT_PostM1_Shift);
      a1 >>= -(IDCT_M1Scale - IDCT_PostM1_Shift);
      a3 >>= -(IDCT_M1Scale - IDCT_PostM1_Shift);
      a7 >>= -(IDCT_M1Scale - IDCT_PostM1_Shift);

      tmp3 = b6-a7;
      n0 = tmp3-b5;
      n1 = a0-a1;
      n2 = b2-a3;
      n3 = a0+a1;

      m3 = n1+n2;
      m4 = n3+a3;
      m5 = n1-n2;
      m6 = n3-a3;
      m7 = b4-n0;

      out[v][0] = IDESCALE( (m4+a7)   , IDCT_PostM1_NK);
      out[v][1] = IDESCALE( (m3+tmp3) , IDCT_PostM1_NK);
      out[v][2] = IDESCALE( (m5-n0)   , IDCT_PostM1_NK);
      out[v][3] = IDESCALE( (m6-m7)   , IDCT_PostM1_NK);
      out[v][4] = IDESCALE( (m6+m7)   , IDCT_PostM1_NK);
      out[v][5] = IDESCALE( (m5+n0)   , IDCT_PostM1_NK);
      out[v][6] = IDESCALE( (m3-tmp3) , IDCT_PostM1_NK);
      out[v][7] = IDESCALE( (m4-a7)   , IDCT_PostM1_NK);
    }}
}
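
/* Illustrative sketch (hypothetical, not dvdview code): IDCT_Int works on
   arrays of row pointers rather than on a flat block.  The wrapper below
   runs it over 64 contiguous shorts; whether in[v] should address row v or
   column v of the coefficients depends on the caller's layout convention,
   which this file does not spell out. */

static void IDCT_Int_Example(const short coeffs[64], short result[64])
{
  const short* in[8];
  short*       out[8];
  for (int v=0;v<8;v++)
    {
      in[v]  = coeffs + 8*v;   // v-th group of 8 coefficients
      out[v] = result + 8*v;
    }
  IDCT_Int(in, out);
}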





//// --------------- The following IDCT code originates from libJPEG! -----------------

/**********************************************************/
/* inverse two dimensional DCT, Chen-Wang algorithm       */
/* (cf. IEEE ASSP-32, pp. 803-816, Aug. 1984)             */
/* 32-bit integer arithmetic (8 bit coefficients)         */
/* 11 mults, 29 adds per DCT                              */
/*                                      sE, 18.8.91       */
/**********************************************************/
/* coefficients extended to 12 bit for IEEE1180-1990      */
/* compliance                           sE,  2.1.94       */
/**********************************************************/

/* this code assumes >> to be a two's-complement arithmetic */
/* right shift: (-2)>>1 == -1 , (-3)>>1 == -2               */

// #include "config.h"

#define W1 2841 /* 2048*sqrt(2)*cos(1*pi/16) */
#define W2 2676 /* 2048*sqrt(2)*cos(2*pi/16) */
#define W3 2408 /* 2048*sqrt(2)*cos(3*pi/16) */
#define W5 1609 /* 2048*sqrt(2)*cos(5*pi/16) */
#define W6 1108 /* 2048*sqrt(2)*cos(6*pi/16) */
#define W7 565  /* 2048*sqrt(2)*cos(7*pi/16) */
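
/* Illustrative note: the W constants are round(2048*sqrt(2)*cos(k*pi/16)).
   W4 would be 2048*sqrt(2)*cos(4*pi/16) = 2048 exactly, which is why
   coefficient 4 is handled by a <<11 shift instead of a multiply, and
   181/256 in the third stage approximates sqrt(0.5).  A throwaway generator
   (hypothetical, disabled so it cannot clash with the surrounding code): */

#if 0
#include <stdio.h>
#include <math.h>
int main(void)
{
  const double pi = 3.14159265358979323846;
  for (int k=1; k<=7; k++)   /* prints 2841 2676 2408 2048 1609 1108 565 */
    printf("W%d = %d\n", k, (int)floor(2048.0*sqrt(2.0)*cos(k*pi/16.0) + 0.5));
  printf("round(sqrt(0.5)*256) = %d\n", (int)floor(sqrt(0.5)*256.0 + 0.5));  /* 181 */
  return 0;
}
#endif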

/* global declarations */
//void Initialize_Fast_IDCT _ANSI_ARGS_((void));
//void Fast_IDCT _ANSI_ARGS_((short *block));

/* private prototypes */
//static void idctrow _ANSI_ARGS_((short *blk));
//static void idctcol _ANSI_ARGS_((short *blk));

/* row (horizontal) IDCT
 *
 *           7                       pi         1
 * dst[k] = sum c[l] * src[l] * cos( -- * ( k + - ) * l )
 *          l=0                      8          2
 *
 * where: c[0]    = 128
 *        c[1..7] = 128*sqrt(2)
 */

inline void idctrow(short *blk)
{
  int x0, x1, x2, x3, x4, x5, x6, x7, x8;

  /* shortcut */
  if (!((x1 = blk[4]<<11) | (x2 = blk[6]) | (x3 = blk[2]) |
        (x4 = blk[1]) | (x5 = blk[7]) | (x6 = blk[5]) | (x7 = blk[3])))
  {
    blk[0]=blk[1]=blk[2]=blk[3]=blk[4]=blk[5]=blk[6]=blk[7]=blk[0]<<3;
    return;
  }

  x0 = (blk[0]<<11) + 128; /* for proper rounding in the fourth stage */

  /* first stage */
  x8 = W7*(x4+x5);
  x4 = x8 + (W1-W7)*x4;
  x5 = x8 - (W1+W7)*x5;
  x8 = W3*(x6+x7);
  x6 = x8 - (W3-W5)*x6;
  x7 = x8 - (W3+W5)*x7;
  
  /* second stage */
  x8 = x0 + x1;
  x0 -= x1;
  x1 = W6*(x3+x2);
  x2 = x1 - (W2+W6)*x2;
  x3 = x1 + (W2-W6)*x3;
  x1 = x4 + x6;
  x4 -= x6;
  x6 = x5 + x7;
  x5 -= x7;
  
  /* third stage */
  x7 = x8 + x3;
  x8 -= x3;
  x3 = x0 + x2;
  x0 -= x2;
  x2 = (181*(x4+x5)+128)>>8;
  x4 = (181*(x4-x5)+128)>>8;
  
  /* fourth stage */
  blk[0] = (x7+x1)>>8;
  blk[1] = (x3+x2)>>8;
  blk[2] = (x0+x4)>>8;
  blk[3] = (x8+x6)>>8;
  blk[4] = (x8-x6)>>8;
  blk[5] = (x0-x4)>>8;
  blk[6] = (x3-x2)>>8;
  blk[7] = (x7-x1)>>8;
}
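
/* Illustrative cross-check (hypothetical, not dvdview code): because of the
   <<11 input scaling, the 2048*sqrt(2)-scaled W constants and the final >>8,
   idctrow's outputs equal the sum stated in the comment above divided by 16,
   up to a few LSBs of rounding.  A direct floating-point evaluation at that
   output scale: */

static void IdctRowReference(const short* src, short* dst)
{
  const double pi = 3.14159265358979323846;
  for (int k=0;k<8;k++)
    {
      double s = 0.0;
      for (int l=0;l<8;l++)
        {
          double c = (l==0) ? 8.0 : 8.0*sqrt(2.0);   // = the comment's c[l]/16
          s += c * src[l] * cos(pi/8.0 * (k+0.5) * l);
        }
      dst[k] = (short)floor(s + 0.5);   // matches idctrow up to rounding
    }
}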

/* column (vertical) IDCT
 *
 *             7                         pi         1
 * dst[8*k] = sum c[l] * src[8*l] * cos( -- * ( k + - ) * l )
 *            l=0                        8          2
 *
 * where: c[0]    = 1/1024
 *        c[1..7] = (1/1024)*sqrt(2)
 */
inline void idctcol2(short **in,int i,short** out)
{
  int x0, x1, x2, x3, x4, x5, x6, x7, x8;

  /* shortcut */
  if (!((x1 = (in[4][i]<<8)) | (x2 = in[6][i]) | (x3 = in[2][i]) |
        (x4 = in[1][i]) | (x5 = in[7][i]) | (x6 = in[5][i]) | (x7 = in[3][i])))
  {
      out[0][i] = out[1][i] = out[2][i] = out[3][i] =
      out[4][i] = out[5][i] = out[6][i] = out[7][i] = (in[0][i]+32)>>6;  //iclp
    return;
  }

  x0 = (in[0][i]<<8) + 8192;

  /* first stage */
  x8 = W7*(x4+x5) + 4;
  x4 = (x8+(W1-W7)*x4)>>3;
  x5 = (x8-(W1+W7)*x5)>>3;
  x8 = W3*(x6+x7) + 4;
  x6 = (x8-(W3-W5)*x6)>>3;
  x7 = (x8-(W3+W5)*x7)>>3;
  
  /* second stage */
  x8 = x0 + x1;
  x0 -= x1;
  x1 = W6*(x3+x2) + 4;
  x2 = (x1-(W2+W6)*x2)>>3;
  x3 = (x1+(W2-W6)*x3)>>3;
  x1 = x4 + x6;
  x4 -= x6;
  x6 = x5 + x7;
  x5 -= x7;
  
  /* third stage */
  x7 = x8 + x3;
  x8 -= x3;
  x3 = x0 + x2;
  x0 -= x2;
  x2 = (181*(x4+x5)+128)>>8;
  x4 = (181*(x4-x5)+128)>>8;
  
  /* fourth stage */
  out[0][i] = (x7+x1)>>14;
  out[1][i] = (x3+x2)>>14;
  out[2][i] = (x0+x4)>>14;
  out[3][i] = (x8+x6)>>14;
  out[4][i] = (x8-x6)>>14;
  out[5][i] = (x0-x4)>>14;
  out[6][i] = (x3-x2)>>14;
  out[7][i] = (x7-x1)>>14;
}

inline void idctcol2b(short *in,int i,Pixel** out,bool add)
{
  int x0, x1, x2, x3, x4, x5, x6, x7, x8;

  /* shortcut */
  if (!((x1 = (in[4*8]<<8)) | (x2 = in[6*8]) | (x3 = in[2*8]) |
        (x4 = in[1*8]) | (x5 = in[7*8]) | (x6 = in[5*8]) | (x7 = in[3*8])))
  {
    if (add)
      {
#if 0   /* disabled fast path for the DC-only add case */
        out[0][i] = clip2_0_255[ out[0][i] + ((in[0*8]+32)>>6)];
        out[1][i] = clip2_0_255[ out[1][i] + ((in[0*8]+32)>>6)];
        out[2][i] = clip2_0_255[ out[2][i] + ((in[0*8]+32)>>6)];
        out[3][i] = clip2_0_255[ out[3][i] + ((in[0*8]+32)>>6)];
        out[4][i] = clip2_0_255[ out[4][i] + ((in[0*8]+32)>>6)];
        out[5][i] = clip2_0_255[ out[5][i] + ((in[0*8]+32)>>6)];
        out[6][i] = clip2_0_255[ out[6][i] + ((in[0*8]+32)>>6)];
        out[7][i] = clip2_0_255[ out[7][i] + ((in[0*8]+32)>>6)];
        return;
        return;
#endif
      }
    else
      {
#if 1
        out[0][i] = out[1][i] = out[2][i] = out[3][i] =
        out[4][i] = out[5][i] = out[6][i] = out[7][i] = clip2_0_255[(in[0*8]+32)>>6];  //iclp
        return;
#endif
      }
  }

  x0 = (in[0*8]<<8) + 8192;

  /* first stage */
  x8 = W7*(x4+x5) + 4;
  x4 = (x8+(W1-W7)*x4)>>3;
  x5 = (x8-(W1+W7)*x5)>>3;
  x8 = W3*(x6+x7) + 4;
  x6 = (x8-(W3-W5)*x6)>>3;
  x7 = (x8-(W3+W5)*x7)>>3;
  
  /* second stage */
  x8 = x0 + x1;
  x0 -= x1;
  x1 = W6*(x3+x2) + 4;
  x2 = (x1-(W2+W6)*x2)>>3;
  x3 = (x1+(W2-W6)*x3)>>3;
  x1 = x4 + x6;
  x4 -= x6;
  x6 = x5 + x7;
  x5 -= x7;
  
  /* third stage */
  x7 = x8 + x3;
  x8 -= x3;
  x3 = x0 + x2;
  x0 -= x2;
  x2 = (181*(x4+x5)+128)>>8;
  x4 = (181*(x4-x5)+128)>>8;
  
  /* fourth stage */

  if (add)
    {
      out[0][i] = clip2_0_255[ out[0][i] + ((x7+x1)>>14) ];
      out[7][i] = clip2_0_255[ out[7][i] + ((x7-x1)>>14) ];
      out[1][i] = clip2_0_255[ out[1][i] + ((x3+x2)>>14) ];
      out[6][i] = clip2_0_255[ out[6][i] + ((x3-x2)>>14) ];
      out[2][i] = clip2_0_255[ out[2][i] + ((x0+x4)>>14) ];
      out[5][i] = clip2_0_255[ out[5][i] + ((x0-x4)>>14) ];
      out[3][i] = clip2_0_255[ out[3][i] + ((x8+x6)>>14) ];
      out[4][i] = clip2_0_255[ out[4][i] + ((x8-x6)>>14) ];
    }
  else
    {
      out[0][i] = clip2_0_255[ ((x7+x1)>>14) ];
      out[7][i] = clip2_0_255[ ((x7-x1)>>14) ];
      out[1][i] = clip2_0_255[ ((x3+x2)>>14) ];
      out[6][i] = clip2_0_255[ ((x3-x2)>>14) ];
      out[2][i] = clip2_0_255[ ((x0+x4)>>14) ];
      out[5][i] = clip2_0_255[ ((x0-x4)>>14) ];
      out[3][i] = clip2_0_255[ ((x8+x6)>>14) ];
      out[4][i] = clip2_0_255[ ((x8-x6)>>14) ];
    }
}

/* two dimensional inverse discrete cosine transform */
void IDCT_Int2(short* in[8], short* out[8])
{
  int i;

  for (i=0; i<8; i++)
    idctrow(in[i]);

  for (i=0; i<8; i++)
    idctcol2(in,i,out);
}


void IDCT_Int2b(short* in, Pixel* out[8],bool add)
{
  int i;

  for (i=0; i<8; i++)
    idctrow(in+8*i);

  for (i=0; i<8; i++)
    idctcol2b(in+i,i,out,add);
}
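
/* Illustrative call site (hypothetical, not dvdview code): IDCT_Int2b takes
   a flat 64-coefficient block -- which the row pass overwrites in place --
   and writes saturated 8-bit pixels through eight row pointers.  `add'
   distinguishes inter blocks (residual added to the prediction already in
   the frame) from intra blocks (result stored directly).  The frame layout
   with a byte stride is an assumption: */

static void IdctIntoFrameExample(short coeffs[64],          // clobbered!
                                 Pixel* blk_topleft, int stride,
                                 bool inter)
{
  Pixel* rows[8];
  for (int y=0;y<8;y++)
    rows[y] = blk_topleft + y*stride;   // one pointer per output row

  IDCT_Int2b(coeffs, rows, inter);
}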

--- NEW FILE: mcomp_sgl_simple.cc ---
/********************************************************************************
    Copyright (C) 1999  Dirk Farin

    This program is distributed under GNU Public License (GPL) as
    outlined in the COPYING file that comes with the source distribution.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
[...1374 lines suppressed...]
}

MotionCompensation_SglMB::MCompFunc*const*
MotionCompensation_SglMB_Simple::AskMCompFunc_Dbl_Luma() const
{
  return dblluma;
}

MotionCompensation_SglMB::MCompFunc*const*
MotionCompensation_SglMB_Simple::AskMCompFunc_Sgl_Chroma(uint2 chroma) const
{
  // NOTE: the `chroma' format argument is ignored; the 4:2:0 tables are
  // returned for every chroma format (likewise in the Dbl variant below).
  return sglchroma420;
}

MotionCompensation_SglMB::MCompFunc*const*
MotionCompensation_SglMB_Simple::AskMCompFunc_Dbl_Chroma(uint2 chroma) const
{
  return dblchroma420;
}




