1 | /*
|
---|
2 | * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
|
---|
3 | *
|
---|
4 | * This library is free software; you can redistribute it and/or
|
---|
5 | * modify it under the terms of the GNU Lesser General Public
|
---|
6 | * License as published by the Free Software Foundation; either
|
---|
7 | * version 2 of the License, or (at your option) any later version.
|
---|
8 | *
|
---|
9 | * This library is distributed in the hope that it will be useful,
|
---|
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
---|
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
---|
12 | * Lesser General Public License for more details.
|
---|
13 | *
|
---|
14 | * You should have received a copy of the GNU Lesser General Public
|
---|
15 | * License along with this library; if not, write to the Free Software
|
---|
16 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
---|
17 | */
|
---|
18 |
|
---|
19 |
|
---|
20 | /***********************************/
|
---|
21 | /* IDCT */
|
---|
22 |
|
---|
/* in/out: mma=mma+mmb, mmb=mmb-mma */
#define SUMSUB_BA( a, b ) \
    "paddw "#b", "#a" \n\t" /* a = a+b               */\
    "paddw "#b", "#b" \n\t" /* b = 2b                */\
    "psubw "#a", "#b" \n\t" /* b = 2b-(a+b) = b_old-a_old */
|
---|
28 |
|
---|
/* Two independent interleaved butterflies (same math as SUMSUB_BA):
 * a=a+b, b=b-a  and  c=c+d, d=d-c  (old values on the right-hand sides). */
#define SUMSUB_BADC( a, b, c, d ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#d", "#c" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "paddw "#d", "#d" \n\t"\
    "psubw "#a", "#b" \n\t"\
    "psubw "#c", "#d" \n\t"
|
---|
36 |
|
---|
/* Butterfly with one input halved on each side:
 * b = a + (b>>1),  a = (a>>1) - b_old.  t is a scratch register. */
#define SUMSUBD2_AB( a, b, t ) \
    "movq "#b", "#t" \n\t" /* t = b            */\
    "psraw $1 , "#b" \n\t" /* b = b>>1         */\
    "paddw "#a", "#b" \n\t" /* b = a + (b>>1)   */\
    "psraw $1 , "#a" \n\t" /* a = a>>1         */\
    "psubw "#t", "#a" \n\t" /* a = (a>>1) - b_old */
|
---|
43 |
|
---|
/* One 1-D pass of the 4-point H.264 inverse transform on four MMX registers
 * holding the even sums/diffs (s02,d02) and odd sums/diffs (s13,d13);
 * t is a scratch register.  Built from the word butterflies above. */
#define IDCT4_1D( s02, s13, d02, d13, t ) \
    SUMSUB_BA ( s02, d02 )\
    SUMSUBD2_AB( s13, d13, t )\
    SUMSUB_BADC( d13, s02, s13, d02 )
|
---|
48 |
|
---|
/* Transpose a 4x4 matrix of 16-bit words held in registers a,b,c,d using
 * SBUTTERFLY (defined elsewhere in this file); t is a scratch register.
 * Per-line comments track where each row ends up: the transposed rows
 * finish in a, d, t, c (b is clobbered). */
#define TRANSPOSE4(a,b,c,d,t)\
    SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
    SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
    SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
    SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */
|
---|
54 |
|
---|
/* Scale the four 16-bit residuals in p down by 64 (the IDCT's final shift),
 * add them to the four pixels at (%0) with signed-word then unsigned-byte
 * saturation, and store back.  t is scratch; z must hold zero. */
#define STORE_DIFF_4P( p, t, z ) \
    "psraw $6, "#p" \n\t" /* p >>= 6 (IDCT scaling)       */\
    "movd (%0), "#t" \n\t" /* load 4 destination pixels    */\
    "punpcklbw "#z", "#t" \n\t" /* widen pixels to words        */\
    "paddsw "#t", "#p" \n\t" /* pixel + residual, saturating */\
    "packuswb "#z", "#p" \n\t" /* clamp to [0,255], pack bytes */\
    "movd "#p", (%0) \n\t"
|
---|
62 |
|
---|
/* 4x4 H.264 inverse transform of the 16 coefficients in block[], with the
 * result added (saturated) to the 4x4 pixel area at dst.
 * Uses MMX only; register flow between the three asm blocks is fixed, so
 * they must stay adjacent with no intervening FPU/MMX code. */
static void ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    /* Load dct coeffs */
    asm volatile(
        "movq (%0), %%mm0 \n\t"
        "movq 8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
    :: "r"(block) );

    asm volatile(
        /* mm1=s02+s13 mm2=s02-s13 mm4=d02+d13 mm0=d02-d13 */
        IDCT4_1D( %%mm2, %%mm1, %%mm0, %%mm3, %%mm4 )

        "movq %0, %%mm6 \n\t"
        /* in: 1,4,0,2 out: 1,2,3,0 */
        TRANSPOSE4( %%mm3, %%mm1, %%mm0, %%mm2, %%mm4 )

        /* add the rounding bias of 32 (ff_pw_32) before the second pass;
         * STORE_DIFF_4P's >>6 then gives correct rounding */
        "paddw %%mm6, %%mm3 \n\t"

        /* mm2=s02+s13 mm3=s02-s13 mm4=d02+d13 mm1=d02-d13 */
        IDCT4_1D( %%mm4, %%mm2, %%mm3, %%mm0, %%mm1 )

        "pxor %%mm7, %%mm7 \n\t" /* mm7 = 0 for STORE_DIFF_4P below */
    :: "m"(ff_pw_32));

    /* add each of the four result rows to dst, one row per stride step */
    asm volatile(
    STORE_DIFF_4P( %%mm0, %%mm1, %%mm7)
        "add %1, %0 \n\t"
    STORE_DIFF_4P( %%mm2, %%mm1, %%mm7)
        "add %1, %0 \n\t"
    STORE_DIFF_4P( %%mm3, %%mm1, %%mm7)
        "add %1, %0 \n\t"
    STORE_DIFF_4P( %%mm4, %%mm1, %%mm7)
        : "+r"(dst)
        : "r" ((long)stride)
    );
}
|
---|
101 |
|
---|
/* One 1-D pass of the 8-point H.264 inverse transform over eight rows of
 * 16-bit coefficients at block[0],block[8],...,block[56] (byte offsets
 * 0,16,...,112).  The eight results are left in mm0..mm7; callers in this
 * file read/store all eight registers immediately afterwards, so no other
 * MMX code may run in between. */
static inline void h264_idct8_1d(int16_t *block)
{
    asm volatile(
        /* odd half: rows 1,3,5,7 */
        "movq 112(%0), %%mm7 \n\t"
        "movq 80(%0), %%mm5 \n\t"
        "movq 48(%0), %%mm3 \n\t"
        "movq 16(%0), %%mm1 \n\t"

        "movq %%mm7, %%mm4 \n\t"
        "movq %%mm3, %%mm6 \n\t"
        "movq %%mm5, %%mm0 \n\t"
        "movq %%mm7, %%mm2 \n\t"
        "psraw $1, %%mm4 \n\t"
        "psraw $1, %%mm6 \n\t"
        "psubw %%mm7, %%mm0 \n\t"
        "psubw %%mm6, %%mm2 \n\t"
        "psubw %%mm4, %%mm0 \n\t"
        "psubw %%mm3, %%mm2 \n\t"
        "psubw %%mm3, %%mm0 \n\t"
        "paddw %%mm1, %%mm2 \n\t"

        "movq %%mm5, %%mm4 \n\t"
        "movq %%mm1, %%mm6 \n\t"
        "psraw $1, %%mm4 \n\t"
        "psraw $1, %%mm6 \n\t"
        "paddw %%mm5, %%mm4 \n\t"
        "paddw %%mm1, %%mm6 \n\t"
        "paddw %%mm7, %%mm4 \n\t"
        "paddw %%mm5, %%mm6 \n\t"
        "psubw %%mm1, %%mm4 \n\t"
        "paddw %%mm3, %%mm6 \n\t"

        /* combine the four odd intermediates (with >>2 scaling) */
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm4, %%mm3 \n\t"
        "movq %%mm2, %%mm5 \n\t"
        "movq %%mm6, %%mm7 \n\t"
        "psraw $2, %%mm6 \n\t"
        "psraw $2, %%mm3 \n\t"
        "psraw $2, %%mm5 \n\t"
        "psraw $2, %%mm0 \n\t"
        "paddw %%mm6, %%mm1 \n\t"
        "paddw %%mm2, %%mm3 \n\t"
        "psubw %%mm4, %%mm5 \n\t"
        "psubw %%mm0, %%mm7 \n\t"

        /* even half: rows 2 and 6 */
        "movq 32(%0), %%mm2 \n\t"
        "movq 96(%0), %%mm6 \n\t"
        "movq %%mm2, %%mm4 \n\t"
        "movq %%mm6, %%mm0 \n\t"
        "psraw $1, %%mm4 \n\t"
        "psraw $1, %%mm6 \n\t"
        "psubw %%mm0, %%mm4 \n\t"
        "paddw %%mm2, %%mm6 \n\t"

        /* rows 0 and 4, then the final butterfly tree */
        "movq (%0), %%mm2 \n\t"
        "movq 64(%0), %%mm0 \n\t"
        SUMSUB_BA( %%mm0, %%mm2 )
        SUMSUB_BA( %%mm6, %%mm0 )
        SUMSUB_BA( %%mm4, %%mm2 )
        SUMSUB_BA( %%mm7, %%mm6 )
        SUMSUB_BA( %%mm5, %%mm4 )
        SUMSUB_BA( %%mm3, %%mm2 )
        SUMSUB_BA( %%mm1, %%mm0 )
        :: "r"(block)
    );
}
|
---|
168 |
|
---|
/* 8x8 H.264 inverse transform of block[], result added (clamped) to the
 * 8x8 pixel area at dst.  The 8-point 1-D transform only fits 4 columns of
 * MMX registers at a time, so each pass is done in two 4-column halves:
 * pass 1 transforms rows and transposes into the aligned temp b2, pass 2
 * transforms b2's rows (the original columns) and scales by >>6. */
static void ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    int i;
    int16_t __attribute__ ((aligned(8))) b2[64];

    /* fold the rounding bias for the final >>6 into the DC coefficient */
    block[0] += 32;

    for(i=0; i<2; i++){
        uint64_t tmp; /* spill slot: only 8 MMX regs, 9 values live across the transposes */

        h264_idct8_1d(block+4*i);

        asm volatile(
            "movq %%mm7, %0 \n\t"
            TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
            "movq %%mm0, 8(%1) \n\t"
            "movq %%mm6, 24(%1) \n\t"
            "movq %%mm7, 40(%1) \n\t"
            "movq %%mm4, 56(%1) \n\t"
            "movq %0, %%mm7 \n\t"
            TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
            "movq %%mm7, (%1) \n\t"
            "movq %%mm1, 16(%1) \n\t"
            "movq %%mm0, 32(%1) \n\t"
            "movq %%mm3, 48(%1) \n\t"
            : "=m"(tmp)
            : "r"(b2+32*i)
            : "memory"
        );
    }

    for(i=0; i<2; i++){
        h264_idct8_1d(b2+4*i);

        /* scale all eight result rows by >>6 and store them back to b2 */
        asm volatile(
            "psraw $6, %%mm7 \n\t"
            "psraw $6, %%mm6 \n\t"
            "psraw $6, %%mm5 \n\t"
            "psraw $6, %%mm4 \n\t"
            "psraw $6, %%mm3 \n\t"
            "psraw $6, %%mm2 \n\t"
            "psraw $6, %%mm1 \n\t"
            "psraw $6, %%mm0 \n\t"

            "movq %%mm7, (%0) \n\t"
            "movq %%mm5, 16(%0) \n\t"
            "movq %%mm3, 32(%0) \n\t"
            "movq %%mm1, 48(%0) \n\t"
            "movq %%mm0, 64(%0) \n\t"
            "movq %%mm2, 80(%0) \n\t"
            "movq %%mm4, 96(%0) \n\t"
            "movq %%mm6, 112(%0) \n\t"
            :: "r"(b2+4*i)
            : "memory"
        );
    }

    /* add the finished residual block to the destination pixels */
    add_pixels_clamped_mmx(b2, dst, stride);
}
|
---|
228 |
|
---|
/* DC-only 4x4 IDCT + add: every residual sample equals (block[0]+32)>>6,
 * so just add that constant to the 4x4 pixels at dst with clamping.
 * mm0 is the byte-splatted positive part of dc, mm1 the positive part of
 * -dc (one of the two is zero), so paddusb+psubusb implements a signed
 * add clamped to [0,255].  Requires MMX2 (pshufw). */
static void ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    asm volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t" /* splat dc into all 4 words */
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t" /* mm1 = -dc */
        "packuswb %%mm0, %%mm0 \n\t" /* mm0 = max(dc,0) as bytes  */
        "packuswb %%mm1, %%mm1 \n\t" /* mm1 = max(-dc,0) as bytes */
        ::"r"(dc)
    );
    asm volatile(
        "movd %0, %%mm2 \n\t"
        "movd %1, %%mm3 \n\t"
        "movd %2, %%mm4 \n\t"
        "movd %3, %%mm5 \n\t"
        "paddusb %%mm0, %%mm2 \n\t"
        "paddusb %%mm0, %%mm3 \n\t"
        "paddusb %%mm0, %%mm4 \n\t"
        "paddusb %%mm0, %%mm5 \n\t"
        "psubusb %%mm1, %%mm2 \n\t"
        "psubusb %%mm1, %%mm3 \n\t"
        "psubusb %%mm1, %%mm4 \n\t"
        "psubusb %%mm1, %%mm5 \n\t"
        "movd %%mm2, %0 \n\t"
        "movd %%mm3, %1 \n\t"
        "movd %%mm4, %2 \n\t"
        "movd %%mm5, %3 \n\t"
        :"+m"(*(uint32_t*)(dst+0*stride)),
         "+m"(*(uint32_t*)(dst+1*stride)),
         "+m"(*(uint32_t*)(dst+2*stride)),
         "+m"(*(uint32_t*)(dst+3*stride))
    );
}
|
---|
264 |
|
---|
/* DC-only 8x8 IDCT + add: same splat/clamped-add scheme as
 * ff_h264_idct_dc_add_mmx2 above, but 8 bytes wide over 8 rows,
 * processed 4 rows per loop iteration.  Requires MMX2 (pshufw). */
static void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    int y;
    asm volatile(
        "movd %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t" /* splat dc into all 4 words */
        "pxor %%mm1, %%mm1 \n\t"
        "psubw %%mm0, %%mm1 \n\t" /* mm1 = -dc */
        "packuswb %%mm0, %%mm0 \n\t" /* mm0 = max(dc,0) as bytes  */
        "packuswb %%mm1, %%mm1 \n\t" /* mm1 = max(-dc,0) as bytes */
        ::"r"(dc)
    );
    for(y=2; y--; dst += 4*stride){
        asm volatile(
            "movq %0, %%mm2 \n\t"
            "movq %1, %%mm3 \n\t"
            "movq %2, %%mm4 \n\t"
            "movq %3, %%mm5 \n\t"
            "paddusb %%mm0, %%mm2 \n\t"
            "paddusb %%mm0, %%mm3 \n\t"
            "paddusb %%mm0, %%mm4 \n\t"
            "paddusb %%mm0, %%mm5 \n\t"
            "psubusb %%mm1, %%mm2 \n\t"
            "psubusb %%mm1, %%mm3 \n\t"
            "psubusb %%mm1, %%mm4 \n\t"
            "psubusb %%mm1, %%mm5 \n\t"
            "movq %%mm2, %0 \n\t"
            "movq %%mm3, %1 \n\t"
            "movq %%mm4, %2 \n\t"
            "movq %%mm5, %3 \n\t"
            :"+m"(*(uint64_t*)(dst+0*stride)),
             "+m"(*(uint64_t*)(dst+1*stride)),
             "+m"(*(uint64_t*)(dst+2*stride)),
             "+m"(*(uint64_t*)(dst+3*stride))
        );
    }
}
|
---|
303 |
|
---|
304 |
|
---|
305 | /***********************************/
|
---|
306 | /* deblocking */
|
---|
307 |
|
---|
// out: o = |x-y|>a  (per byte: o is nonzero where the compare is true,
//                    zero elsewhere, via unsigned saturating subtracts)
// clobbers: t
#define DIFF_GT_MMX(x,y,a,o,t)\
    "movq "#y", "#t" \n\t"\
    "movq "#x", "#o" \n\t"\
    "psubusb "#x", "#t" \n\t" /* t = max(y-x, 0) */\
    "psubusb "#y", "#o" \n\t" /* o = max(x-y, 0) */\
    "por "#t", "#o" \n\t" /* o = |x-y|      */\
    "psubusb "#a", "#o" \n\t" /* o = max(|x-y|-a, 0) */
|
---|
317 |
|
---|
// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1
// out: mm5=beta-1, mm7=mask
// clobbers: mm4,mm6
// Builds the per-byte deblocking decision mask: mm7 = 0xff where
// |p0-q0|<alpha AND |p1-p0|<beta AND |q1-q0|<beta, else 0
// (the pcmpeqb against zero inverts the accumulated "any exceeded" bits).
#define H264_DEBLOCK_MASK(alpha1, beta1) \
    "pshufw $0, "#alpha1", %%mm4 \n\t"\
    "pshufw $0, "#beta1 ", %%mm5 \n\t"\
    "packuswb %%mm4, %%mm4 \n\t" /* splat alpha-1 into 8 bytes */\
    "packuswb %%mm5, %%mm5 \n\t" /* splat beta-1  into 8 bytes */\
    DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\
    DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\
    "por %%mm4, %%mm7 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pcmpeqb %%mm6, %%mm7 \n\t"
|
---|
333 |
|
---|
// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask)
// out: mm1=p0' mm2=q0'
// clobbers: mm0,3-6
// Byte-parallel p0/q0 update: the signed delta ((q0-p0)*4 + (p1-q1) + 4)>>3
// is built out of pavgb averages plus an exact-rounding correction term d,
// then clamped to +-tc via pminub (mm7) and applied with saturating adds.
#define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\
    /* a = q0^p0^((p1-q1)>>2) */\
    "movq %%mm0, %%mm4 \n\t"\
    "psubb %%mm3, %%mm4 \n\t"\
    "psrlw $2, %%mm4 \n\t"\
    "pxor %%mm1, %%mm4 \n\t"\
    "pxor %%mm2, %%mm4 \n\t"\
    /* b = p0^(q1>>2) */\
    "psrlw $2, %%mm3 \n\t"\
    "pand "#pb_3f", %%mm3 \n\t" /* mask off bits shifted in from the neighbouring byte */\
    "movq %%mm1, %%mm5 \n\t"\
    "pxor %%mm3, %%mm5 \n\t"\
    /* c = q0^(p1>>2) */\
    "psrlw $2, %%mm0 \n\t"\
    "pand "#pb_3f", %%mm0 \n\t"\
    "movq %%mm2, %%mm6 \n\t"\
    "pxor %%mm0, %%mm6 \n\t"\
    /* d = (c^b) & ~(b^a) & 1 */\
    "pxor %%mm5, %%mm6 \n\t"\
    "pxor %%mm4, %%mm5 \n\t"\
    "pandn %%mm6, %%mm5 \n\t"\
    "pand "#pb_01", %%mm5 \n\t"\
    /* delta = (avg(q0, p1>>2) + (d&a))
     * - (avg(p0, q1>>2) + (d&~a)) */\
    "pavgb %%mm2, %%mm0 \n\t"\
    "pand %%mm5, %%mm4 \n\t"\
    "paddusb %%mm4, %%mm0 \n\t"\
    "pavgb %%mm1, %%mm3 \n\t"\
    "pxor %%mm5, %%mm4 \n\t"\
    "paddusb %%mm4, %%mm3 \n\t"\
    /* p0 += clip(delta, -tc0, tc0)
     * q0 -= clip(delta, -tc0, tc0) */\
    "movq %%mm0, %%mm4 \n\t"\
    "psubusb %%mm3, %%mm0 \n\t" /* mm0 = max(delta, 0)  */\
    "psubusb %%mm4, %%mm3 \n\t" /* mm3 = max(-delta, 0) */\
    "pminub %%mm7, %%mm0 \n\t" /* clamp to +tc */\
    "pminub %%mm7, %%mm3 \n\t" /* clamp to -tc */\
    "paddusb %%mm0, %%mm1 \n\t"\
    "paddusb %%mm3, %%mm2 \n\t"\
    "psubusb %%mm3, %%mm1 \n\t"\
    "psubusb %%mm0, %%mm2 \n\t"
|
---|
378 |
|
---|
// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %8=mm_bone
// out: (q1addr) = clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
// clobbers: q2, tmp, tc0
// (Despite the q-side naming, the same macro filters p1 when called with
//  the p-side registers/addresses — see h264_loop_filter_luma_mmx2.)
#define H264_DEBLOCK_Q1(p1, q2, q2addr, q1addr, tc0, tmp)\
    "movq %%mm1, "#tmp" \n\t"\
    "pavgb %%mm2, "#tmp" \n\t" /* (p0+q0+1)>>1 */\
    "pavgb "#tmp", "#q2" \n\t" /* avg(p2,avg(p0,q0)) */\
    "pxor "q2addr", "#tmp" \n\t"\
    "pand %8, "#tmp" \n\t" /* (p2^avg(p0,q0))&1 */\
    "psubusb "#tmp", "#q2" \n\t" /* (p2+((p0+q0+1)>>1))>>1 */\
    "movq "#p1", "#tmp" \n\t"\
    "psubusb "#tc0", "#tmp" \n\t" /* q1-tc0 (lower clip bound) */\
    "paddusb "#p1", "#tc0" \n\t" /* q1+tc0 (upper clip bound) */\
    "pmaxub "#tmp", "#q2" \n\t"\
    "pminub "#tc0", "#q2" \n\t"\
    "movq "#q2", "q1addr" \n\t"
|
---|
395 |
|
---|
/* Deblock one 8-pixel-wide horizontal luma edge (bS < 4).
 * pix points at the first q-side row; rows p2,p1,p0 lie above it, q0,q1,q2
 * below.  alpha1/beta1 are alpha-1/beta-1; tc0 holds the two per-4-pixel
 * clip values, a negative entry meaning "don't filter" that half.
 * Operand map inside the asm: %0=tmp0 (spill), %1=pix-3*stride, %2=pix,
 * %3=stride, %4=tc, %5=mask, %6=alpha1, %7=beta1, %8=mm_bone, %9=ff_pb_3F. */
static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    uint64_t tmp0;
    /* byte-splat tc0[0] into the low 4 bytes, tc0[1] into bytes 2-3 */
    uint64_t tc = (uint8_t)tc0[1]*0x01010000 | (uint8_t)tc0[0]*0x0101;
    // with luma, tc0=0 doesn't mean no filtering, so we need a separate input mask
    uint32_t mask[2] = { (tc0[0]>=0)*0xffffffff, (tc0[1]>=0)*0xffffffff };

    asm volatile(
        "movq (%1,%3), %%mm0 \n\t" //p1
        "movq (%1,%3,2), %%mm1 \n\t" //p0
        "movq (%2), %%mm2 \n\t" //q0
        "movq (%2,%3), %%mm3 \n\t" //q1
        H264_DEBLOCK_MASK(%6, %7)
        "pand %5, %%mm7 \n\t" /* combine threshold mask with tc0>=0 mask */
        "movq %%mm7, %0 \n\t" /* spill the mask; all 8 regs are needed below */

        /* filter p1 */
        "movq (%1), %%mm3 \n\t" //p2
        DIFF_GT_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1
        "pandn %%mm7, %%mm6 \n\t"
        "pcmpeqb %%mm7, %%mm6 \n\t"
        "pand %%mm7, %%mm6 \n\t" // mask & |p2-p0|<beta
        "pshufw $80, %4, %%mm4 \n\t"
        "pand %%mm7, %%mm4 \n\t" // mask & tc0
        "movq %8, %%mm7 \n\t"
        "pand %%mm6, %%mm7 \n\t" // mask & |p2-p0|<beta & 1
        "pand %%mm4, %%mm6 \n\t" // mask & |p2-p0|<beta & tc0
        "paddb %%mm4, %%mm7 \n\t" // tc++
        H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%1)", "(%1,%3)", %%mm6, %%mm4)

        /* filter q1 */
        "movq (%2,%3,2), %%mm4 \n\t" //q2
        DIFF_GT_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1
        "pandn %0, %%mm6 \n\t"
        "pcmpeqb %0, %%mm6 \n\t"
        "pand %0, %%mm6 \n\t"
        "pshufw $80, %4, %%mm5 \n\t"
        "pand %%mm6, %%mm5 \n\t"
        "pand %8, %%mm6 \n\t"
        "paddb %%mm6, %%mm7 \n\t" /* tc++ again where q1 was filtered */
        "movq (%2,%3), %%mm3 \n\t"
        H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%2,%3,2)", "(%2,%3)", %%mm5, %%mm6)

        /* filter p0, q0 */
        H264_DEBLOCK_P0_Q0(%8, %9)
        "movq %%mm1, (%1,%3,2) \n\t"
        "movq %%mm2, (%2) \n\t"

        : "=m"(tmp0)
        : "r"(pix-3*stride), "r"(pix), "r"((long)stride),
          "m"(tc), "m"(*(uint64_t*)mask), "m"(alpha1), "m"(beta1),
          "m"(mm_bone), "m"(ff_pb_3F)
    );
}
|
---|
450 |
|
---|
451 | static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
|
---|
452 | {
|
---|
453 | if((tc0[0] & tc0[1]) >= 0)
|
---|
454 | h264_loop_filter_luma_mmx2(pix, stride, alpha-1, beta-1, tc0);
|
---|
455 | if((tc0[2] & tc0[3]) >= 0)
|
---|
456 | h264_loop_filter_luma_mmx2(pix+8, stride, alpha-1, beta-1, tc0+2);
|
---|
457 | }
|
---|
458 | static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
|
---|
459 | {
|
---|
460 | //FIXME: could cut some load/stores by merging transpose with filter
|
---|
461 | // also, it only needs to transpose 6x8
|
---|
462 | uint8_t trans[8*8];
|
---|
463 | int i;
|
---|
464 | for(i=0; i<2; i++, pix+=8*stride, tc0+=2) {
|
---|
465 | if((tc0[0] & tc0[1]) < 0)
|
---|
466 | continue;
|
---|
467 | transpose4x4(trans, pix-4, 8, stride);
|
---|
468 | transpose4x4(trans +4*8, pix, 8, stride);
|
---|
469 | transpose4x4(trans+4, pix-4+4*stride, 8, stride);
|
---|
470 | transpose4x4(trans+4+4*8, pix +4*stride, 8, stride);
|
---|
471 | h264_loop_filter_luma_mmx2(trans+4*8, 8, alpha-1, beta-1, tc0);
|
---|
472 | transpose4x4(pix-2, trans +2*8, stride, 8);
|
---|
473 | transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8);
|
---|
474 | }
|
---|
475 | }
|
---|
476 |
|
---|
/* Deblock one 8-pixel-wide horizontal chroma edge (bS < 4): only p0/q0 are
 * modified.  pix points at the q0 row; p1,p0 lie above.  alpha1/beta1 are
 * alpha-1/beta-1; the four tc0 bytes are splatted pairwise via punpcklbw.
 * Operand map: %0=pix-2*stride, %1=pix, %2=stride, %3=tc0 dword,
 * %4=alpha1, %5=beta1, %6=mm_bone, %7=ff_pb_3F. */
static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    asm volatile(
        "movq (%0), %%mm0 \n\t" //p1
        "movq (%0,%2), %%mm1 \n\t" //p0
        "movq (%1), %%mm2 \n\t" //q0
        "movq (%1,%2), %%mm3 \n\t" //q1
        H264_DEBLOCK_MASK(%4, %5)
        "movd %3, %%mm6 \n\t"
        "punpcklbw %%mm6, %%mm6 \n\t" /* expand tc0[0..3] to one byte per pixel pair */
        "pand %%mm6, %%mm7 \n\t" // mm7 = tc&mask
        H264_DEBLOCK_P0_Q0(%6, %7)
        "movq %%mm1, (%0,%2) \n\t"
        "movq %%mm2, (%1) \n\t"

        :: "r"(pix-2*stride), "r"(pix), "r"((long)stride),
           "r"(*(uint32_t*)tc0),
           "m"(alpha1), "m"(beta1), "m"(mm_bone), "m"(ff_pb_3F)
    );
}
|
---|
497 |
|
---|
/* Public entry: horizontal chroma edge.  Converts the spec's alpha/beta
 * thresholds to the alpha-1/beta-1 form the core filter expects. */
static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    h264_loop_filter_chroma_mmx2(pix, stride, alpha-1, beta-1, tc0);
}
|
---|
502 |
|
---|
503 | static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
|
---|
504 | {
|
---|
505 | //FIXME: could cut some load/stores by merging transpose with filter
|
---|
506 | uint8_t trans[8*4];
|
---|
507 | transpose4x4(trans, pix-2, 8, stride);
|
---|
508 | transpose4x4(trans+4, pix-2+4*stride, 8, stride);
|
---|
509 | h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0);
|
---|
510 | transpose4x4(pix-2, trans, stride, 8);
|
---|
511 | transpose4x4(pix-2+4*stride, trans+4, stride, 8);
|
---|
512 | }
|
---|
513 |
|
---|
// p0 = (p0 + q1 + 2*p1 + 2) >> 2
// Byte-parallel via pavgb: the (p0^q1)&1 term cancels pavgb's round-up so
// the nested averages round exactly once.  Clobbers mm4; one must hold 0x01 bytes.
#define H264_FILTER_CHROMA4(p0, p1, q1, one) \
    "movq "#p0", %%mm4 \n\t"\
    "pxor "#q1", %%mm4 \n\t"\
    "pand "#one", %%mm4 \n\t" /* mm4 = (p0^q1)&1 */\
    "pavgb "#q1", "#p0" \n\t"\
    "psubusb %%mm4, "#p0" \n\t"\
    "pavgb "#p1", "#p0" \n\t" /* dst = avg(p1, avg(p0,q1) - ((p0^q1)&1)) */\
|
---|
522 |
|
---|
/* Deblock one 8-pixel-wide horizontal chroma edge with bS=4 (intra):
 * p0/q0 are replaced by the H264_FILTER_CHROMA4 average; the old values
 * (saved in mm5/mm6) are restored wherever the threshold mask (mm7) is 0.
 * Operand map: %0=pix-2*stride, %1=pix, %2=stride, %3=alpha1, %4=beta1,
 * %5=mm_bone. */
static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1)
{
    asm volatile(
        "movq (%0), %%mm0 \n\t" /* p1 */
        "movq (%0,%2), %%mm1 \n\t" /* p0 */
        "movq (%1), %%mm2 \n\t" /* q0 */
        "movq (%1,%2), %%mm3 \n\t" /* q1 */
        H264_DEBLOCK_MASK(%3, %4)
        "movq %%mm1, %%mm5 \n\t" /* save original p0 */
        "movq %%mm2, %%mm6 \n\t" /* save original q0 */
        H264_FILTER_CHROMA4(%%mm1, %%mm0, %%mm3, %5) //p0'
        H264_FILTER_CHROMA4(%%mm2, %%mm3, %%mm0, %5) //q0'
        /* blend: keep new value where mask is set, old value elsewhere */
        "psubb %%mm5, %%mm1 \n\t"
        "psubb %%mm6, %%mm2 \n\t"
        "pand %%mm7, %%mm1 \n\t"
        "pand %%mm7, %%mm2 \n\t"
        "paddb %%mm5, %%mm1 \n\t"
        "paddb %%mm6, %%mm2 \n\t"
        "movq %%mm1, (%0,%2) \n\t"
        "movq %%mm2, (%1) \n\t"
        :: "r"(pix-2*stride), "r"(pix), "r"((long)stride),
           "m"(alpha1), "m"(beta1), "m"(mm_bone)
    );
}
|
---|
547 |
|
---|
/* Public entry: horizontal intra (bS=4) chroma edge.  Converts alpha/beta
 * to the alpha-1/beta-1 form the core filter expects. */
static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    h264_loop_filter_chroma_intra_mmx2(pix, stride, alpha-1, beta-1);
}
|
---|
552 |
|
---|
553 | static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
|
---|
554 | {
|
---|
555 | //FIXME: could cut some load/stores by merging transpose with filter
|
---|
556 | uint8_t trans[8*4];
|
---|
557 | transpose4x4(trans, pix-2, 8, stride);
|
---|
558 | transpose4x4(trans+4, pix-2+4*stride, 8, stride);
|
---|
559 | h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
|
---|
560 | transpose4x4(pix-2, trans, stride, 8);
|
---|
561 | transpose4x4(pix-2+4*stride, trans+4, stride, 8);
|
---|
562 | }
|
---|
563 |
|
---|
564 |
|
---|
565 | /***********************************/
|
---|
566 | /* motion compensation */
|
---|
567 |
|
---|
/* One output row of the vertical 6-tap qpel filter:
 * dst = clip((A + F + 20*(C+D) - 5*(B+E) + 16) >> 5), assuming the callers'
 * operand layout: %0=src, %1=dst, %2=srcStride, %3=dstStride, %4=pw_5,
 * %5=pw_16 (see the v_lowpass functions below).  F is loaded fresh from
 * (%0) and widened; mm6 is the accumulator; OP stores/averages the result. */
#define QPEL_H264V(A,B,C,D,E,F,OP)\
    "movd (%0), "#F" \n\t"\
    "movq "#C", %%mm6 \n\t"\
    "paddw "#D", %%mm6 \n\t" /* C+D               */\
    "psllw $2, %%mm6 \n\t" /* 4*(C+D)           */\
    "psubw "#B", %%mm6 \n\t"\
    "psubw "#E", %%mm6 \n\t" /* 4*(C+D)-B-E       */\
    "pmullw %4, %%mm6 \n\t" /* *5 -> 20*(C+D)-5*(B+E) */\
    "add %2, %0 \n\t" /* advance src       */\
    "punpcklbw %%mm7, "#F" \n\t" /* widen new row (mm7=0) */\
    "paddw %5, "#A" \n\t" /* + rounding 16     */\
    "paddw "#F", "#A" \n\t" /* + outermost taps  */\
    "paddw "#A", %%mm6 \n\t"\
    "psraw $5, %%mm6 \n\t"\
    "packuswb %%mm6, %%mm6 \n\t"\
    OP(%%mm6, (%1), A, d)\
    "add %3, %1 \n\t" /* advance dst       */
|
---|
585 |
|
---|
/* Vertical-pass step of the 2-D (hv) qpel filter: same 6-tap combination
 * as QPEL_H264V but WITHOUT rounding, shift or clipping — the full 16-bit
 * intermediate A + F + 20*(C+D) - 5*(B+E) is stored to tmp at offset OF
 * for the later horizontal pass.  Callers' operand layout: %0=src,
 * %1=tmp, %2=srcStride, %3=pw_5. */
#define QPEL_H264HV(A,B,C,D,E,F,OF)\
    "movd (%0), "#F" \n\t"\
    "movq "#C", %%mm6 \n\t"\
    "paddw "#D", %%mm6 \n\t"\
    "psllw $2, %%mm6 \n\t"\
    "psubw "#B", %%mm6 \n\t"\
    "psubw "#E", %%mm6 \n\t" /* 4*(C+D)-B-E            */\
    "pmullw %3, %%mm6 \n\t" /* *5 -> 20*(C+D)-5*(B+E) */\
    "add %2, %0 \n\t"\
    "punpcklbw %%mm7, "#F" \n\t" /* widen new row (mm7=0)  */\
    "paddw "#F", "#A" \n\t"\
    "paddw "#A", %%mm6 \n\t" /* + outermost taps       */\
    "movq %%mm6, "#OF"(%1) \n\t" /* keep full 16-bit precision */
|
---|
599 |
|
---|
600 | #define QPEL_H264(OPNAME, OP, MMX)\
|
---|
601 | static void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
|
---|
602 | int h=4;\
|
---|
603 | \
|
---|
604 | asm volatile(\
|
---|
605 | "pxor %%mm7, %%mm7 \n\t"\
|
---|
606 | "movq %5, %%mm4 \n\t"\
|
---|
607 | "movq %6, %%mm5 \n\t"\
|
---|
608 | "1: \n\t"\
|
---|
609 | "movd -1(%0), %%mm1 \n\t"\
|
---|
610 | "movd (%0), %%mm2 \n\t"\
|
---|
611 | "movd 1(%0), %%mm3 \n\t"\
|
---|
612 | "movd 2(%0), %%mm0 \n\t"\
|
---|
613 | "punpcklbw %%mm7, %%mm1 \n\t"\
|
---|
614 | "punpcklbw %%mm7, %%mm2 \n\t"\
|
---|
615 | "punpcklbw %%mm7, %%mm3 \n\t"\
|
---|
616 | "punpcklbw %%mm7, %%mm0 \n\t"\
|
---|
617 | "paddw %%mm0, %%mm1 \n\t"\
|
---|
618 | "paddw %%mm3, %%mm2 \n\t"\
|
---|
619 | "movd -2(%0), %%mm0 \n\t"\
|
---|
620 | "movd 3(%0), %%mm3 \n\t"\
|
---|
621 | "punpcklbw %%mm7, %%mm0 \n\t"\
|
---|
622 | "punpcklbw %%mm7, %%mm3 \n\t"\
|
---|
623 | "paddw %%mm3, %%mm0 \n\t"\
|
---|
624 | "psllw $2, %%mm2 \n\t"\
|
---|
625 | "psubw %%mm1, %%mm2 \n\t"\
|
---|
626 | "pmullw %%mm4, %%mm2 \n\t"\
|
---|
627 | "paddw %%mm5, %%mm0 \n\t"\
|
---|
628 | "paddw %%mm2, %%mm0 \n\t"\
|
---|
629 | "psraw $5, %%mm0 \n\t"\
|
---|
630 | "packuswb %%mm0, %%mm0 \n\t"\
|
---|
631 | OP(%%mm0, (%1),%%mm6, d)\
|
---|
632 | "add %3, %0 \n\t"\
|
---|
633 | "add %4, %1 \n\t"\
|
---|
634 | "decl %2 \n\t"\
|
---|
635 | " jnz 1b \n\t"\
|
---|
636 | : "+a"(src), "+c"(dst), "+m"(h)\
|
---|
637 | : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
|
---|
638 | : "memory"\
|
---|
639 | );\
|
---|
640 | }\
|
---|
641 | static void OPNAME ## h264_qpel4_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
|
---|
642 | int h=4;\
|
---|
643 | asm volatile(\
|
---|
644 | "pxor %%mm7, %%mm7 \n\t"\
|
---|
645 | "movq %0, %%mm4 \n\t"\
|
---|
646 | "movq %1, %%mm5 \n\t"\
|
---|
647 | :: "m"(ff_pw_5), "m"(ff_pw_16)\
|
---|
648 | );\
|
---|
649 | do{\
|
---|
650 | asm volatile(\
|
---|
651 | "movd -1(%0), %%mm1 \n\t"\
|
---|
652 | "movd (%0), %%mm2 \n\t"\
|
---|
653 | "movd 1(%0), %%mm3 \n\t"\
|
---|
654 | "movd 2(%0), %%mm0 \n\t"\
|
---|
655 | "punpcklbw %%mm7, %%mm1 \n\t"\
|
---|
656 | "punpcklbw %%mm7, %%mm2 \n\t"\
|
---|
657 | "punpcklbw %%mm7, %%mm3 \n\t"\
|
---|
658 | "punpcklbw %%mm7, %%mm0 \n\t"\
|
---|
659 | "paddw %%mm0, %%mm1 \n\t"\
|
---|
660 | "paddw %%mm3, %%mm2 \n\t"\
|
---|
661 | "movd -2(%0), %%mm0 \n\t"\
|
---|
662 | "movd 3(%0), %%mm3 \n\t"\
|
---|
663 | "punpcklbw %%mm7, %%mm0 \n\t"\
|
---|
664 | "punpcklbw %%mm7, %%mm3 \n\t"\
|
---|
665 | "paddw %%mm3, %%mm0 \n\t"\
|
---|
666 | "psllw $2, %%mm2 \n\t"\
|
---|
667 | "psubw %%mm1, %%mm2 \n\t"\
|
---|
668 | "pmullw %%mm4, %%mm2 \n\t"\
|
---|
669 | "paddw %%mm5, %%mm0 \n\t"\
|
---|
670 | "paddw %%mm2, %%mm0 \n\t"\
|
---|
671 | "movd (%2), %%mm3 \n\t"\
|
---|
672 | "psraw $5, %%mm0 \n\t"\
|
---|
673 | "packuswb %%mm0, %%mm0 \n\t"\
|
---|
674 | PAVGB" %%mm3, %%mm0 \n\t"\
|
---|
675 | OP(%%mm0, (%1),%%mm6, d)\
|
---|
676 | "add %4, %0 \n\t"\
|
---|
677 | "add %4, %1 \n\t"\
|
---|
678 | "add %3, %2 \n\t"\
|
---|
679 | : "+a"(src), "+c"(dst), "+d"(src2)\
|
---|
680 | : "D"((long)src2Stride), "S"((long)dstStride)\
|
---|
681 | : "memory"\
|
---|
682 | );\
|
---|
683 | }while(--h);\
|
---|
684 | }\
|
---|
685 | static void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
|
---|
686 | src -= 2*srcStride;\
|
---|
687 | asm volatile(\
|
---|
688 | "pxor %%mm7, %%mm7 \n\t"\
|
---|
689 | "movd (%0), %%mm0 \n\t"\
|
---|
690 | "add %2, %0 \n\t"\
|
---|
691 | "movd (%0), %%mm1 \n\t"\
|
---|
692 | "add %2, %0 \n\t"\
|
---|
693 | "movd (%0), %%mm2 \n\t"\
|
---|
694 | "add %2, %0 \n\t"\
|
---|
695 | "movd (%0), %%mm3 \n\t"\
|
---|
696 | "add %2, %0 \n\t"\
|
---|
697 | "movd (%0), %%mm4 \n\t"\
|
---|
698 | "add %2, %0 \n\t"\
|
---|
699 | "punpcklbw %%mm7, %%mm0 \n\t"\
|
---|
700 | "punpcklbw %%mm7, %%mm1 \n\t"\
|
---|
701 | "punpcklbw %%mm7, %%mm2 \n\t"\
|
---|
702 | "punpcklbw %%mm7, %%mm3 \n\t"\
|
---|
703 | "punpcklbw %%mm7, %%mm4 \n\t"\
|
---|
704 | QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
|
---|
705 | QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
|
---|
706 | QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
|
---|
707 | QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
|
---|
708 | \
|
---|
709 | : "+a"(src), "+c"(dst)\
|
---|
710 | : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
|
---|
711 | : "memory"\
|
---|
712 | );\
|
---|
713 | }\
|
---|
714 | static void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
|
---|
715 | int h=4;\
|
---|
716 | int w=3;\
|
---|
717 | src -= 2*srcStride+2;\
|
---|
718 | while(w--){\
|
---|
719 | asm volatile(\
|
---|
720 | "pxor %%mm7, %%mm7 \n\t"\
|
---|
721 | "movd (%0), %%mm0 \n\t"\
|
---|
722 | "add %2, %0 \n\t"\
|
---|
723 | "movd (%0), %%mm1 \n\t"\
|
---|
724 | "add %2, %0 \n\t"\
|
---|
725 | "movd (%0), %%mm2 \n\t"\
|
---|
726 | "add %2, %0 \n\t"\
|
---|
727 | "movd (%0), %%mm3 \n\t"\
|
---|
728 | "add %2, %0 \n\t"\
|
---|
729 | "movd (%0), %%mm4 \n\t"\
|
---|
730 | "add %2, %0 \n\t"\
|
---|
731 | "punpcklbw %%mm7, %%mm0 \n\t"\
|
---|
732 | "punpcklbw %%mm7, %%mm1 \n\t"\
|
---|
733 | "punpcklbw %%mm7, %%mm2 \n\t"\
|
---|
734 | "punpcklbw %%mm7, %%mm3 \n\t"\
|
---|
735 | "punpcklbw %%mm7, %%mm4 \n\t"\
|
---|
736 | QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\
|
---|
737 | QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\
|
---|
738 | QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\
|
---|
739 | QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\
|
---|
740 | \
|
---|
741 | : "+a"(src)\
|
---|
742 | : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
|
---|
743 | : "memory"\
|
---|
744 | );\
|
---|
745 | tmp += 4;\
|
---|
746 | src += 4 - 9*srcStride;\
|
---|
747 | }\
|
---|
748 | tmp -= 3*4;\
|
---|
749 | asm volatile(\
|
---|
750 | "movq %4, %%mm6 \n\t"\
|
---|
751 | "1: \n\t"\
|
---|
752 | "movq (%0), %%mm0 \n\t"\
|
---|
753 | "paddw 10(%0), %%mm0 \n\t"\
|
---|
754 | "movq 2(%0), %%mm1 \n\t"\
|
---|
755 | "paddw 8(%0), %%mm1 \n\t"\
|
---|
756 | "movq 4(%0), %%mm2 \n\t"\
|
---|
757 | "paddw 6(%0), %%mm2 \n\t"\
|
---|
758 | "psubw %%mm1, %%mm0 \n\t"/*a-b (abccba)*/\
|
---|
759 | "psraw $2, %%mm0 \n\t"/*(a-b)/4 */\
|
---|
760 | "psubw %%mm1, %%mm0 \n\t"/*(a-b)/4-b */\
|
---|
761 | "paddsw %%mm2, %%mm0 \n\t"\
|
---|
762 | "psraw $2, %%mm0 \n\t"/*((a-b)/4-b+c)/4 */\
|
---|
763 | "paddw %%mm6, %%mm2 \n\t"\
|
---|
764 | "paddw %%mm2, %%mm0 \n\t"/*(a-5*b+20*c)/16 +32 */\
|
---|
765 | "psraw $6, %%mm0 \n\t"\
|
---|
766 | "packuswb %%mm0, %%mm0 \n\t"\
|
---|
767 | OP(%%mm0, (%1),%%mm7, d)\
|
---|
768 | "add $24, %0 \n\t"\
|
---|
769 | "add %3, %1 \n\t"\
|
---|
770 | "decl %2 \n\t"\
|
---|
771 | " jnz 1b \n\t"\
|
---|
772 | : "+a"(tmp), "+c"(dst), "+m"(h)\
|
---|
773 | : "S"((long)dstStride), "m"(ff_pw_32)\
|
---|
774 | : "memory"\
|
---|
775 | );\
|
---|
776 | }\
|
---|
777 | \
|
---|
778 | static void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
|
---|
779 | int h=8;\
|
---|
780 | asm volatile(\
|
---|
781 | "pxor %%mm7, %%mm7 \n\t"\
|
---|
782 | "movq %5, %%mm6 \n\t"\
|
---|
783 | "1: \n\t"\
|
---|
784 | "movq (%0), %%mm0 \n\t"\
|
---|
785 | "movq 1(%0), %%mm2 \n\t"\
|
---|
786 | "movq %%mm0, %%mm1 \n\t"\
|
---|
787 | "movq %%mm2, %%mm3 \n\t"\
|
---|
788 | "punpcklbw %%mm7, %%mm0 \n\t"\
|
---|
789 | "punpckhbw %%mm7, %%mm1 \n\t"\
|
---|
790 | "punpcklbw %%mm7, %%mm2 \n\t"\
|
---|
791 | "punpckhbw %%mm7, %%mm3 \n\t"\
|
---|
792 | "paddw %%mm2, %%mm0 \n\t"\
|
---|
793 | "paddw %%mm3, %%mm1 \n\t"\
|
---|
794 | "psllw $2, %%mm0 \n\t"\
|
---|
795 | "psllw $2, %%mm1 \n\t"\
|
---|
796 | "movq -1(%0), %%mm2 \n\t"\
|
---|
797 | "movq 2(%0), %%mm4 \n\t"\
|
---|
798 | "movq %%mm2, %%mm3 \n\t"\
|
---|
799 | "movq %%mm4, %%mm5 \n\t"\
|
---|
800 | "punpcklbw %%mm7, %%mm2 \n\t"\
|
---|
801 | "punpckhbw %%mm7, %%mm3 \n\t"\
|
---|
802 | "punpcklbw %%mm7, %%mm4 \n\t"\
|
---|
803 | "punpckhbw %%mm7, %%mm5 \n\t"\
|
---|
804 | "paddw %%mm4, %%mm2 \n\t"\
|
---|
805 | "paddw %%mm3, %%mm5 \n\t"\
|
---|
806 | "psubw %%mm2, %%mm0 \n\t"\
|
---|
807 | "psubw %%mm5, %%mm1 \n\t"\
|
---|
808 | "pmullw %%mm6, %%mm0 \n\t"\
|
---|
809 | "pmullw %%mm6, %%mm1 \n\t"\
|
---|
810 | "movd -2(%0), %%mm2 \n\t"\
|
---|
811 | "movd 7(%0), %%mm5 \n\t"\
|
---|
812 | "punpcklbw %%mm7, %%mm2 \n\t"\
|
---|
813 | "punpcklbw %%mm7, %%mm5 \n\t"\
|
---|
814 | "paddw %%mm3, %%mm2 \n\t"\
|
---|
815 | "paddw %%mm5, %%mm4 \n\t"\
|
---|
816 | "movq %6, %%mm5 \n\t"\
|
---|
817 | "paddw %%mm5, %%mm2 \n\t"\
|
---|
818 | "paddw %%mm5, %%mm4 \n\t"\
|
---|
819 | "paddw %%mm2, %%mm0 \n\t"\
|
---|
820 | "paddw %%mm4, %%mm1 \n\t"\
|
---|
821 | "psraw $5, %%mm0 \n\t"\
|
---|
822 | "psraw $5, %%mm1 \n\t"\
|
---|
823 | "packuswb %%mm1, %%mm0 \n\t"\
|
---|
824 | OP(%%mm0, (%1),%%mm5, q)\
|
---|
825 | "add %3, %0 \n\t"\
|
---|
826 | "add %4, %1 \n\t"\
|
---|
827 | "decl %2 \n\t"\
|
---|
828 | " jnz 1b \n\t"\
|
---|
829 | : "+a"(src), "+c"(dst), "+m"(h)\
|
---|
830 | : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
|
---|
831 | : "memory"\
|
---|
832 | );\
|
---|
833 | }\
|
---|
834 | \
|
---|
/* Same 8-wide horizontal 6-tap lowpass as above, then averaged (PAVGB)     */\
/* with the corresponding row of src2 before the final OP store.            */\
/* NOTE(review): src is advanced by dstStride (%4) each row — this routine  */\
/* assumes the src stride equals dstStride (callers pass matching strides). */\
static void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=8;\
    /* load the constant multiplier once; mm6/mm7 are assumed to survive    */\
    /* into the per-row asm statements below (MMX state across asm blocks). */\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        "movq %0, %%mm6 \n\t"\
        :: "m"(ff_pw_5)\
        );\
    do{\
    asm volatile(\
        /* 4*(b+c) */\
        "movq (%0), %%mm0 \n\t"\
        "movq 1(%0), %%mm2 \n\t"\
        "movq %%mm0, %%mm1 \n\t"\
        "movq %%mm2, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpckhbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "paddw %%mm3, %%mm1 \n\t"\
        "psllw $2, %%mm0 \n\t"\
        "psllw $2, %%mm1 \n\t"\
        /* (… - (a+d)) * 5 */\
        "movq -1(%0), %%mm2 \n\t"\
        "movq 2(%0), %%mm4 \n\t"\
        "movq %%mm2, %%mm3 \n\t"\
        "movq %%mm4, %%mm5 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpckhbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm4 \n\t"\
        "punpckhbw %%mm7, %%mm5 \n\t"\
        "paddw %%mm4, %%mm2 \n\t"\
        "paddw %%mm3, %%mm5 \n\t"\
        "psubw %%mm2, %%mm0 \n\t"\
        "psubw %%mm5, %%mm1 \n\t"\
        "pmullw %%mm6, %%mm0 \n\t"\
        "pmullw %%mm6, %%mm1 \n\t"\
        /* + (e+f) + 16, >>5, clip */\
        "movd -2(%0), %%mm2 \n\t"\
        "movd 7(%0), %%mm5 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm5 \n\t"\
        "paddw %%mm3, %%mm2 \n\t"\
        "paddw %%mm5, %%mm4 \n\t"\
        "movq %5, %%mm5 \n\t"\
        "paddw %%mm5, %%mm2 \n\t"\
        "paddw %%mm5, %%mm4 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "paddw %%mm4, %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        /* average with src2 row, then store through OP */\
        "movq (%2), %%mm4 \n\t"\
        "packuswb %%mm1, %%mm0 \n\t"\
        PAVGB" %%mm4, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm5, q)\
        "add %4, %0 \n\t"\
        "add %4, %1 \n\t"\
        "add %3, %2 \n\t"\
        : "+a"(src), "+c"(dst), "+d"(src2)\
        : "D"((long)src2Stride), "S"((long)dstStride),\
          "m"(ff_pw_16)\
        : "memory"\
        );\
    }while(--h);\
}\
897 | \
|
---|
/* Vertical 6-tap qpel lowpass on an 8-wide block, h = 8 or 16 rows.        */\
/* Works in two passes of a 4-pixel-wide column (w = 2).  Primes mm0..mm4   */\
/* with five source rows, then QPEL_H264V (defined earlier in this file)    */\
/* loads the next row, applies the 6-tap filter and stores one output row   */\
/* via OP, rotating the register window each call.                          */\
static inline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int w= 2;\
    src -= 2*srcStride;  /* filter needs two rows above the first output */\
\
    while(w--){\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        /* prime the 5-row register window: mm0..mm4 = rows -2..+2 */\
        "movd (%0), %%mm0 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm1 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm2 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm3 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm4 \n\t"\
        "add %2, %0 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm4 \n\t"\
        /* 8 output rows; register roles rotate one step per row */\
        QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
        QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
        QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
        QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
        QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
        QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
        QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
        QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
        \
        : "+a"(src), "+c"(dst)\
        : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
        );\
    if(h==16){\
        /* continue the rotation for 8 more rows; relies on the MMX        */\
        /* register window surviving between the two asm statements        */\
        asm volatile(\
            QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
            QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
            QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
            QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
            QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
            QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
            QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
            QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
            \
            : "+a"(src), "+c"(dst)\
            : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
            : "memory"\
            );\
    }\
    /* step 4 pixels right and back up to the top for the next column */\
    src += 4-(h+5)*srcStride;\
    dst += 4-h*dstStride;\
    }\
}\
/* 2-D (horizontal+vertical) 6-tap qpel lowpass for an 8- or 16-wide block. */\
/* Pass 1: vertical 6-tap into the int16 buffer tmp via QPEL_H264HV,        */\
/* 4 columns at a time; tmp rows are 24 int16 (48 bytes) apart.             */\
/* NOTE(review): the tmpStride parameter is not used — the row pitch of 24  */\
/* shorts is hard-coded in the asm; confirm callers size tmp accordingly.   */\
/* Pass 2: horizontal 6-tap on tmp with +32 rounding and >>6, 8 output      */\
/* pixels per row, OP-stored to dst.                                        */\
static inline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
    int h = size;\
    int w = (size+8)>>2;  /* number of 4-wide columns incl. 8 extra for pass 2 taps */\
    src -= 2*srcStride+2; /* 2 rows above / 2 pixels left of the first output */\
    while(w--){\
    asm volatile(\
        "pxor %%mm7, %%mm7 \n\t"\
        /* prime mm0..mm4 with five source rows of this 4-wide column */\
        "movd (%0), %%mm0 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm1 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm2 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm3 \n\t"\
        "add %2, %0 \n\t"\
        "movd (%0), %%mm4 \n\t"\
        "add %2, %0 \n\t"\
        "punpcklbw %%mm7, %%mm0 \n\t"\
        "punpcklbw %%mm7, %%mm1 \n\t"\
        "punpcklbw %%mm7, %%mm2 \n\t"\
        "punpcklbw %%mm7, %%mm3 \n\t"\
        "punpcklbw %%mm7, %%mm4 \n\t"\
        /* 8 intermediate rows stored at tmp + n*48 bytes */\
        QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*48)\
        QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*48)\
        QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*48)\
        QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*48)\
        QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*48)\
        QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*48)\
        QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*48)\
        QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*48)\
        : "+a"(src)\
        : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
        : "memory"\
        );\
    if(size==16){\
        /* 8 more rows; register window carried over from the asm above */\
        asm volatile(\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1,  8*48)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2,  9*48)\
            QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 10*48)\
            QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 11*48)\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 12*48)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 13*48)\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 14*48)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 15*48)\
            : "+a"(src)\
            : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
            : "memory"\
            );\
    }\
    tmp += 4;                       /* next 4-short column of tmp */\
    src += 4 - (size+5)*srcStride;  /* next 4-pixel source column, back to top */\
    }\
    tmp -= size+8;  /* rewind the per-column advances back to tmp[0] */\
    w = size>>4;    /* 0 for size 8, 1 for size 16; do-while runs w+1 times */\
    do{\
    h = size;\
    /* pass 2: horizontal filter on 16-bit data, 8 outputs per row.         */\
    /* The 5-tap scaling is factored into shifts:                           */\
    /*   out = ((((a-b)>>2) - b + c) >> 2) + c + 32) >> 6                   */\
    /* which equals (a - 5b + 20c + 32) >> 6 up to rounding; mm6=ff_pw_32.  */\
    asm volatile(\
        "movq %4, %%mm6 \n\t"\
        "1: \n\t"\
        "movq (%0), %%mm0 \n\t"\
        "movq 8(%0), %%mm3 \n\t"\
        "movq 2(%0), %%mm1 \n\t"\
        "movq 10(%0), %%mm4 \n\t"\
        "paddw %%mm4, %%mm0 \n\t"\
        "paddw %%mm3, %%mm1 \n\t"\
        "paddw 18(%0), %%mm3 \n\t"\
        "paddw 16(%0), %%mm4 \n\t"\
        "movq 4(%0), %%mm2 \n\t"\
        "movq 12(%0), %%mm5 \n\t"\
        "paddw 6(%0), %%mm2 \n\t"\
        "paddw 14(%0), %%mm5 \n\t"\
        "psubw %%mm1, %%mm0 \n\t"\
        "psubw %%mm4, %%mm3 \n\t"\
        "psraw $2, %%mm0 \n\t"\
        "psraw $2, %%mm3 \n\t"\
        "psubw %%mm1, %%mm0 \n\t"\
        "psubw %%mm4, %%mm3 \n\t"\
        "paddsw %%mm2, %%mm0 \n\t"\
        "paddsw %%mm5, %%mm3 \n\t"\
        "psraw $2, %%mm0 \n\t"\
        "psraw $2, %%mm3 \n\t"\
        "paddw %%mm6, %%mm2 \n\t"\
        "paddw %%mm6, %%mm5 \n\t"\
        "paddw %%mm2, %%mm0 \n\t"\
        "paddw %%mm5, %%mm3 \n\t"\
        "psraw $6, %%mm0 \n\t"\
        "psraw $6, %%mm3 \n\t"\
        "packuswb %%mm3, %%mm0 \n\t"\
        OP(%%mm0, (%1),%%mm7, q)\
        "add $48, %0 \n\t"\
        "add %3, %1 \n\t"\
        "decl %2 \n\t"\
        " jnz 1b \n\t"\
        : "+a"(tmp), "+c"(dst), "+m"(h)\
        : "S"((long)dstStride), "m"(ff_pw_32)\
        : "memory"\
        );\
    tmp += 8 - size*24;           /* right 8 shorts, back to the top row */\
    dst += 8 - size*dstStride;\
    }while(w--);\
}\
1054 | \
|
---|
/* 8x8 vertical lowpass: single call of the 8-or-16 worker with h=8. */\
static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
/* 16x16 vertical lowpass: two 8-wide halves, h=16 each. */\
static void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
1062 | \
|
---|
/* 16x16 horizontal lowpass: four 8x8 quadrants via the 8-wide routine. */\
static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\
1071 | \
|
---|
/* 16x16 horizontal lowpass + average with src2: four 8x8 quadrants.        */\
/* NOTE(review): src advances by 8*dstStride — consistent with the 8-wide   */\
/* _l2 routine's assumption that the src stride equals dstStride.           */\
static void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst  , src  , src2  , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
    src += 8*dstStride;\
    dst += 8*dstStride;\
    src2 += 8*src2Stride;\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst  , src  , src2  , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
}\
1081 | \
|
---|
/* 8x8 2-D lowpass: thin wrapper around the 8-or-16 worker with size=8. */\
static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst  , tmp  , src  , dstStride, tmpStride, srcStride, 8);\
}\
\
/* 16x16 2-D lowpass: the worker handles the full 16-wide block itself. */\
static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst  , tmp  , src  , dstStride, tmpStride, srcStride, 16);\
}\
1089 | \
|
---|
/* 4-wide half-sample merge: dst = OP(avg(clip8((src16 + 16) >> 5), src8)). */\
/* src16 rows are 24 int16 apart (the hv intermediate buffer pitch).        */\
/* NOTE(review): the h parameter is ignored — exactly 4 rows are processed  */\
/* (offsets 0, 24, 48, 72 shorts); callers always pass h == 4 here.         */\
static void OPNAME ## pixels4_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    asm volatile(\
        "movq %5, %%mm6 \n\t"\
        /* rows 0 and 1: round, shift, clip, average with src8 */\
        "movq (%1), %%mm0 \n\t"\
        "movq 24(%1), %%mm1 \n\t"\
        "paddw %%mm6, %%mm0 \n\t"\
        "paddw %%mm6, %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        "packuswb %%mm1, %%mm1 \n\t"\
        PAVGB" (%0), %%mm0 \n\t"\
        PAVGB" (%0,%3), %%mm1 \n\t"\
        OP(%%mm0, (%2), %%mm4, d)\
        OP(%%mm1, (%2,%4), %%mm5, d)\
        "lea (%0,%3,2), %0 \n\t"\
        "lea (%2,%4,2), %2 \n\t"\
        /* rows 2 and 3 (48 and 72 shorts into src16) */\
        "movq 48(%1), %%mm0 \n\t"\
        "movq 72(%1), %%mm1 \n\t"\
        "paddw %%mm6, %%mm0 \n\t"\
        "paddw %%mm6, %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "packuswb %%mm0, %%mm0 \n\t"\
        "packuswb %%mm1, %%mm1 \n\t"\
        PAVGB" (%0), %%mm0 \n\t"\
        PAVGB" (%0,%3), %%mm1 \n\t"\
        OP(%%mm0, (%2), %%mm4, d)\
        OP(%%mm1, (%2,%4), %%mm5, d)\
        :"+a"(src8), "+c"(src16), "+d"(dst)\
        :"S"((long)src8Stride), "D"((long)dstStride), "m"(ff_pw_16)\
        :"memory");\
}\
/* 8-wide half-sample merge, h rows:                                        */\
/*   dst = OP(avg(clip8((src16 + 16) >> 5), src8))                          */\
/* src16 rows are 24 int16 apart.  mm6 is loaded once with ff_pw_16 and is  */\
/* assumed to survive across the per-row asm statements.                    */\
static void OPNAME ## pixels8_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    asm volatile(\
        "movq %0, %%mm6 \n\t"\
        ::"m"(ff_pw_16)\
        );\
    while(h--){\
    asm volatile(\
        "movq (%1), %%mm0 \n\t"\
        "movq 8(%1), %%mm1 \n\t"\
        "paddw %%mm6, %%mm0 \n\t"\
        "paddw %%mm6, %%mm1 \n\t"\
        "psraw $5, %%mm0 \n\t"\
        "psraw $5, %%mm1 \n\t"\
        "packuswb %%mm1, %%mm0 \n\t"\
        PAVGB" (%0), %%mm0 \n\t"\
        OP(%%mm0, (%2), %%mm5, q)\
        ::"a"(src8), "c"(src16), "d"(dst)\
        :"memory");\
    src8 += src8Stride;\
    src16 += 24;\
    dst += dstStride;\
    }\
}\
/* 16-wide half-sample merge: two side-by-side 8-wide calls. */\
static void OPNAME ## pixels16_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    OPNAME ## pixels8_l2_shift5_ ## MMX(dst  , src16  , src8  , dstStride, src8Stride, h);\
    OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\
}\
1153 |
|
---|
1154 |
|
---|
/* Instantiates the 16 quarter-pel motion-compensation entry points          */\
/* _mcXY (X = horizontal qpel offset 0..3, Y = vertical 0..3) for one       */\
/* block SIZE and one instruction set MMX, composing the lowpass kernels    */\
/* above.  put_ variants always feed the l2/average stages so that avg_     */\
/* OPNAME only applies at the final store.                                  */\
#define H264_MC(OPNAME, SIZE, MMX) \
/* (0,0): plain copy */\
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _mmx(dst, src, stride, SIZE);\
}\
\
/* (1,0): average of full-pel and horizontal half-pel */\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\
}\
\
/* (2,0): horizontal half-pel */\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
/* (3,0): average of horizontal half-pel and the pixel to its right */\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\
}\
\
/* (0,1): average of full-pel and vertical half-pel (half buffered on stack) */\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const half= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, half, stride, stride, SIZE);\
}\
\
/* (0,2): vertical half-pel */\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
/* (0,3): like mc01 but averaged with the row below */\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const half= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, half, stride, stride, SIZE);\
}\
\
/* (1,1)/(3,1)/(1,3)/(3,3): diagonal qpel = horizontal filter averaged with */\
/* a buffered vertical half-pel (shifted by +1 column and/or +1 row).       */\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const halfV= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const halfV= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const halfV= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const halfV= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfV, stride, SIZE);\
}\
\
/* (2,2): centre position, full 2-D filter through an int16 scratch buffer */\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4];\
    int16_t * const tmp= (int16_t*)temp;\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, tmp, src, stride, SIZE, stride);\
}\
\
/* (2,1)/(2,3): horizontal filter averaged with a buffered 2-D half-pel */\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
    uint8_t * const halfHV= (uint8_t*)temp;\
    int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE/2;\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
    uint8_t * const halfHV= (uint8_t*)temp;\
    int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE/2;\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\
}\
\
/* (1,2)/(3,2): 2-D half-pel averaged with the vertical intermediate        */\
/* (halfV+2 / halfV+3 select the left/right neighbouring column).           */\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
    int16_t * const halfV= ((int16_t*)temp) + SIZE*SIZE/2;\
    uint8_t * const halfHV= ((uint8_t*)temp);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+2, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
    int16_t * const halfV= ((int16_t*)temp) + SIZE*SIZE/2;\
    uint8_t * const halfHV= ((uint8_t*)temp);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+3, halfHV, stride, SIZE, SIZE);\
}\
1255 |
|
---|
1256 |
|
---|
/* Store OPs for the avg_ template instantiations: b = avg(a, b), using the
 * 3DNow! (pavgusb) or MMX2 (pavgb) byte-average instruction; `temp` is a
 * scratch register and `size` the mov width suffix (d or q). */
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgusb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "      \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgb " #temp ", " #a "          \n\t"\
"mov" #size " " #a ", " #b "      \n\t"
1265 |
|
---|
/* Instantiate the qpel lowpass kernels and the 16 MC entry points for each
 * block size, once per instruction set.  PAVGB selects the byte-average
 * opcode used inside the templates. */
#define PAVGB "pavgusb"
QPEL_H264(put_,       PUT_OP, 3dnow)
QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow)
#undef PAVGB
#define PAVGB "pavgb"
QPEL_H264(put_,       PUT_OP, mmx2)
QPEL_H264(avg_,  AVG_MMX2_OP, mmx2)
#undef PAVGB

H264_MC(put_, 4,  3dnow)
H264_MC(put_, 8,  3dnow)
H264_MC(put_, 16, 3dnow)
H264_MC(avg_, 4,  3dnow)
H264_MC(avg_, 8,  3dnow)
H264_MC(avg_, 16, 3dnow)
H264_MC(put_, 4,  mmx2)
H264_MC(put_, 8,  mmx2)
H264_MC(put_, 16, mmx2)
H264_MC(avg_, 4,  mmx2)
H264_MC(avg_, 8,  mmx2)
H264_MC(avg_, 16, mmx2)
1287 |
|
---|
1288 |
|
---|
/* Instantiate the chroma MC template three times: put (store OP is a no-op
 * overwrite), avg with MMX2 pavgb, and avg with 3DNow! pavgusb.  Each pass
 * defines the function names and the store OPs, includes the template, and
 * undefines everything for the next pass. */
#define H264_CHROMA_OP(S,D)
#define H264_CHROMA_OP4(S,D,T)
#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx
#define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_mmx
#define H264_CHROMA_MC2_TMPL put_h264_chroma_mc2_mmx2
#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
#include "dsputil_h264_template_mmx.c"
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC2_TMPL
#undef H264_CHROMA_MC8_MV0

#define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
                               "pavgb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_mmx2
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_mmx2
#define H264_CHROMA_MC2_TMPL avg_h264_chroma_mc2_mmx2
#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
#include "dsputil_h264_template_mmx.c"
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC2_TMPL
#undef H264_CHROMA_MC8_MV0

/* note: no MC2 variant here — the 3DNow! set reuses the mmx2 2x2 kernel */
#define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
                               "pavgusb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_3dnow
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_3dnow
#define H264_CHROMA_MC8_MV0 avg_pixels8_3dnow
#include "dsputil_h264_template_mmx.c"
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0
1330 |
|
---|
1331 | /***********************************/
|
---|
1332 | /* weighted prediction */
|
---|
1333 |
|
---|
/* H.264 unidirectional weighted prediction (MMX2), w x h pixels:
 *   dst = clip8((dst * weight + offset') >> log2_denom)
 * where offset' = (offset << log2_denom) + (1 << log2_denom)/2 (rounding).
 * Constants are splatted once into mm4 (weight), mm5 (offset), mm6 (shift
 * count), mm7 (zero) and are assumed to survive across the per-pixel asm
 * statements below.  Processes two rows of four pixels per inner step, so
 * w must be a multiple of 4 and h a multiple of 2.
 * NOTE(review): no emms here — presumably the caller handles FPU/MMX state;
 * confirm against the call sites. */
static inline void ff_h264_weight_WxH_mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset, int w, int h)
{
    int x, y;
    offset <<= log2_denom;
    offset += (1 << log2_denom) >> 1;  /* rounding term */
    asm volatile(
        "movd %0, %%mm4 \n\t"
        "movd %1, %%mm5 \n\t"
        "movd %2, %%mm6 \n\t"
        "pshufw $0, %%mm4, %%mm4 \n\t"  /* splat weight to all 4 words */
        "pshufw $0, %%mm5, %%mm5 \n\t"  /* splat offset */
        "pxor %%mm7, %%mm7 \n\t"
        :: "g"(weight), "g"(offset), "g"(log2_denom)
    );
    for(y=0; y<h; y+=2){
        for(x=0; x<w; x+=4){
            asm volatile(
                "movd %0, %%mm0 \n\t"
                "movd %1, %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw %%mm4, %%mm0 \n\t"
                "pmullw %%mm4, %%mm1 \n\t"
                "paddsw %%mm5, %%mm0 \n\t"  /* saturating add of offset */
                "paddsw %%mm5, %%mm1 \n\t"
                "psraw %%mm6, %%mm0 \n\t"   /* >> log2_denom */
                "psraw %%mm6, %%mm1 \n\t"
                "packuswb %%mm7, %%mm0 \n\t" /* clip to [0,255] */
                "packuswb %%mm7, %%mm1 \n\t"
                "movd %%mm0, %0 \n\t"
                "movd %%mm1, %1 \n\t"
                : "+m"(*(uint32_t*)(dst+x)),
                  "+m"(*(uint32_t*)(dst+x+stride))
            );
        }
        dst += 2*stride;
    }
}
1372 |
|
---|
/* H.264 bidirectional weighted prediction (MMX2), w x h pixels:
 *   dst = clip8((dst*weightd + src*weights + offset') >> (log2_denom+1))
 * where offset' = ((offset + 1) | 1) << log2_denom — the |1 forces the odd
 * rounding term required for bipred.  Constants splatted into mm3 (weightd),
 * mm4 (weights), mm5 (offset), mm6 (shift count), mm7 (zero); assumed to
 * survive across the per-pixel asm statements.  Processes four pixels per
 * inner step, so w must be a multiple of 4.
 * NOTE(review): no emms here — presumably the caller handles MMX state. */
static inline void ff_h264_biweight_WxH_mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset, int w, int h)
{
    int x, y;
    offset = ((offset + 1) | 1) << log2_denom;
    asm volatile(
        "movd %0, %%mm3 \n\t"
        "movd %1, %%mm4 \n\t"
        "movd %2, %%mm5 \n\t"
        "movd %3, %%mm6 \n\t"
        "pshufw $0, %%mm3, %%mm3 \n\t"
        "pshufw $0, %%mm4, %%mm4 \n\t"
        "pshufw $0, %%mm5, %%mm5 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        :: "g"(weightd), "g"(weights), "g"(offset), "g"(log2_denom+1)
    );
    for(y=0; y<h; y++){
        for(x=0; x<w; x+=4){
            asm volatile(
                "movd %0, %%mm0 \n\t"
                "movd %1, %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw %%mm3, %%mm0 \n\t"   /* dst * weightd */
                "pmullw %%mm4, %%mm1 \n\t"   /* src * weights */
                "paddsw %%mm1, %%mm0 \n\t"
                "paddsw %%mm5, %%mm0 \n\t"   /* + offset, saturating */
                "psraw %%mm6, %%mm0 \n\t"    /* >> (log2_denom+1) */
                "packuswb %%mm0, %%mm0 \n\t" /* clip to [0,255] */
                "movd %%mm0, %0 \n\t"
                : "+m"(*(uint32_t*)(dst+x))
                : "m"(*(uint32_t*)(src+x))
            );
        }
        src += stride;
        dst += stride;
    }
}
1410 |
|
---|
/* Generate the fixed-size weight/biweight entry points expected by the
 * dsputil function-pointer tables, one pair per H.264 block shape. */
#define H264_WEIGHT(W,H) \
static void ff_h264_biweight_ ## W ## x ## H ## _mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
    ff_h264_biweight_WxH_mmx2(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
} \
static void ff_h264_weight_ ## W ## x ## H ## _mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset){ \
    ff_h264_weight_WxH_mmx2(dst, stride, log2_denom, weight, offset, W, H); \
}

H264_WEIGHT(16,16)
H264_WEIGHT(16, 8)
H264_WEIGHT( 8,16)
H264_WEIGHT( 8, 8)
H264_WEIGHT( 8, 4)
H264_WEIGHT( 4, 8)
H264_WEIGHT( 4, 4)
H264_WEIGHT( 4, 2)
1427 |
|
---|