/*
 * Copyright (c) 2005 Zoltan Hidvegi <hzoli -a- hzoli -d- com>,
 * Loren Merritt
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * MMX optimized version of (put|avg)_h264_chroma_mc8.
 * H264_CHROMA_MC8_TMPL must be defined to the desired function name
 * H264_CHROMA_OP must be defined to empty for put and pavgb/pavgusb for avg
 * H264_CHROMA_MC8_MV0 must be defined to a (put|avg)_pixels8 function
 */
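
/* For illustration only: a hypothetical instantiation of this template.
 * The including file would define, for example,
 *
 *     #define H264_CHROMA_OP(d, s)                        // empty => "put"
 *     #define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx
 *     #define H264_CHROMA_MC8_MV0  put_pixels8_mmx
 *
 * and then #include this file.  (put_h264_chroma_mc8_mmx and put_pixels8_mmx
 * are placeholder names; the real names are chosen by the including file.)
 * The 4- and 2-point templates below additionally expect an H264_CHROMA_OP4
 * macro from the including file.
 */
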
static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    DECLARE_ALIGNED_8(uint64_t, AA);
    DECLARE_ALIGNED_8(uint64_t, DD);
    int i;

    if(y==0 && x==0) {
        /* no filter needed */
        H264_CHROMA_MC8_MV0(dst, src, stride, h);
        return;
    }

    assert(x<8 && y<8 && x>=0 && y>=0);

    if(y==0 || x==0)
    {
        /* 1 dimensional filter only */
        const int dxy = x ? 1 : stride;
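        /* One of x,y is zero here, so a two-tap filter suffices: dxy selects
         * the second sample (1 = horizontal neighbour, stride = vertical
         * neighbour) and x+y is the nonzero filter fraction. */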

        asm volatile(
            "movd %0, %%mm5\n\t"
            "movq %1, %%mm4\n\t"
            "punpcklwd %%mm5, %%mm5\n\t"
            "punpckldq %%mm5, %%mm5\n\t" /* mm5 = B = x+y (one of x,y is 0) */
            "movq %%mm4, %%mm6\n\t"
            "pxor %%mm7, %%mm7\n\t"
            "psubw %%mm5, %%mm4\n\t" /* mm4 = A = 8-(x+y) */
            "psrlw $1, %%mm6\n\t" /* mm6 = 4 */
            :: "rm"(x+y), "m"(ff_pw_8));

        for(i=0; i<h; i++) {
            asm volatile(
                /* mm0 = src[0..7], mm2 = src[dxy..dxy+7] */
                "movq %0, %%mm0\n\t"
                "movq %1, %%mm2\n\t"
                :: "m"(src[0]), "m"(src[dxy]));

            asm volatile(
                /* [mm0,mm1] = A * src[0..7] */
                /* [mm2,mm3] = B * src[dxy..dxy+7] */
                "movq %%mm0, %%mm1\n\t"
                "movq %%mm2, %%mm3\n\t"
                "punpcklbw %%mm7, %%mm0\n\t"
                "punpckhbw %%mm7, %%mm1\n\t"
                "punpcklbw %%mm7, %%mm2\n\t"
                "punpckhbw %%mm7, %%mm3\n\t"
                "pmullw %%mm4, %%mm0\n\t"
                "pmullw %%mm4, %%mm1\n\t"
                "pmullw %%mm5, %%mm2\n\t"
                "pmullw %%mm5, %%mm3\n\t"

                /* dst[0..7] = (A * src[0..7] + B * src[dxy..dxy+7] + 4) >> 3 */
                "paddw %%mm6, %%mm0\n\t"
                "paddw %%mm6, %%mm1\n\t"
                "paddw %%mm2, %%mm0\n\t"
                "paddw %%mm3, %%mm1\n\t"
                "psrlw $3, %%mm0\n\t"
                "psrlw $3, %%mm1\n\t"
                "packuswb %%mm1, %%mm0\n\t"
                H264_CHROMA_OP(%0, %%mm0)
                "movq %%mm0, %0\n\t"
                : "=m" (dst[0]));

            src += stride;
            dst += stride;
        }
        return;
    }

    /* general case, bilinear */
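    /* Full 2-D case: x and y are the fractional chroma positions (0..7).
     * With the standard bilinear weights
     *   A = (8-x)*(8-y), B = x*(8-y), C = (8-x)*y, D = x*y
     * each output sample is
     *   dst[i] = (A*src[i] + B*src[i+1] + C*src[i+stride] + D*src[i+stride+1] + 32) >> 6
     * The setup below derives A, B, C, D from x and y entirely in MMX registers. */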
    asm volatile("movd %2, %%mm4\n\t"
                 "movd %3, %%mm6\n\t"
                 "punpcklwd %%mm4, %%mm4\n\t"
                 "punpcklwd %%mm6, %%mm6\n\t"
                 "punpckldq %%mm4, %%mm4\n\t" /* mm4 = x words */
                 "punpckldq %%mm6, %%mm6\n\t" /* mm6 = y words */
                 "movq %%mm4, %%mm5\n\t"
                 "pmullw %%mm6, %%mm4\n\t" /* mm4 = x * y */
                 "psllw $3, %%mm5\n\t"
                 "psllw $3, %%mm6\n\t"
                 "movq %%mm5, %%mm7\n\t"
                 "paddw %%mm6, %%mm7\n\t"
                 "movq %%mm4, %1\n\t" /* DD = x * y */
                 "psubw %%mm4, %%mm5\n\t" /* mm5 = B = 8x - xy */
                 "psubw %%mm4, %%mm6\n\t" /* mm6 = C = 8y - xy */
                 "paddw %4, %%mm4\n\t"
                 "psubw %%mm7, %%mm4\n\t" /* mm4 = A = xy - (8x+8y) + 64 */
                 "pxor %%mm7, %%mm7\n\t"
                 "movq %%mm4, %0\n\t"
                 : "=m" (AA), "=m" (DD) : "rm" (x), "rm" (y), "m" (ff_pw_64));

    asm volatile(
        /* mm0 = src[0..7], mm1 = src[1..8] */
        "movq %0, %%mm0\n\t"
        "movq %1, %%mm1\n\t"
        : : "m" (src[0]), "m" (src[1]));

    for(i=0; i<h; i++) {
        src += stride;
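        /* mm0/mm1 still hold the previous row (loaded before the loop, or at
         * the end of the previous iteration), so each source row is read
         * only once and then reused as the A/B row of the next pass. */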

        asm volatile(
            /* mm2 = A * src[0..3] + B * src[1..4] */
            /* mm3 = A * src[4..7] + B * src[5..8] */
            "movq %%mm0, %%mm2\n\t"
            "movq %%mm1, %%mm3\n\t"
            "punpckhbw %%mm7, %%mm0\n\t"
            "punpcklbw %%mm7, %%mm1\n\t"
            "punpcklbw %%mm7, %%mm2\n\t"
            "punpckhbw %%mm7, %%mm3\n\t"
            "pmullw %0, %%mm0\n\t"
            "pmullw %0, %%mm2\n\t"
            "pmullw %%mm5, %%mm1\n\t"
            "pmullw %%mm5, %%mm3\n\t"
            "paddw %%mm1, %%mm2\n\t"
            "paddw %%mm0, %%mm3\n\t"
            : : "m" (AA));

        asm volatile(
            /* [mm2,mm3] += C * src[0..7] */
            "movq %0, %%mm0\n\t"
            "movq %%mm0, %%mm1\n\t"
            "punpcklbw %%mm7, %%mm0\n\t"
            "punpckhbw %%mm7, %%mm1\n\t"
            "pmullw %%mm6, %%mm0\n\t"
            "pmullw %%mm6, %%mm1\n\t"
            "paddw %%mm0, %%mm2\n\t"
            "paddw %%mm1, %%mm3\n\t"
            : : "m" (src[0]));

        asm volatile(
            /* [mm2,mm3] += D * src[1..8] */
            "movq %1, %%mm1\n\t"
            "movq %%mm1, %%mm0\n\t"
            "movq %%mm1, %%mm4\n\t"
            "punpcklbw %%mm7, %%mm0\n\t"
            "punpckhbw %%mm7, %%mm4\n\t"
            "pmullw %2, %%mm0\n\t"
            "pmullw %2, %%mm4\n\t"
            "paddw %%mm0, %%mm2\n\t"
            "paddw %%mm4, %%mm3\n\t"
            "movq %0, %%mm0\n\t"
            : : "m" (src[0]), "m" (src[1]), "m" (DD));

        asm volatile(
            /* dst[0..7] = ([mm2,mm3] + 32) >> 6 */
            "paddw %1, %%mm2\n\t"
            "paddw %1, %%mm3\n\t"
            "psrlw $6, %%mm2\n\t"
            "psrlw $6, %%mm3\n\t"
            "packuswb %%mm3, %%mm2\n\t"
            H264_CHROMA_OP(%0, %%mm2)
            "movq %%mm2, %0\n\t"
            : "=m" (dst[0]) : "m" (ff_pw_32));
        dst+= stride;
    }
}

static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 4*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
    DECLARE_ALIGNED_8(uint64_t, AA);
    DECLARE_ALIGNED_8(uint64_t, DD);
    int i;

    /* no special case for mv=(0,0) in 4x*, since it's much less common than in 8x*.
     * could still save a few cycles, but maybe not worth the complexity. */

    assert(x<8 && y<8 && x>=0 && y>=0);

    asm volatile("movd %2, %%mm4\n\t"
                 "movd %3, %%mm6\n\t"
                 "punpcklwd %%mm4, %%mm4\n\t"
                 "punpcklwd %%mm6, %%mm6\n\t"
                 "punpckldq %%mm4, %%mm4\n\t" /* mm4 = x words */
                 "punpckldq %%mm6, %%mm6\n\t" /* mm6 = y words */
                 "movq %%mm4, %%mm5\n\t"
                 "pmullw %%mm6, %%mm4\n\t" /* mm4 = x * y */
                 "psllw $3, %%mm5\n\t"
                 "psllw $3, %%mm6\n\t"
                 "movq %%mm5, %%mm7\n\t"
                 "paddw %%mm6, %%mm7\n\t"
                 "movq %%mm4, %1\n\t" /* DD = x * y */
                 "psubw %%mm4, %%mm5\n\t" /* mm5 = B = 8x - xy */
                 "psubw %%mm4, %%mm6\n\t" /* mm6 = C = 8y - xy */
                 "paddw %4, %%mm4\n\t"
                 "psubw %%mm7, %%mm4\n\t" /* mm4 = A = xy - (8x+8y) + 64 */
                 "pxor %%mm7, %%mm7\n\t"
                 "movq %%mm4, %0\n\t"
                 : "=m" (AA), "=m" (DD) : "rm" (x), "rm" (y), "m" (ff_pw_64));

    asm volatile(
        /* mm0 = src[0..3], mm1 = src[1..4] */
        "movd %0, %%mm0\n\t"
        "movd %1, %%mm1\n\t"
        "punpcklbw %%mm7, %%mm0\n\t"
        "punpcklbw %%mm7, %%mm1\n\t"
        : : "m" (src[0]), "m" (src[1]));
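
    /* As in the 8-wide version above, each source row is loaded only once:
     * the row read inside the loop doubles as the top row of the next pass. */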

    for(i=0; i<h; i++) {
        asm volatile(
            /* mm2 = A * src[0..3] + B * src[1..4] */
            "movq %%mm0, %%mm2\n\t"
            "pmullw %0, %%mm2\n\t"
            "pmullw %%mm5, %%mm1\n\t"
            "paddw %%mm1, %%mm2\n\t"
            : : "m" (AA));

        src += stride;
        asm volatile(
            /* mm0 = src[0..3], mm1 = src[1..4] */
            "movd %0, %%mm0\n\t"
            "movd %1, %%mm1\n\t"
            "punpcklbw %%mm7, %%mm0\n\t"
            "punpcklbw %%mm7, %%mm1\n\t"
            : : "m" (src[0]), "m" (src[1]));

        asm volatile(
            /* mm2 += C * src[0..3] + D * src[1..4] */
            "movq %%mm0, %%mm3\n\t"
            "movq %%mm1, %%mm4\n\t"
            "pmullw %%mm6, %%mm3\n\t"
            "pmullw %0, %%mm4\n\t"
            "paddw %%mm3, %%mm2\n\t"
            "paddw %%mm4, %%mm2\n\t"
            : : "m" (DD));

        asm volatile(
            /* dst[0..3] = pack((mm2 + 32) >> 6) */
            "paddw %1, %%mm2\n\t"
            "psrlw $6, %%mm2\n\t"
            "packuswb %%mm7, %%mm2\n\t"
            H264_CHROMA_OP4(%0, %%mm2, %%mm3)
            "movd %%mm2, %0\n\t"
            : "=m" (dst[0]) : "m" (ff_pw_32));
        dst += stride;
    }
}

#ifdef H264_CHROMA_MC2_TMPL
static void H264_CHROMA_MC2_TMPL(uint8_t *dst/*align 2*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y)
{
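    /* AB and CD pack the bilinear weights pairwise for pmaddwd:
     * AB = (B<<16) | A and CD = (D<<16) | C, where A = (8-x)*(8-y),
     * B = x*(8-y), C = (8-x)*y and D = x*y. */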
    int CD=((1<<16)-1)*x*y + 8*y;
    int AB=((8<<16)-8)*x + 64 - CD;
    int i;

    asm volatile(
        /* mm5 = {A,B,A,B} */
        /* mm6 = {C,D,C,D} */
        "movd %0, %%mm5\n\t"
        "movd %1, %%mm6\n\t"
        "punpckldq %%mm5, %%mm5\n\t"
        "punpckldq %%mm6, %%mm6\n\t"
        "pxor %%mm7, %%mm7\n\t"
        :: "r"(AB), "r"(CD));

    asm volatile(
        /* mm0 = src[0,1,1,2] */
        "movd %0, %%mm0\n\t"
        "punpcklbw %%mm7, %%mm0\n\t"
        "pshufw $0x94, %%mm0, %%mm0\n\t"
        :: "m"(src[0]));

    for(i=0; i<h; i++) {
        asm volatile(
            /* mm1 = A * src[0,1] + B * src[1,2] */
            "movq %%mm0, %%mm1\n\t"
            "pmaddwd %%mm5, %%mm1\n\t"
            ::);

        src += stride;
        asm volatile(
            /* mm0 = src[0,1,1,2] */
            "movd %0, %%mm0\n\t"
            "punpcklbw %%mm7, %%mm0\n\t"
            "pshufw $0x94, %%mm0, %%mm0\n\t"
            :: "m"(src[0]));

        asm volatile(
            /* mm1 += C * src[0,1] + D * src[1,2] */
            "movq %%mm0, %%mm2\n\t"
            "pmaddwd %%mm6, %%mm2\n\t"
            "paddw %%mm2, %%mm1\n\t"
            ::);

        asm volatile(
            /* dst[0,1] = pack((mm1 + 32) >> 6) */
            "paddw %1, %%mm1\n\t"
            "psrlw $6, %%mm1\n\t"
            "packssdw %%mm7, %%mm1\n\t"
            "packuswb %%mm7, %%mm1\n\t"
            /* writes garbage to the right of dst.
             * ok because partitions are processed from left to right. */
            H264_CHROMA_OP4(%0, %%mm1, %%mm3)
            "movd %%mm1, %0\n\t"
            : "=m" (dst[0]) : "m" (ff_pw_32));
        dst += stride;
    }
}
#endif