VirtualBox

source: vbox/trunk/src/recompiler/target-i386/ops_sse.h@ 50064

Last change on this file since 50064 was 39376, checked in by vboxsync, 13 years ago

recompiler: merged upstream changeset bc4268998d154b9b3cc86a7b6bd932cc974591c9 (x86: fix pcmpestrm and pcmpistrm)

  • Property svn:eol-style set to native
File size: 59.3 KB
Line 
1/*
2 * MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI support
3 *
4 * Copyright (c) 2005 Fabrice Bellard
5 * Copyright (c) 2008 Intel Corporation <[email protected]>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21/*
22 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29
#if SHIFT == 0
/* MMX flavour: 64-bit registers, helpers get the _mmx suffix, and the
   XMM-only statements expand to nothing. */
#define Reg MMXReg
#define XMM_ONLY(...)
#define B(n) MMX_B(n)
#define W(n) MMX_W(n)
#define L(n) MMX_L(n)
#define Q(n) q
#define SUFFIX _mmx
#else
/* SSE flavour: 128-bit registers, helpers get the _xmm suffix. */
#define Reg XMMReg
#define XMM_ONLY(...) __VA_ARGS__
#define B(n) XMM_B(n)
#define W(n) XMM_W(n)
#define L(n) XMM_L(n)
#define Q(n) XMM_Q(n)
#define SUFFIX _xmm
#endif
47
/* PSRLW: logical right shift of each 16-bit lane by the count in the
   low quadword of *s; a count above 15 clears the destination, which
   is the hardware behaviour. */
void glue(helper_psrlw, SUFFIX)(Reg *d, Reg *s)
{
    int shift;

    if (s->Q(0) > 15) {
        d->Q(0) = 0;
#if SHIFT == 1
        d->Q(1) = 0;
#endif
    } else {
        shift = s->B(0);
        d->W(0) >>= shift;
        d->W(1) >>= shift;
        d->W(2) >>= shift;
        d->W(3) >>= shift;
#if SHIFT == 1
        d->W(4) >>= shift;
        d->W(5) >>= shift;
        d->W(6) >>= shift;
        d->W(7) >>= shift;
#endif
    }
}

/* PSRAW: arithmetic right shift of each 16-bit lane; counts above 15
   saturate to 15 so every lane becomes its sign extension. */
void glue(helper_psraw, SUFFIX)(Reg *d, Reg *s)
{
    int shift;

    if (s->Q(0) > 15) {
        shift = 15;
    } else {
        shift = s->B(0);
    }
    d->W(0) = (int16_t)d->W(0) >> shift;
    d->W(1) = (int16_t)d->W(1) >> shift;
    d->W(2) = (int16_t)d->W(2) >> shift;
    d->W(3) = (int16_t)d->W(3) >> shift;
#if SHIFT == 1
    d->W(4) = (int16_t)d->W(4) >> shift;
    d->W(5) = (int16_t)d->W(5) >> shift;
    d->W(6) = (int16_t)d->W(6) >> shift;
    d->W(7) = (int16_t)d->W(7) >> shift;
#endif
}

/* PSLLW: left shift of each 16-bit lane; counts above 15 clear the
   destination. */
void glue(helper_psllw, SUFFIX)(Reg *d, Reg *s)
{
    int shift;

    if (s->Q(0) > 15) {
        d->Q(0) = 0;
#if SHIFT == 1
        d->Q(1) = 0;
#endif
    } else {
        shift = s->B(0);
        d->W(0) <<= shift;
        d->W(1) <<= shift;
        d->W(2) <<= shift;
        d->W(3) <<= shift;
#if SHIFT == 1
        d->W(4) <<= shift;
        d->W(5) <<= shift;
        d->W(6) <<= shift;
        d->W(7) <<= shift;
#endif
    }
}
116
/* PSRLD: logical right shift of each 32-bit lane; counts above 31
   clear the destination. */
void glue(helper_psrld, SUFFIX)(Reg *d, Reg *s)
{
    int shift;

    if (s->Q(0) > 31) {
        d->Q(0) = 0;
#if SHIFT == 1
        d->Q(1) = 0;
#endif
    } else {
        shift = s->B(0);
        d->L(0) >>= shift;
        d->L(1) >>= shift;
#if SHIFT == 1
        d->L(2) >>= shift;
        d->L(3) >>= shift;
#endif
    }
}

/* PSRAD: arithmetic right shift of each 32-bit lane; counts above 31
   saturate to 31 so every lane becomes its sign extension. */
void glue(helper_psrad, SUFFIX)(Reg *d, Reg *s)
{
    int shift;

    if (s->Q(0) > 31) {
        shift = 31;
    } else {
        shift = s->B(0);
    }
    d->L(0) = (int32_t)d->L(0) >> shift;
    d->L(1) = (int32_t)d->L(1) >> shift;
#if SHIFT == 1
    d->L(2) = (int32_t)d->L(2) >> shift;
    d->L(3) = (int32_t)d->L(3) >> shift;
#endif
}

/* PSLLD: left shift of each 32-bit lane; counts above 31 clear the
   destination. */
void glue(helper_pslld, SUFFIX)(Reg *d, Reg *s)
{
    int shift;

    if (s->Q(0) > 31) {
        d->Q(0) = 0;
#if SHIFT == 1
        d->Q(1) = 0;
#endif
    } else {
        shift = s->B(0);
        d->L(0) <<= shift;
        d->L(1) <<= shift;
#if SHIFT == 1
        d->L(2) <<= shift;
        d->L(3) <<= shift;
#endif
    }
}
173
/* PSRLQ: logical right shift of each 64-bit lane; counts above 63
   clear the destination. */
void glue(helper_psrlq, SUFFIX)(Reg *d, Reg *s)
{
    int shift;

    if (s->Q(0) > 63) {
        d->Q(0) = 0;
#if SHIFT == 1
        d->Q(1) = 0;
#endif
    } else {
        shift = s->B(0);
        d->Q(0) >>= shift;
#if SHIFT == 1
        d->Q(1) >>= shift;
#endif
    }
}

/* PSLLQ: left shift of each 64-bit lane; counts above 63 clear the
   destination. */
void glue(helper_psllq, SUFFIX)(Reg *d, Reg *s)
{
    int shift;

    if (s->Q(0) > 63) {
        d->Q(0) = 0;
#if SHIFT == 1
        d->Q(1) = 0;
#endif
    } else {
        shift = s->B(0);
        d->Q(0) <<= shift;
#if SHIFT == 1
        d->Q(1) <<= shift;
#endif
    }
}
209
210#if SHIFT == 1
/* PSRLDQ: byte-wise right shift of the whole 128-bit register; counts
   above 16 are clamped, making the result all zeroes. */
void glue(helper_psrldq, SUFFIX)(Reg *d, Reg *s)
{
    int shift, i;

    shift = s->L(0);
    if (shift > 16)
        shift = 16;
    for(i = 0; i < 16 - shift; i++)
        d->B(i) = d->B(i + shift);
    for(i = 16 - shift; i < 16; i++)
        d->B(i) = 0;
}

/* PSLLDQ: byte-wise left shift of the whole 128-bit register; counts
   above 16 are clamped, making the result all zeroes.  Bytes are moved
   top-down so the source bytes are read before being overwritten. */
void glue(helper_pslldq, SUFFIX)(Reg *d, Reg *s)
{
    int shift, i;

    shift = s->L(0);
    if (shift > 16)
        shift = 16;
    for(i = 15; i >= shift; i--)
        d->B(i) = d->B(i - shift);
    for(i = 0; i < shift; i++)
        d->B(i) = 0;
}
236#endif
237
/* Expand to a binary element-wise helper applying F to each 8-bit lane
   (8 lanes for MMX, 16 for SSE). */
#define SSE_HELPER_B(name, F)\
void glue(name, SUFFIX) (Reg *d, Reg *s)\
{\
    d->B(0) = F(d->B(0), s->B(0));\
    d->B(1) = F(d->B(1), s->B(1));\
    d->B(2) = F(d->B(2), s->B(2));\
    d->B(3) = F(d->B(3), s->B(3));\
    d->B(4) = F(d->B(4), s->B(4));\
    d->B(5) = F(d->B(5), s->B(5));\
    d->B(6) = F(d->B(6), s->B(6));\
    d->B(7) = F(d->B(7), s->B(7));\
    XMM_ONLY(\
    d->B(8) = F(d->B(8), s->B(8));\
    d->B(9) = F(d->B(9), s->B(9));\
    d->B(10) = F(d->B(10), s->B(10));\
    d->B(11) = F(d->B(11), s->B(11));\
    d->B(12) = F(d->B(12), s->B(12));\
    d->B(13) = F(d->B(13), s->B(13));\
    d->B(14) = F(d->B(14), s->B(14));\
    d->B(15) = F(d->B(15), s->B(15));\
    )\
}

/* Same for 16-bit lanes (4 for MMX, 8 for SSE). */
#define SSE_HELPER_W(name, F)\
void glue(name, SUFFIX) (Reg *d, Reg *s)\
{\
    d->W(0) = F(d->W(0), s->W(0));\
    d->W(1) = F(d->W(1), s->W(1));\
    d->W(2) = F(d->W(2), s->W(2));\
    d->W(3) = F(d->W(3), s->W(3));\
    XMM_ONLY(\
    d->W(4) = F(d->W(4), s->W(4));\
    d->W(5) = F(d->W(5), s->W(5));\
    d->W(6) = F(d->W(6), s->W(6));\
    d->W(7) = F(d->W(7), s->W(7));\
    )\
}

/* Same for 32-bit lanes (2 for MMX, 4 for SSE). */
#define SSE_HELPER_L(name, F)\
void glue(name, SUFFIX) (Reg *d, Reg *s)\
{\
    d->L(0) = F(d->L(0), s->L(0));\
    d->L(1) = F(d->L(1), s->L(1));\
    XMM_ONLY(\
    d->L(2) = F(d->L(2), s->L(2));\
    d->L(3) = F(d->L(3), s->L(3));\
    )\
}

/* Same for 64-bit lanes (1 for MMX, 2 for SSE). */
#define SSE_HELPER_Q(name, F)\
void glue(name, SUFFIX) (Reg *d, Reg *s)\
{\
    d->Q(0) = F(d->Q(0), s->Q(0));\
    XMM_ONLY(\
    d->Q(1) = F(d->Q(1), s->Q(1));\
    )\
}
295
296#if SHIFT == 0
/* Saturate x to the unsigned 8-bit range [0, 255]. */
static inline int satub(int x)
{
    return x < 0 ? 0 : (x > 255 ? 255 : x);
}
306
/* Saturate x to the unsigned 16-bit range [0, 65535]. */
static inline int satuw(int x)
{
    return x < 0 ? 0 : (x > 65535 ? 65535 : x);
}
316
/* Saturate x to the signed 8-bit range [-128, 127]. */
static inline int satsb(int x)
{
    return x < -128 ? -128 : (x > 127 ? 127 : x);
}
326
/* Saturate x to the signed 16-bit range [-32768, 32767]. */
static inline int satsw(int x)
{
    return x < -32768 ? -32768 : (x > 32767 ? 32767 : x);
}
336
/* Integer lane operations plugged into the SSE_HELPER_* templates.
   Every expansion is now fully parenthesised: the original ternary and
   shift macros (FMINUB, FCMPGTB, FAVG, ...) expanded to bare
   expressions, which misparse when the macro is used inside a larger
   expression.  Current expansion sites are plain assignments, so the
   added parentheses preserve behaviour. */
#define FADD(a, b) ((a) + (b))
#define FADDUB(a, b) satub((a) + (b))
#define FADDUW(a, b) satuw((a) + (b))
#define FADDSB(a, b) satsb((int8_t)(a) + (int8_t)(b))
#define FADDSW(a, b) satsw((int16_t)(a) + (int16_t)(b))

#define FSUB(a, b) ((a) - (b))
#define FSUBUB(a, b) satub((a) - (b))
#define FSUBUW(a, b) satuw((a) - (b))
#define FSUBSB(a, b) satsb((int8_t)(a) - (int8_t)(b))
#define FSUBSW(a, b) satsw((int16_t)(a) - (int16_t)(b))
#define FMINUB(a, b) (((a) < (b)) ? (a) : (b))
#define FMINSW(a, b) (((int16_t)(a) < (int16_t)(b)) ? (a) : (b))
#define FMAXUB(a, b) (((a) > (b)) ? (a) : (b))
#define FMAXSW(a, b) (((int16_t)(a) > (int16_t)(b)) ? (a) : (b))

#define FAND(a, b) ((a) & (b))
#define FANDN(a, b) ((~(a)) & (b))
#define FOR(a, b) ((a) | (b))
#define FXOR(a, b) ((a) ^ (b))

/* Compares yield all-ones (-1) or all-zeroes per lane. */
#define FCMPGTB(a, b) ((int8_t)(a) > (int8_t)(b) ? -1 : 0)
#define FCMPGTW(a, b) ((int16_t)(a) > (int16_t)(b) ? -1 : 0)
#define FCMPGTL(a, b) ((int32_t)(a) > (int32_t)(b) ? -1 : 0)
#define FCMPEQ(a, b) ((a) == (b) ? -1 : 0)

#define FMULLW(a, b) ((a) * (b))
/* 3DNow! PMULHRW: signed 16x16 multiply, rounded, high half. */
#define FMULHRW(a, b) (((int16_t)(a) * (int16_t)(b) + 0x8000) >> 16)
#define FMULHUW(a, b) ((a) * (b) >> 16)
#define FMULHW(a, b) ((int16_t)(a) * (int16_t)(b) >> 16)

/* Rounded unsigned average, as used by PAVGB/PAVGW. */
#define FAVG(a, b) (((a) + (b) + 1) >> 1)
369#endif
370
/* Integer lane arithmetic, instantiated from the SSE_HELPER_* templates. */
SSE_HELPER_B(helper_paddb, FADD)
SSE_HELPER_W(helper_paddw, FADD)
SSE_HELPER_L(helper_paddl, FADD)
SSE_HELPER_Q(helper_paddq, FADD)

SSE_HELPER_B(helper_psubb, FSUB)
SSE_HELPER_W(helper_psubw, FSUB)
SSE_HELPER_L(helper_psubl, FSUB)
SSE_HELPER_Q(helper_psubq, FSUB)

/* Saturating add/subtract. */
SSE_HELPER_B(helper_paddusb, FADDUB)
SSE_HELPER_B(helper_paddsb, FADDSB)
SSE_HELPER_B(helper_psubusb, FSUBUB)
SSE_HELPER_B(helper_psubsb, FSUBSB)

SSE_HELPER_W(helper_paddusw, FADDUW)
SSE_HELPER_W(helper_paddsw, FADDSW)
SSE_HELPER_W(helper_psubusw, FSUBUW)
SSE_HELPER_W(helper_psubsw, FSUBSW)

/* Min/max. */
SSE_HELPER_B(helper_pminub, FMINUB)
SSE_HELPER_B(helper_pmaxub, FMAXUB)

SSE_HELPER_W(helper_pminsw, FMINSW)
SSE_HELPER_W(helper_pmaxsw, FMAXSW)

/* Bitwise logic on whole quadwords. */
SSE_HELPER_Q(helper_pand, FAND)
SSE_HELPER_Q(helper_pandn, FANDN)
SSE_HELPER_Q(helper_por, FOR)
SSE_HELPER_Q(helper_pxor, FXOR)

/* Compares set each lane to all-ones or all-zeroes. */
SSE_HELPER_B(helper_pcmpgtb, FCMPGTB)
SSE_HELPER_W(helper_pcmpgtw, FCMPGTW)
SSE_HELPER_L(helper_pcmpgtl, FCMPGTL)

SSE_HELPER_B(helper_pcmpeqb, FCMPEQ)
SSE_HELPER_W(helper_pcmpeqw, FCMPEQ)
SSE_HELPER_L(helper_pcmpeql, FCMPEQ)

/* Multiplies. */
SSE_HELPER_W(helper_pmullw, FMULLW)
#if SHIFT == 0
/* 3DNow! PMULHRW exists only in the MMX instantiation. */
SSE_HELPER_W(helper_pmulhrw, FMULHRW)
#endif
SSE_HELPER_W(helper_pmulhuw, FMULHUW)
SSE_HELPER_W(helper_pmulhw, FMULHW)

/* Rounded unsigned averages. */
SSE_HELPER_B(helper_pavgb, FAVG)
SSE_HELPER_W(helper_pavgw, FAVG)
419
/* PMULUDQ: unsigned 32x32 -> 64 multiply of the even dword lanes. */
void glue(helper_pmuludq, SUFFIX) (Reg *d, Reg *s)
{
    d->Q(0) = (uint64_t)s->L(0) * (uint64_t)d->L(0);
#if SHIFT == 1
    d->Q(1) = (uint64_t)s->L(2) * (uint64_t)d->L(2);
#endif
}
427
/* PMADDWD: signed 16x16 multiplies, added pairwise into 32-bit lanes
   (2 lanes for MMX, 4 for SSE). */
void glue(helper_pmaddwd, SUFFIX) (Reg *d, Reg *s)
{
    int i;

    for(i = 0; i < (2 << SHIFT); i++) {
        d->L(i) = (int16_t)s->W(2*i) * (int16_t)d->W(2*i) +
            (int16_t)s->W(2*i+1) * (int16_t)d->W(2*i+1);
    }
}
437
438#if SHIFT == 0
/* Absolute value; callers only pass byte differences, so INT_MIN
   cannot occur. */
static inline int abs1(int a)
{
    return a < 0 ? -a : a;
}
446#endif
/* PSADBW: sum of absolute byte differences, one 16-bit sum per
   8-byte group, stored zero-extended into each quadword. */
void glue(helper_psadbw, SUFFIX) (Reg *d, Reg *s)
{
    unsigned int val;

    val = 0;
    val += abs1(d->B(0) - s->B(0));
    val += abs1(d->B(1) - s->B(1));
    val += abs1(d->B(2) - s->B(2));
    val += abs1(d->B(3) - s->B(3));
    val += abs1(d->B(4) - s->B(4));
    val += abs1(d->B(5) - s->B(5));
    val += abs1(d->B(6) - s->B(6));
    val += abs1(d->B(7) - s->B(7));
    d->Q(0) = val;
#if SHIFT == 1
    val = 0;
    val += abs1(d->B(8) - s->B(8));
    val += abs1(d->B(9) - s->B(9));
    val += abs1(d->B(10) - s->B(10));
    val += abs1(d->B(11) - s->B(11));
    val += abs1(d->B(12) - s->B(12));
    val += abs1(d->B(13) - s->B(13));
    val += abs1(d->B(14) - s->B(14));
    val += abs1(d->B(15) - s->B(15));
    d->Q(1) = val;
#endif
}
474
/* MASKMOVQ/MASKMOVDQU: store each byte of *d to guest memory at a0+i
   when the corresponding mask byte in *s has its top bit set. */
void glue(helper_maskmov, SUFFIX) (Reg *d, Reg *s, target_ulong a0)
{
    int i;
    for(i = 0; i < (8 << SHIFT); i++) {
        if (s->B(i) & 0x80)
            stb(a0 + i, d->B(i));
    }
}
483
/* Load a 32-bit value into the low dword, zeroing the rest of the
   register. */
void glue(helper_movl_mm_T0, SUFFIX) (Reg *d, uint32_t val)
{
    d->L(0) = val;
    d->L(1) = 0;
#if SHIFT == 1
    d->Q(1) = 0;
#endif
}

#ifdef TARGET_X86_64
/* Load a 64-bit value into the low quadword, zeroing the high half of
   an XMM register. */
void glue(helper_movq_mm_T0, SUFFIX) (Reg *d, uint64_t val)
{
    d->Q(0) = val;
#if SHIFT == 1
    d->Q(1) = 0;
#endif
}
#endif
502
503#if SHIFT == 0
/* PSHUFW: select each 16-bit lane of the result from *s using two
   bits of the immediate; the temporary allows d == s. */
void glue(helper_pshufw, SUFFIX) (Reg *d, Reg *s, int order)
{
    Reg r;
    r.W(0) = s->W(order & 3);
    r.W(1) = s->W((order >> 2) & 3);
    r.W(2) = s->W((order >> 4) & 3);
    r.W(3) = s->W((order >> 6) & 3);
    *d = r;
}
513#else
/* SHUFPS: low two result lanes come from *d, high two from *s, each
   selected by two immediate bits; temporary allows d == s. */
void helper_shufps(Reg *d, Reg *s, int order)
{
    Reg r;
    r.L(0) = d->L(order & 3);
    r.L(1) = d->L((order >> 2) & 3);
    r.L(2) = s->L((order >> 4) & 3);
    r.L(3) = s->L((order >> 6) & 3);
    *d = r;
}

/* SHUFPD: low quadword selected from *d, high from *s. */
void helper_shufpd(Reg *d, Reg *s, int order)
{
    Reg r;
    r.Q(0) = d->Q(order & 1);
    r.Q(1) = s->Q((order >> 1) & 1);
    *d = r;
}

/* PSHUFD: all four dword lanes selected from *s. */
void glue(helper_pshufd, SUFFIX) (Reg *d, Reg *s, int order)
{
    Reg r;
    r.L(0) = s->L(order & 3);
    r.L(1) = s->L((order >> 2) & 3);
    r.L(2) = s->L((order >> 4) & 3);
    r.L(3) = s->L((order >> 6) & 3);
    *d = r;
}

/* PSHUFLW: shuffle the low four words, copy the high quadword. */
void glue(helper_pshuflw, SUFFIX) (Reg *d, Reg *s, int order)
{
    Reg r;
    r.W(0) = s->W(order & 3);
    r.W(1) = s->W((order >> 2) & 3);
    r.W(2) = s->W((order >> 4) & 3);
    r.W(3) = s->W((order >> 6) & 3);
    r.Q(1) = s->Q(1);
    *d = r;
}

/* PSHUFHW: copy the low quadword, shuffle the high four words. */
void glue(helper_pshufhw, SUFFIX) (Reg *d, Reg *s, int order)
{
    Reg r;
    r.Q(0) = s->Q(0);
    r.W(4) = s->W(4 + (order & 3));
    r.W(5) = s->W(4 + ((order >> 2) & 3));
    r.W(6) = s->W(4 + ((order >> 4) & 3));
    r.W(7) = s->W(4 + ((order >> 6) & 3));
    *d = r;
}
563#endif
564
565#if SHIFT == 1
566/* FPU ops */
567/* XXX: not accurate */
568
/* Expand to the four scalar/packed single/double variants of one FP
   operation: ps (4 floats), ss (low float only), pd (2 doubles),
   sd (low double only). */
#define SSE_HELPER_S(name, F)\
void helper_ ## name ## ps (Reg *d, Reg *s)\
{\
    d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0));\
    d->XMM_S(1) = F(32, d->XMM_S(1), s->XMM_S(1));\
    d->XMM_S(2) = F(32, d->XMM_S(2), s->XMM_S(2));\
    d->XMM_S(3) = F(32, d->XMM_S(3), s->XMM_S(3));\
}\
\
void helper_ ## name ## ss (Reg *d, Reg *s)\
{\
    d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0));\
}\
void helper_ ## name ## pd (Reg *d, Reg *s)\
{\
    d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0));\
    d->XMM_D(1) = F(64, d->XMM_D(1), s->XMM_D(1));\
}\
\
void helper_ ## name ## sd (Reg *d, Reg *s)\
{\
    d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0));\
}

#define FPU_ADD(size, a, b) float ## size ## _add(a, b, &env->sse_status)
#define FPU_SUB(size, a, b) float ## size ## _sub(a, b, &env->sse_status)
#define FPU_MUL(size, a, b) float ## size ## _mul(a, b, &env->sse_status)
#define FPU_DIV(size, a, b) float ## size ## _div(a, b, &env->sse_status)
/* NOTE(review): min/max use raw C < / > on float32/float64 values; with
   softfloat types this compares bit patterns and ignores SSE NaN
   semantics — covered by the "not accurate" note above; confirm before
   relying on NaN behaviour. */
#define FPU_MIN(size, a, b) (a) < (b) ? (a) : (b)
#define FPU_MAX(size, a, b) (a) > (b) ? (a) : (b)
/* SQRT ignores the destination operand (unary op in binary template). */
#define FPU_SQRT(size, a, b) float ## size ## _sqrt(b, &env->sse_status)

SSE_HELPER_S(add, FPU_ADD)
SSE_HELPER_S(sub, FPU_SUB)
SSE_HELPER_S(mul, FPU_MUL)
SSE_HELPER_S(div, FPU_DIV)
SSE_HELPER_S(min, FPU_MIN)
SSE_HELPER_S(max, FPU_MAX)
SSE_HELPER_S(sqrt, FPU_SQRT)
608
609
610/* float to float conversions */
/* CVTPS2PD: widen the two low floats to doubles; locals allow d == s. */
void helper_cvtps2pd(Reg *d, Reg *s)
{
    float32 s0, s1;
    s0 = s->XMM_S(0);
    s1 = s->XMM_S(1);
    d->XMM_D(0) = float32_to_float64(s0, &env->sse_status);
    d->XMM_D(1) = float32_to_float64(s1, &env->sse_status);
}

/* CVTPD2PS: narrow both doubles to floats, zeroing the high quadword. */
void helper_cvtpd2ps(Reg *d, Reg *s)
{
    d->XMM_S(0) = float64_to_float32(s->XMM_D(0), &env->sse_status);
    d->XMM_S(1) = float64_to_float32(s->XMM_D(1), &env->sse_status);
    d->Q(1) = 0;
}

/* CVTSS2SD: scalar float -> double, upper lanes untouched. */
void helper_cvtss2sd(Reg *d, Reg *s)
{
    d->XMM_D(0) = float32_to_float64(s->XMM_S(0), &env->sse_status);
}

/* CVTSD2SS: scalar double -> float, upper lanes untouched. */
void helper_cvtsd2ss(Reg *d, Reg *s)
{
    d->XMM_S(0) = float64_to_float32(s->XMM_D(0), &env->sse_status);
}

/* integer to float */
/* CVTDQ2PS: four signed dwords to floats. */
void helper_cvtdq2ps(Reg *d, Reg *s)
{
    d->XMM_S(0) = int32_to_float32(s->XMM_L(0), &env->sse_status);
    d->XMM_S(1) = int32_to_float32(s->XMM_L(1), &env->sse_status);
    d->XMM_S(2) = int32_to_float32(s->XMM_L(2), &env->sse_status);
    d->XMM_S(3) = int32_to_float32(s->XMM_L(3), &env->sse_status);
}
645
/* CVTDQ2PD: two signed dwords to doubles; locals allow d == s. */
void helper_cvtdq2pd(Reg *d, Reg *s)
{
    int32_t l0, l1;
    l0 = (int32_t)s->XMM_L(0);
    l1 = (int32_t)s->XMM_L(1);
    d->XMM_D(0) = int32_to_float64(l0, &env->sse_status);
    d->XMM_D(1) = int32_to_float64(l1, &env->sse_status);
}

/* CVTPI2PS: two signed dwords from an MMX register to floats in the
   low half of an XMM register (high half untouched). */
void helper_cvtpi2ps(XMMReg *d, MMXReg *s)
{
    d->XMM_S(0) = int32_to_float32(s->MMX_L(0), &env->sse_status);
    d->XMM_S(1) = int32_to_float32(s->MMX_L(1), &env->sse_status);
}

/* CVTPI2PD: two signed dwords from an MMX register to doubles. */
void helper_cvtpi2pd(XMMReg *d, MMXReg *s)
{
    d->XMM_D(0) = int32_to_float64(s->MMX_L(0), &env->sse_status);
    d->XMM_D(1) = int32_to_float64(s->MMX_L(1), &env->sse_status);
}

/* CVTSI2SS: 32-bit integer to scalar float. */
void helper_cvtsi2ss(XMMReg *d, uint32_t val)
{
    d->XMM_S(0) = int32_to_float32(val, &env->sse_status);
}

/* CVTSI2SD: 32-bit integer to scalar double. */
void helper_cvtsi2sd(XMMReg *d, uint32_t val)
{
    d->XMM_D(0) = int32_to_float64(val, &env->sse_status);
}

#ifdef TARGET_X86_64
/* 64-bit integer variants of the scalar conversions. */
void helper_cvtsq2ss(XMMReg *d, uint64_t val)
{
    d->XMM_S(0) = int64_to_float32(val, &env->sse_status);
}

void helper_cvtsq2sd(XMMReg *d, uint64_t val)
{
    d->XMM_D(0) = int64_to_float64(val, &env->sse_status);
}
#endif
688
/* float to integer (rounding per the current MXCSR mode) */
void helper_cvtps2dq(XMMReg *d, XMMReg *s)
{
    d->XMM_L(0) = float32_to_int32(s->XMM_S(0), &env->sse_status);
    d->XMM_L(1) = float32_to_int32(s->XMM_S(1), &env->sse_status);
    d->XMM_L(2) = float32_to_int32(s->XMM_S(2), &env->sse_status);
    d->XMM_L(3) = float32_to_int32(s->XMM_S(3), &env->sse_status);
}

/* CVTPD2DQ: two doubles to dwords, high quadword zeroed. */
void helper_cvtpd2dq(XMMReg *d, XMMReg *s)
{
    d->XMM_L(0) = float64_to_int32(s->XMM_D(0), &env->sse_status);
    d->XMM_L(1) = float64_to_int32(s->XMM_D(1), &env->sse_status);
    d->XMM_Q(1) = 0;
}

/* CVTPS2PI / CVTPD2PI: results go to an MMX register. */
void helper_cvtps2pi(MMXReg *d, XMMReg *s)
{
    d->MMX_L(0) = float32_to_int32(s->XMM_S(0), &env->sse_status);
    d->MMX_L(1) = float32_to_int32(s->XMM_S(1), &env->sse_status);
}

void helper_cvtpd2pi(MMXReg *d, XMMReg *s)
{
    d->MMX_L(0) = float64_to_int32(s->XMM_D(0), &env->sse_status);
    d->MMX_L(1) = float64_to_int32(s->XMM_D(1), &env->sse_status);
}

/* Scalar conversions returning the integer result directly. */
int32_t helper_cvtss2si(XMMReg *s)
{
    return float32_to_int32(s->XMM_S(0), &env->sse_status);
}

int32_t helper_cvtsd2si(XMMReg *s)
{
    return float64_to_int32(s->XMM_D(0), &env->sse_status);
}

#ifdef TARGET_X86_64
int64_t helper_cvtss2sq(XMMReg *s)
{
    return float32_to_int64(s->XMM_S(0), &env->sse_status);
}

int64_t helper_cvtsd2sq(XMMReg *s)
{
    return float64_to_int64(s->XMM_D(0), &env->sse_status);
}
#endif
738
/* float to integer, truncating toward zero (the CVTT* forms) */
void helper_cvttps2dq(XMMReg *d, XMMReg *s)
{
    d->XMM_L(0) = float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status);
    d->XMM_L(1) = float32_to_int32_round_to_zero(s->XMM_S(1), &env->sse_status);
    d->XMM_L(2) = float32_to_int32_round_to_zero(s->XMM_S(2), &env->sse_status);
    d->XMM_L(3) = float32_to_int32_round_to_zero(s->XMM_S(3), &env->sse_status);
}

/* CVTTPD2DQ: two doubles truncated to dwords, high quadword zeroed. */
void helper_cvttpd2dq(XMMReg *d, XMMReg *s)
{
    d->XMM_L(0) = float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status);
    d->XMM_L(1) = float64_to_int32_round_to_zero(s->XMM_D(1), &env->sse_status);
    d->XMM_Q(1) = 0;
}

/* CVTTPS2PI / CVTTPD2PI: truncated results into an MMX register. */
void helper_cvttps2pi(MMXReg *d, XMMReg *s)
{
    d->MMX_L(0) = float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status);
    d->MMX_L(1) = float32_to_int32_round_to_zero(s->XMM_S(1), &env->sse_status);
}

void helper_cvttpd2pi(MMXReg *d, XMMReg *s)
{
    d->MMX_L(0) = float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status);
    d->MMX_L(1) = float64_to_int32_round_to_zero(s->XMM_D(1), &env->sse_status);
}

/* Scalar truncating conversions returning the integer directly. */
int32_t helper_cvttss2si(XMMReg *s)
{
    return float32_to_int32_round_to_zero(s->XMM_S(0), &env->sse_status);
}

int32_t helper_cvttsd2si(XMMReg *s)
{
    return float64_to_int32_round_to_zero(s->XMM_D(0), &env->sse_status);
}

#ifdef TARGET_X86_64
int64_t helper_cvttss2sq(XMMReg *s)
{
    return float32_to_int64_round_to_zero(s->XMM_S(0), &env->sse_status);
}

int64_t helper_cvttsd2sq(XMMReg *s)
{
    return float64_to_int64_round_to_zero(s->XMM_D(0), &env->sse_status);
}
#endif
788
/* RSQRTPS/RSQRTSS, RCPPS/RCPSS: approximate reciprocal square root
   and reciprocal, per lane (approx_* are host-side approximations). */
void helper_rsqrtps(XMMReg *d, XMMReg *s)
{
    d->XMM_S(0) = approx_rsqrt(s->XMM_S(0));
    d->XMM_S(1) = approx_rsqrt(s->XMM_S(1));
    d->XMM_S(2) = approx_rsqrt(s->XMM_S(2));
    d->XMM_S(3) = approx_rsqrt(s->XMM_S(3));
}

void helper_rsqrtss(XMMReg *d, XMMReg *s)
{
    d->XMM_S(0) = approx_rsqrt(s->XMM_S(0));
}

void helper_rcpps(XMMReg *d, XMMReg *s)
{
    d->XMM_S(0) = approx_rcp(s->XMM_S(0));
    d->XMM_S(1) = approx_rcp(s->XMM_S(1));
    d->XMM_S(2) = approx_rcp(s->XMM_S(2));
    d->XMM_S(3) = approx_rcp(s->XMM_S(3));
}

void helper_rcpss(XMMReg *d, XMMReg *s)
{
    d->XMM_S(0) = approx_rcp(s->XMM_S(0));
}
814
/* EXTRQ core (AMD SSE4a): extract the LEN-bit field starting at bit
 * SHIFT from SRC.  A length field of 0 encodes a full 64-bit
 * extraction, hence the special case (1ULL << 64 would be undefined).
 * Uses ~0ULL for the all-ones mask, matching helper_insertq below
 * (the former ~0LL relied on implicit signed-to-unsigned conversion).
 */
static inline uint64_t helper_extrq(uint64_t src, int shift, int len)
{
    uint64_t mask;

    if (len == 0) {
        mask = ~0ULL;
    } else {
        mask = (1ULL << len) - 1;
    }
    return (src >> shift) & mask;
}
826
/* EXTRQ (register form): the source register supplies the field length
   in byte 0 and the start index in byte 1. */
void helper_extrq_r(XMMReg *d, XMMReg *s)
{
    d->XMM_Q(0) = helper_extrq(d->XMM_Q(0), s->XMM_B(1), s->XMM_B(0));
}

/* EXTRQ (immediate form): index and length come from the instruction. */
void helper_extrq_i(XMMReg *d, int index, int length)
{
    d->XMM_Q(0) = helper_extrq(d->XMM_Q(0), index, length);
}
836
/* INSERTQ core (AMD SSE4a): deposit the low LEN bits of SRC at bit
 * position SHIFT, clearing the destination field first.  A length
 * field of 0 encodes a full 64-bit insert.
 */
static inline uint64_t helper_insertq(uint64_t src, int shift, int len)
{
    uint64_t mask = len ? (1ULL << len) - 1 : ~0ULL;

    return (src & ~(mask << shift)) | ((src & mask) << shift);
}
848
/* INSERTQ (register form): the source register supplies the field
   length in byte 8 and the insert position in byte 9. */
void helper_insertq_r(XMMReg *d, XMMReg *s)
{
    d->XMM_Q(0) = helper_insertq(s->XMM_Q(0), s->XMM_B(9), s->XMM_B(8));
}

/* INSERTQ (immediate form): index and length come from the instruction. */
void helper_insertq_i(XMMReg *d, int index, int length)
{
    d->XMM_Q(0) = helper_insertq(d->XMM_Q(0), index, length);
}
858
/* SSE3 horizontal add/sub and ADDSUB.  NOTE(review): these use raw C
   + / - on XMM_S/XMM_D lanes; with softfloat types this operates on
   bit patterns rather than IEEE values — presumably only correct in
   the non-softfloat build, consistent with the "not accurate" note
   earlier in the file.  Confirm before relying on FP semantics. */
void helper_haddps(XMMReg *d, XMMReg *s)
{
    XMMReg r;
    r.XMM_S(0) = d->XMM_S(0) + d->XMM_S(1);
    r.XMM_S(1) = d->XMM_S(2) + d->XMM_S(3);
    r.XMM_S(2) = s->XMM_S(0) + s->XMM_S(1);
    r.XMM_S(3) = s->XMM_S(2) + s->XMM_S(3);
    *d = r;
}

void helper_haddpd(XMMReg *d, XMMReg *s)
{
    XMMReg r;
    r.XMM_D(0) = d->XMM_D(0) + d->XMM_D(1);
    r.XMM_D(1) = s->XMM_D(0) + s->XMM_D(1);
    *d = r;
}

void helper_hsubps(XMMReg *d, XMMReg *s)
{
    XMMReg r;
    r.XMM_S(0) = d->XMM_S(0) - d->XMM_S(1);
    r.XMM_S(1) = d->XMM_S(2) - d->XMM_S(3);
    r.XMM_S(2) = s->XMM_S(0) - s->XMM_S(1);
    r.XMM_S(3) = s->XMM_S(2) - s->XMM_S(3);
    *d = r;
}

void helper_hsubpd(XMMReg *d, XMMReg *s)
{
    XMMReg r;
    r.XMM_D(0) = d->XMM_D(0) - d->XMM_D(1);
    r.XMM_D(1) = s->XMM_D(0) - s->XMM_D(1);
    *d = r;
}

/* ADDSUBPS/PD: subtract in even lanes, add in odd lanes. */
void helper_addsubps(XMMReg *d, XMMReg *s)
{
    d->XMM_S(0) = d->XMM_S(0) - s->XMM_S(0);
    d->XMM_S(1) = d->XMM_S(1) + s->XMM_S(1);
    d->XMM_S(2) = d->XMM_S(2) - s->XMM_S(2);
    d->XMM_S(3) = d->XMM_S(3) + s->XMM_S(3);
}

void helper_addsubpd(XMMReg *d, XMMReg *s)
{
    d->XMM_D(0) = d->XMM_D(0) - s->XMM_D(0);
    d->XMM_D(1) = d->XMM_D(1) + s->XMM_D(1);
}
908
909/* XXX: unordered */
/* Expand to the ps/ss/pd/sd variants of one FP compare; each lane is
   set to all-ones (-1) or all-zeroes via the integer views XMM_L/XMM_Q. */
#define SSE_HELPER_CMP(name, F)\
void helper_ ## name ## ps (Reg *d, Reg *s)\
{\
    d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0));\
    d->XMM_L(1) = F(32, d->XMM_S(1), s->XMM_S(1));\
    d->XMM_L(2) = F(32, d->XMM_S(2), s->XMM_S(2));\
    d->XMM_L(3) = F(32, d->XMM_S(3), s->XMM_S(3));\
}\
\
void helper_ ## name ## ss (Reg *d, Reg *s)\
{\
    d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0));\
}\
void helper_ ## name ## pd (Reg *d, Reg *s)\
{\
    d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0));\
    d->XMM_Q(1) = F(64, d->XMM_D(1), s->XMM_D(1));\
}\
\
void helper_ ## name ## sd (Reg *d, Reg *s)\
{\
    d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0));\
}

/* The eight CMPPS/CMPPD predicates; the N* forms are the logical
   negations of their counterparts. */
#define FPU_CMPEQ(size, a, b) float ## size ## _eq(a, b, &env->sse_status) ? -1 : 0
#define FPU_CMPLT(size, a, b) float ## size ## _lt(a, b, &env->sse_status) ? -1 : 0
#define FPU_CMPLE(size, a, b) float ## size ## _le(a, b, &env->sse_status) ? -1 : 0
#define FPU_CMPUNORD(size, a, b) float ## size ## _unordered(a, b, &env->sse_status) ? - 1 : 0
#define FPU_CMPNEQ(size, a, b) float ## size ## _eq(a, b, &env->sse_status) ? 0 : -1
#define FPU_CMPNLT(size, a, b) float ## size ## _lt(a, b, &env->sse_status) ? 0 : -1
#define FPU_CMPNLE(size, a, b) float ## size ## _le(a, b, &env->sse_status) ? 0 : -1
#define FPU_CMPORD(size, a, b) float ## size ## _unordered(a, b, &env->sse_status) ? 0 : -1

SSE_HELPER_CMP(cmpeq, FPU_CMPEQ)
SSE_HELPER_CMP(cmplt, FPU_CMPLT)
SSE_HELPER_CMP(cmple, FPU_CMPLE)
SSE_HELPER_CMP(cmpunord, FPU_CMPUNORD)
SSE_HELPER_CMP(cmpneq, FPU_CMPNEQ)
SSE_HELPER_CMP(cmpnlt, FPU_CMPNLT)
SSE_HELPER_CMP(cmpnle, FPU_CMPNLE)
SSE_HELPER_CMP(cmpord, FPU_CMPORD)
951
/* EFLAGS for (U)COMISS/(U)COMISD, indexed by the float compare result
   plus one: less (-1) -> CF, equal (0) -> ZF, greater (1) -> 0,
   unordered (2) -> ZF|PF|CF. */
static const int comis_eflags[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

/* UCOMISS: quiet compare (no exception on quiet NaN). */
void helper_ucomiss(Reg *d, Reg *s)
{
    int ret;
    float32 s0, s1;

    s0 = d->XMM_S(0);
    s1 = s->XMM_S(0);
    ret = float32_compare_quiet(s0, s1, &env->sse_status);
    CC_SRC = comis_eflags[ret + 1];
}

/* COMISS: signalling compare. */
void helper_comiss(Reg *d, Reg *s)
{
    int ret;
    float32 s0, s1;

    s0 = d->XMM_S(0);
    s1 = s->XMM_S(0);
    ret = float32_compare(s0, s1, &env->sse_status);
    CC_SRC = comis_eflags[ret + 1];
}

/* UCOMISD: quiet double-precision compare. */
void helper_ucomisd(Reg *d, Reg *s)
{
    int ret;
    float64 d0, d1;

    d0 = d->XMM_D(0);
    d1 = s->XMM_D(0);
    ret = float64_compare_quiet(d0, d1, &env->sse_status);
    CC_SRC = comis_eflags[ret + 1];
}

/* COMISD: signalling double-precision compare. */
void helper_comisd(Reg *d, Reg *s)
{
    int ret;
    float64 d0, d1;

    d0 = d->XMM_D(0);
    d1 = s->XMM_D(0);
    ret = float64_compare(d0, d1, &env->sse_status);
    CC_SRC = comis_eflags[ret + 1];
}
997
/* MOVMSKPS: collect the sign bit of each of the four float lanes. */
uint32_t helper_movmskps(Reg *s)
{
    int b0, b1, b2, b3;
    b0 = s->XMM_L(0) >> 31;
    b1 = s->XMM_L(1) >> 31;
    b2 = s->XMM_L(2) >> 31;
    b3 = s->XMM_L(3) >> 31;
    return b0 | (b1 << 1) | (b2 << 2) | (b3 << 3);
}

/* MOVMSKPD: collect the sign bit of each double; L(1)/L(3) are the
   high dwords of the two quadwords. */
uint32_t helper_movmskpd(Reg *s)
{
    int b0, b1;
    b0 = s->XMM_L(1) >> 31;
    b1 = s->XMM_L(3) >> 31;
    return b0 | (b1 << 1);
}
1015
1016#endif
1017
/* PMOVMSKB: gather the top bit of every byte into an integer bitmask
   (bit i = sign bit of byte i); each shift/mask pair moves one sign
   bit to its final position. */
uint32_t glue(helper_pmovmskb, SUFFIX)(Reg *s)
{
    uint32_t val;
    val = 0;
    val |= (s->B(0) >> 7);
    val |= (s->B(1) >> 6) & 0x02;
    val |= (s->B(2) >> 5) & 0x04;
    val |= (s->B(3) >> 4) & 0x08;
    val |= (s->B(4) >> 3) & 0x10;
    val |= (s->B(5) >> 2) & 0x20;
    val |= (s->B(6) >> 1) & 0x40;
    val |= (s->B(7)) & 0x80;
#if SHIFT == 1
    val |= (s->B(8) << 1) & 0x0100;
    val |= (s->B(9) << 2) & 0x0200;
    val |= (s->B(10) << 3) & 0x0400;
    val |= (s->B(11) << 4) & 0x0800;
    val |= (s->B(12) << 5) & 0x1000;
    val |= (s->B(13) << 6) & 0x2000;
    val |= (s->B(14) << 7) & 0x4000;
    val |= (s->B(15) << 8) & 0x8000;
#endif
    return val;
}
1042
/* PACKSSWB: narrow the words of *d into the low half of the result
   and the words of *s into the high half, with signed saturation.
   The temporary allows d == s. */
void glue(helper_packsswb, SUFFIX) (Reg *d, Reg *s)
{
    Reg r;

    r.B(0) = satsb((int16_t)d->W(0));
    r.B(1) = satsb((int16_t)d->W(1));
    r.B(2) = satsb((int16_t)d->W(2));
    r.B(3) = satsb((int16_t)d->W(3));
#if SHIFT == 1
    r.B(4) = satsb((int16_t)d->W(4));
    r.B(5) = satsb((int16_t)d->W(5));
    r.B(6) = satsb((int16_t)d->W(6));
    r.B(7) = satsb((int16_t)d->W(7));
#endif
    /* (4 << SHIFT) is the first byte of the source half: 4 for MMX,
       8 for SSE. */
    r.B((4 << SHIFT) + 0) = satsb((int16_t)s->W(0));
    r.B((4 << SHIFT) + 1) = satsb((int16_t)s->W(1));
    r.B((4 << SHIFT) + 2) = satsb((int16_t)s->W(2));
    r.B((4 << SHIFT) + 3) = satsb((int16_t)s->W(3));
#if SHIFT == 1
    r.B(12) = satsb((int16_t)s->W(4));
    r.B(13) = satsb((int16_t)s->W(5));
    r.B(14) = satsb((int16_t)s->W(6));
    r.B(15) = satsb((int16_t)s->W(7));
#endif
    *d = r;
}

/* PACKUSWB: same layout as PACKSSWB but with unsigned saturation. */
void glue(helper_packuswb, SUFFIX) (Reg *d, Reg *s)
{
    Reg r;

    r.B(0) = satub((int16_t)d->W(0));
    r.B(1) = satub((int16_t)d->W(1));
    r.B(2) = satub((int16_t)d->W(2));
    r.B(3) = satub((int16_t)d->W(3));
#if SHIFT == 1
    r.B(4) = satub((int16_t)d->W(4));
    r.B(5) = satub((int16_t)d->W(5));
    r.B(6) = satub((int16_t)d->W(6));
    r.B(7) = satub((int16_t)d->W(7));
#endif
    r.B((4 << SHIFT) + 0) = satub((int16_t)s->W(0));
    r.B((4 << SHIFT) + 1) = satub((int16_t)s->W(1));
    r.B((4 << SHIFT) + 2) = satub((int16_t)s->W(2));
    r.B((4 << SHIFT) + 3) = satub((int16_t)s->W(3));
#if SHIFT == 1
    r.B(12) = satub((int16_t)s->W(4));
    r.B(13) = satub((int16_t)s->W(5));
    r.B(14) = satub((int16_t)s->W(6));
    r.B(15) = satub((int16_t)s->W(7));
#endif
    *d = r;
}

/* PACKSSDW: narrow dwords to words with signed saturation; same
   destination/source split as above, at word granularity. */
void glue(helper_packssdw, SUFFIX) (Reg *d, Reg *s)
{
    Reg r;

    r.W(0) = satsw(d->L(0));
    r.W(1) = satsw(d->L(1));
#if SHIFT == 1
    r.W(2) = satsw(d->L(2));
    r.W(3) = satsw(d->L(3));
#endif
    r.W((2 << SHIFT) + 0) = satsw(s->L(0));
    r.W((2 << SHIFT) + 1) = satsw(s->L(1));
#if SHIFT == 1
    r.W(6) = satsw(s->L(2));
    r.W(7) = satsw(s->L(3));
#endif
    *d = r;
}
1115
/* Expand to the punpck{l,h}{bw,wd,dq[,qdq]} helpers.  base selects the
   half of each operand to interleave: 0 = low (the "l" forms), 1 =
   high (the "h" forms); (base << (SHIFT + n)) is the first source
   element index at each granularity.  Temporaries allow d == s. */
#define UNPCK_OP(base_name, base) \
 \
void glue(helper_punpck ## base_name ## bw, SUFFIX) (Reg *d, Reg *s) \
{ \
    Reg r; \
 \
    r.B(0) = d->B((base << (SHIFT + 2)) + 0); \
    r.B(1) = s->B((base << (SHIFT + 2)) + 0); \
    r.B(2) = d->B((base << (SHIFT + 2)) + 1); \
    r.B(3) = s->B((base << (SHIFT + 2)) + 1); \
    r.B(4) = d->B((base << (SHIFT + 2)) + 2); \
    r.B(5) = s->B((base << (SHIFT + 2)) + 2); \
    r.B(6) = d->B((base << (SHIFT + 2)) + 3); \
    r.B(7) = s->B((base << (SHIFT + 2)) + 3); \
XMM_ONLY( \
    r.B(8) = d->B((base << (SHIFT + 2)) + 4); \
    r.B(9) = s->B((base << (SHIFT + 2)) + 4); \
    r.B(10) = d->B((base << (SHIFT + 2)) + 5); \
    r.B(11) = s->B((base << (SHIFT + 2)) + 5); \
    r.B(12) = d->B((base << (SHIFT + 2)) + 6); \
    r.B(13) = s->B((base << (SHIFT + 2)) + 6); \
    r.B(14) = d->B((base << (SHIFT + 2)) + 7); \
    r.B(15) = s->B((base << (SHIFT + 2)) + 7); \
) \
    *d = r; \
} \
 \
void glue(helper_punpck ## base_name ## wd, SUFFIX) (Reg *d, Reg *s) \
{ \
    Reg r; \
 \
    r.W(0) = d->W((base << (SHIFT + 1)) + 0); \
    r.W(1) = s->W((base << (SHIFT + 1)) + 0); \
    r.W(2) = d->W((base << (SHIFT + 1)) + 1); \
    r.W(3) = s->W((base << (SHIFT + 1)) + 1); \
XMM_ONLY( \
    r.W(4) = d->W((base << (SHIFT + 1)) + 2); \
    r.W(5) = s->W((base << (SHIFT + 1)) + 2); \
    r.W(6) = d->W((base << (SHIFT + 1)) + 3); \
    r.W(7) = s->W((base << (SHIFT + 1)) + 3); \
) \
    *d = r; \
} \
 \
void glue(helper_punpck ## base_name ## dq, SUFFIX) (Reg *d, Reg *s) \
{ \
    Reg r; \
 \
    r.L(0) = d->L((base << SHIFT) + 0); \
    r.L(1) = s->L((base << SHIFT) + 0); \
XMM_ONLY( \
    r.L(2) = d->L((base << SHIFT) + 1); \
    r.L(3) = s->L((base << SHIFT) + 1); \
) \
    *d = r; \
} \
 \
XMM_ONLY( \
void glue(helper_punpck ## base_name ## qdq, SUFFIX) (Reg *d, Reg *s) \
{ \
    Reg r; \
 \
    r.Q(0) = d->Q(base); \
    r.Q(1) = s->Q(base); \
    *d = r; \
} \
)

/* Instantiate the low and high interleaves. */
UNPCK_OP(l, 0)
UNPCK_OP(h, 1)
1186
1187/* 3DNow! float ops */
1188#if SHIFT == 0
/* PI2FD: two signed dwords to floats (3DNow! uses mmx_status). */
void helper_pi2fd(MMXReg *d, MMXReg *s)
{
    d->MMX_S(0) = int32_to_float32(s->MMX_L(0), &env->mmx_status);
    d->MMX_S(1) = int32_to_float32(s->MMX_L(1), &env->mmx_status);
}

/* PI2FW: the low word of each dword, sign-extended, to float. */
void helper_pi2fw(MMXReg *d, MMXReg *s)
{
    d->MMX_S(0) = int32_to_float32((int16_t)s->MMX_W(0), &env->mmx_status);
    d->MMX_S(1) = int32_to_float32((int16_t)s->MMX_W(2), &env->mmx_status);
}

/* PF2ID: two floats to signed dwords, truncating. */
void helper_pf2id(MMXReg *d, MMXReg *s)
{
    d->MMX_L(0) = float32_to_int32_round_to_zero(s->MMX_S(0), &env->mmx_status);
    d->MMX_L(1) = float32_to_int32_round_to_zero(s->MMX_S(1), &env->mmx_status);
}

/* PF2IW: like PF2ID but saturated to the signed 16-bit range; satsw's
   result sign-extends into the 32-bit lane. */
void helper_pf2iw(MMXReg *d, MMXReg *s)
{
    d->MMX_L(0) = satsw(float32_to_int32_round_to_zero(s->MMX_S(0), &env->mmx_status));
    d->MMX_L(1) = satsw(float32_to_int32_round_to_zero(s->MMX_S(1), &env->mmx_status));
}

/* PFACC: horizontal add; temporary allows d == s. */
void helper_pfacc(MMXReg *d, MMXReg *s)
{
    MMXReg r;
    r.MMX_S(0) = float32_add(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
    r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);
    *d = r;
}

/* PFADD: element-wise float add. */
void helper_pfadd(MMXReg *d, MMXReg *s)
{
    d->MMX_S(0) = float32_add(d->MMX_S(0), s->MMX_S(0), &env->mmx_status);
    d->MMX_S(1) = float32_add(d->MMX_S(1), s->MMX_S(1), &env->mmx_status);
}

/* PFCMPEQ: all-ones lane when equal, else zero. */
void helper_pfcmpeq(MMXReg *d, MMXReg *s)
{
    d->MMX_L(0) = float32_eq(d->MMX_S(0), s->MMX_S(0), &env->mmx_status) ? -1 : 0;
    d->MMX_L(1) = float32_eq(d->MMX_S(1), s->MMX_S(1), &env->mmx_status) ? -1 : 0;
}

/* PFCMPGE: d >= s, expressed as s <= d. */
void helper_pfcmpge(MMXReg *d, MMXReg *s)
{
    d->MMX_L(0) = float32_le(s->MMX_S(0), d->MMX_S(0), &env->mmx_status) ? -1 : 0;
    d->MMX_L(1) = float32_le(s->MMX_S(1), d->MMX_S(1), &env->mmx_status) ? -1 : 0;
}

/* PFCMPGT: d > s, expressed as s < d. */
void helper_pfcmpgt(MMXReg *d, MMXReg *s)
{
    d->MMX_L(0) = float32_lt(s->MMX_S(0), d->MMX_S(0), &env->mmx_status) ? -1 : 0;
    d->MMX_L(1) = float32_lt(s->MMX_S(1), d->MMX_S(1), &env->mmx_status) ? -1 : 0;
}

/* PFMAX: keep the larger lane (replace d when d < s). */
void helper_pfmax(MMXReg *d, MMXReg *s)
{
    if (float32_lt(d->MMX_S(0), s->MMX_S(0), &env->mmx_status))
        d->MMX_S(0) = s->MMX_S(0);
    if (float32_lt(d->MMX_S(1), s->MMX_S(1), &env->mmx_status))
        d->MMX_S(1) = s->MMX_S(1);
}
1252
1253void helper_pfmin(MMXReg *d, MMXReg *s)
1254{
1255 if (float32_lt(s->MMX_S(0), d->MMX_S(0), &env->mmx_status))
1256 d->MMX_S(0) = s->MMX_S(0);
1257 if (float32_lt(s->MMX_S(1), d->MMX_S(1), &env->mmx_status))
1258 d->MMX_S(1) = s->MMX_S(1);
1259}
1260
1261void helper_pfmul(MMXReg *d, MMXReg *s)
1262{
1263 d->MMX_S(0) = float32_mul(d->MMX_S(0), s->MMX_S(0), &env->mmx_status);
1264 d->MMX_S(1) = float32_mul(d->MMX_S(1), s->MMX_S(1), &env->mmx_status);
1265}
1266
1267void helper_pfnacc(MMXReg *d, MMXReg *s)
1268{
1269 MMXReg r;
1270 r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
1271 r.MMX_S(1) = float32_sub(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);
1272 *d = r;
1273}
1274
1275void helper_pfpnacc(MMXReg *d, MMXReg *s)
1276{
1277 MMXReg r;
1278 r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
1279 r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);
1280 *d = r;
1281}
1282
1283void helper_pfrcp(MMXReg *d, MMXReg *s)
1284{
1285 d->MMX_S(0) = approx_rcp(s->MMX_S(0));
1286 d->MMX_S(1) = d->MMX_S(0);
1287}
1288
/* PFRSQRT: approximate reciprocal square root of the low lane,
 * broadcast to both lanes.  The approximation is taken on |x| and the
 * input's sign bit is reapplied afterwards; the statement order matters
 * because d may alias s. */
void helper_pfrsqrt(MMXReg *d, MMXReg *s)
{
    d->MMX_L(1) = s->MMX_L(0) & 0x7fffffff;   /* |x| into the scratch lane */
    d->MMX_S(1) = approx_rsqrt(d->MMX_S(1));
    d->MMX_L(1) |= s->MMX_L(0) & 0x80000000;  /* reapply the sign bit */
    d->MMX_L(0) = d->MMX_L(1);
}
1296
1297void helper_pfsub(MMXReg *d, MMXReg *s)
1298{
1299 d->MMX_S(0) = float32_sub(d->MMX_S(0), s->MMX_S(0), &env->mmx_status);
1300 d->MMX_S(1) = float32_sub(d->MMX_S(1), s->MMX_S(1), &env->mmx_status);
1301}
1302
1303void helper_pfsubr(MMXReg *d, MMXReg *s)
1304{
1305 d->MMX_S(0) = float32_sub(s->MMX_S(0), d->MMX_S(0), &env->mmx_status);
1306 d->MMX_S(1) = float32_sub(s->MMX_S(1), d->MMX_S(1), &env->mmx_status);
1307}
1308
1309void helper_pswapd(MMXReg *d, MMXReg *s)
1310{
1311 MMXReg r;
1312 r.MMX_L(0) = s->MMX_L(1);
1313 r.MMX_L(1) = s->MMX_L(0);
1314 *d = r;
1315}
1316#endif
1317
1318/* SSSE3 op helpers */
1319void glue(helper_pshufb, SUFFIX) (Reg *d, Reg *s)
1320{
1321 int i;
1322 Reg r;
1323
1324 for (i = 0; i < (8 << SHIFT); i++)
1325 r.B(i) = (s->B(i) & 0x80) ? 0 : (d->B(s->B(i) & ((8 << SHIFT) - 1)));
1326
1327 *d = r;
1328}
1329
1330void glue(helper_phaddw, SUFFIX) (Reg *d, Reg *s)
1331{
1332 d->W(0) = (int16_t)d->W(0) + (int16_t)d->W(1);
1333 d->W(1) = (int16_t)d->W(2) + (int16_t)d->W(3);
1334 XMM_ONLY(d->W(2) = (int16_t)d->W(4) + (int16_t)d->W(5));
1335 XMM_ONLY(d->W(3) = (int16_t)d->W(6) + (int16_t)d->W(7));
1336 d->W((2 << SHIFT) + 0) = (int16_t)s->W(0) + (int16_t)s->W(1);
1337 d->W((2 << SHIFT) + 1) = (int16_t)s->W(2) + (int16_t)s->W(3);
1338 XMM_ONLY(d->W(6) = (int16_t)s->W(4) + (int16_t)s->W(5));
1339 XMM_ONLY(d->W(7) = (int16_t)s->W(6) + (int16_t)s->W(7));
1340}
1341
1342void glue(helper_phaddd, SUFFIX) (Reg *d, Reg *s)
1343{
1344 d->L(0) = (int32_t)d->L(0) + (int32_t)d->L(1);
1345 XMM_ONLY(d->L(1) = (int32_t)d->L(2) + (int32_t)d->L(3));
1346 d->L((1 << SHIFT) + 0) = (int32_t)s->L(0) + (int32_t)s->L(1);
1347 XMM_ONLY(d->L(3) = (int32_t)s->L(2) + (int32_t)s->L(3));
1348}
1349
1350void glue(helper_phaddsw, SUFFIX) (Reg *d, Reg *s)
1351{
1352 d->W(0) = satsw((int16_t)d->W(0) + (int16_t)d->W(1));
1353 d->W(1) = satsw((int16_t)d->W(2) + (int16_t)d->W(3));
1354 XMM_ONLY(d->W(2) = satsw((int16_t)d->W(4) + (int16_t)d->W(5)));
1355 XMM_ONLY(d->W(3) = satsw((int16_t)d->W(6) + (int16_t)d->W(7)));
1356 d->W((2 << SHIFT) + 0) = satsw((int16_t)s->W(0) + (int16_t)s->W(1));
1357 d->W((2 << SHIFT) + 1) = satsw((int16_t)s->W(2) + (int16_t)s->W(3));
1358 XMM_ONLY(d->W(6) = satsw((int16_t)s->W(4) + (int16_t)s->W(5)));
1359 XMM_ONLY(d->W(7) = satsw((int16_t)s->W(6) + (int16_t)s->W(7)));
1360}
1361
/* PMADDUBSW: multiply unsigned bytes of d by the corresponding signed
 * bytes of s, add each horizontal pair, and saturate the sums to signed
 * words.  Writing W(i) only overwrites bytes already consumed by that
 * statement, so the in-place update is safe even when d == s. */
void glue(helper_pmaddubsw, SUFFIX) (Reg *d, Reg *s)
{
    d->W(0) = satsw((int8_t)s->B( 0) * (uint8_t)d->B( 0) +
                    (int8_t)s->B( 1) * (uint8_t)d->B( 1));
    d->W(1) = satsw((int8_t)s->B( 2) * (uint8_t)d->B( 2) +
                    (int8_t)s->B( 3) * (uint8_t)d->B( 3));
    d->W(2) = satsw((int8_t)s->B( 4) * (uint8_t)d->B( 4) +
                    (int8_t)s->B( 5) * (uint8_t)d->B( 5));
    d->W(3) = satsw((int8_t)s->B( 6) * (uint8_t)d->B( 6) +
                    (int8_t)s->B( 7) * (uint8_t)d->B( 7));
#if SHIFT == 1
    d->W(4) = satsw((int8_t)s->B( 8) * (uint8_t)d->B( 8) +
                    (int8_t)s->B( 9) * (uint8_t)d->B( 9));
    d->W(5) = satsw((int8_t)s->B(10) * (uint8_t)d->B(10) +
                    (int8_t)s->B(11) * (uint8_t)d->B(11));
    d->W(6) = satsw((int8_t)s->B(12) * (uint8_t)d->B(12) +
                    (int8_t)s->B(13) * (uint8_t)d->B(13));
    d->W(7) = satsw((int8_t)s->B(14) * (uint8_t)d->B(14) +
                    (int8_t)s->B(15) * (uint8_t)d->B(15));
#endif
}
1383
1384void glue(helper_phsubw, SUFFIX) (Reg *d, Reg *s)
1385{
1386 d->W(0) = (int16_t)d->W(0) - (int16_t)d->W(1);
1387 d->W(1) = (int16_t)d->W(2) - (int16_t)d->W(3);
1388 XMM_ONLY(d->W(2) = (int16_t)d->W(4) - (int16_t)d->W(5));
1389 XMM_ONLY(d->W(3) = (int16_t)d->W(6) - (int16_t)d->W(7));
1390 d->W((2 << SHIFT) + 0) = (int16_t)s->W(0) - (int16_t)s->W(1);
1391 d->W((2 << SHIFT) + 1) = (int16_t)s->W(2) - (int16_t)s->W(3);
1392 XMM_ONLY(d->W(6) = (int16_t)s->W(4) - (int16_t)s->W(5));
1393 XMM_ONLY(d->W(7) = (int16_t)s->W(6) - (int16_t)s->W(7));
1394}
1395
1396void glue(helper_phsubd, SUFFIX) (Reg *d, Reg *s)
1397{
1398 d->L(0) = (int32_t)d->L(0) - (int32_t)d->L(1);
1399 XMM_ONLY(d->L(1) = (int32_t)d->L(2) - (int32_t)d->L(3));
1400 d->L((1 << SHIFT) + 0) = (int32_t)s->L(0) - (int32_t)s->L(1);
1401 XMM_ONLY(d->L(3) = (int32_t)s->L(2) - (int32_t)s->L(3));
1402}
1403
1404void glue(helper_phsubsw, SUFFIX) (Reg *d, Reg *s)
1405{
1406 d->W(0) = satsw((int16_t)d->W(0) - (int16_t)d->W(1));
1407 d->W(1) = satsw((int16_t)d->W(2) - (int16_t)d->W(3));
1408 XMM_ONLY(d->W(2) = satsw((int16_t)d->W(4) - (int16_t)d->W(5)));
1409 XMM_ONLY(d->W(3) = satsw((int16_t)d->W(6) - (int16_t)d->W(7)));
1410 d->W((2 << SHIFT) + 0) = satsw((int16_t)s->W(0) - (int16_t)s->W(1));
1411 d->W((2 << SHIFT) + 1) = satsw((int16_t)s->W(2) - (int16_t)s->W(3));
1412 XMM_ONLY(d->W(6) = satsw((int16_t)s->W(4) - (int16_t)s->W(5)));
1413 XMM_ONLY(d->W(7) = satsw((int16_t)s->W(6) - (int16_t)s->W(7)));
1414}
1415
1416#define FABSB(_, x) x > INT8_MAX ? -(int8_t ) x : x
1417#define FABSW(_, x) x > INT16_MAX ? -(int16_t) x : x
1418#define FABSL(_, x) x > INT32_MAX ? -(int32_t) x : x
1419SSE_HELPER_B(helper_pabsb, FABSB)
1420SSE_HELPER_W(helper_pabsw, FABSW)
1421SSE_HELPER_L(helper_pabsd, FABSL)
1422
1423#define FMULHRSW(d, s) ((int16_t) d * (int16_t) s + 0x4000) >> 15
1424SSE_HELPER_W(helper_pmulhrsw, FMULHRSW)
1425
1426#define FSIGNB(d, s) s <= INT8_MAX ? s ? d : 0 : -(int8_t ) d
1427#define FSIGNW(d, s) s <= INT16_MAX ? s ? d : 0 : -(int16_t) d
1428#define FSIGNL(d, s) s <= INT32_MAX ? s ? d : 0 : -(int32_t) d
1429SSE_HELPER_B(helper_psignb, FSIGNB)
1430SSE_HELPER_W(helper_psignw, FSIGNW)
1431SSE_HELPER_L(helper_psignd, FSIGNL)
1432
/* PALIGNR: concatenate d (high part) with s (low part) and extract a
 * register-sized window starting at byte offset `shift`. */
void glue(helper_palignr, SUFFIX) (Reg *d, Reg *s, int32_t shift)
{
    Reg r;

    /* XXX could be checked during translation */
    if (shift >= (16 << SHIFT)) {
        /* the entire concatenation is shifted out: result is zero */
        r.Q(0) = 0;
        XMM_ONLY(r.Q(1) = 0);
    } else {
        shift <<= 3; /* byte offset -> bit offset */
/* shift V right by I bits; a negative I shifts left instead, and any
 * |I| >= 64 contributes nothing — so each 64-bit result lane simply ORs
 * together whichever source pieces land in it */
#define SHR(v, i) (i < 64 && i > -64 ? i > 0 ? v >> (i) : (v << -(i)) : 0)
#if SHIFT == 0
        r.Q(0) = SHR(s->Q(0), shift - 0) |
                 SHR(d->Q(0), shift - 64);
#else
        r.Q(0) = SHR(s->Q(0), shift - 0) |
                 SHR(s->Q(1), shift - 64) |
                 SHR(d->Q(0), shift - 128) |
                 SHR(d->Q(1), shift - 192);
        r.Q(1) = SHR(s->Q(0), shift + 64) |
                 SHR(s->Q(1), shift - 0) |
                 SHR(d->Q(0), shift - 64) |
                 SHR(d->Q(1), shift - 128);
#endif
#undef SHR
    }

    *d = r;
}
1462
1463#define XMM0 env->xmm_regs[0]
1464
1465#if SHIFT == 1
/* Variable-blend helper generator: applies the three-operand macro
 * F(dst, src, mask) to each of NUM elements, with the implicit third
 * operand taken from XMM0 (SSE4.1 PBLENDVB/BLENDVPS/BLENDVPD
 * semantics).  NUM is a compile-time constant, so the nested `if`
 * ladders are pruned by the compiler. */
#define SSE_HELPER_V(name, elem, num, F)\
void glue(name, SUFFIX) (Reg *d, Reg *s)\
{\
    d->elem(0) = F(d->elem(0), s->elem(0), XMM0.elem(0));\
    d->elem(1) = F(d->elem(1), s->elem(1), XMM0.elem(1));\
    if (num > 2) {\
        d->elem(2) = F(d->elem(2), s->elem(2), XMM0.elem(2));\
        d->elem(3) = F(d->elem(3), s->elem(3), XMM0.elem(3));\
        if (num > 4) {\
            d->elem(4) = F(d->elem(4), s->elem(4), XMM0.elem(4));\
            d->elem(5) = F(d->elem(5), s->elem(5), XMM0.elem(5));\
            d->elem(6) = F(d->elem(6), s->elem(6), XMM0.elem(6));\
            d->elem(7) = F(d->elem(7), s->elem(7), XMM0.elem(7));\
            if (num > 8) {\
                d->elem(8) = F(d->elem(8), s->elem(8), XMM0.elem(8));\
                d->elem(9) = F(d->elem(9), s->elem(9), XMM0.elem(9));\
                d->elem(10) = F(d->elem(10), s->elem(10), XMM0.elem(10));\
                d->elem(11) = F(d->elem(11), s->elem(11), XMM0.elem(11));\
                d->elem(12) = F(d->elem(12), s->elem(12), XMM0.elem(12));\
                d->elem(13) = F(d->elem(13), s->elem(13), XMM0.elem(13));\
                d->elem(14) = F(d->elem(14), s->elem(14), XMM0.elem(14));\
                d->elem(15) = F(d->elem(15), s->elem(15), XMM0.elem(15));\
            }\
        }\
    }\
}
1492
/* Immediate-blend helper generator: applies F(dst, src, bit) per
 * element, where `bit` is the corresponding bit of the instruction
 * immediate (SSE4.1 BLENDPS/BLENDPD/PBLENDW).  As above, NUM is a
 * compile-time constant and the dead branches fold away. */
#define SSE_HELPER_I(name, elem, num, F)\
void glue(name, SUFFIX) (Reg *d, Reg *s, uint32_t imm)\
{\
    d->elem(0) = F(d->elem(0), s->elem(0), ((imm >> 0) & 1));\
    d->elem(1) = F(d->elem(1), s->elem(1), ((imm >> 1) & 1));\
    if (num > 2) {\
        d->elem(2) = F(d->elem(2), s->elem(2), ((imm >> 2) & 1));\
        d->elem(3) = F(d->elem(3), s->elem(3), ((imm >> 3) & 1));\
        if (num > 4) {\
            d->elem(4) = F(d->elem(4), s->elem(4), ((imm >> 4) & 1));\
            d->elem(5) = F(d->elem(5), s->elem(5), ((imm >> 5) & 1));\
            d->elem(6) = F(d->elem(6), s->elem(6), ((imm >> 6) & 1));\
            d->elem(7) = F(d->elem(7), s->elem(7), ((imm >> 7) & 1));\
            if (num > 8) {\
                d->elem(8) = F(d->elem(8), s->elem(8), ((imm >> 8) & 1));\
                d->elem(9) = F(d->elem(9), s->elem(9), ((imm >> 9) & 1));\
                d->elem(10) = F(d->elem(10), s->elem(10), ((imm >> 10) & 1));\
                d->elem(11) = F(d->elem(11), s->elem(11), ((imm >> 11) & 1));\
                d->elem(12) = F(d->elem(12), s->elem(12), ((imm >> 12) & 1));\
                d->elem(13) = F(d->elem(13), s->elem(13), ((imm >> 13) & 1));\
                d->elem(14) = F(d->elem(14), s->elem(14), ((imm >> 14) & 1));\
                d->elem(15) = F(d->elem(15), s->elem(15), ((imm >> 15) & 1));\
            }\
        }\
    }\
}
1519
1520/* SSE4.1 op helpers */
1521#define FBLENDVB(d, s, m) (m & 0x80) ? s : d
1522#define FBLENDVPS(d, s, m) (m & 0x80000000) ? s : d
1523#define FBLENDVPD(d, s, m) (m & 0x8000000000000000LL) ? s : d
1524SSE_HELPER_V(helper_pblendvb, B, 16, FBLENDVB)
1525SSE_HELPER_V(helper_blendvps, L, 4, FBLENDVPS)
1526SSE_HELPER_V(helper_blendvpd, Q, 2, FBLENDVPD)
1527
1528void glue(helper_ptest, SUFFIX) (Reg *d, Reg *s)
1529{
1530 uint64_t zf = (s->Q(0) & d->Q(0)) | (s->Q(1) & d->Q(1));
1531 uint64_t cf = (s->Q(0) & ~d->Q(0)) | (s->Q(1) & ~d->Q(1));
1532
1533 CC_SRC = (zf ? 0 : CC_Z) | (cf ? 0 : CC_C);
1534}
1535
1536#define SSE_HELPER_F(name, elem, num, F)\
1537void glue(name, SUFFIX) (Reg *d, Reg *s)\
1538{\
1539 d->elem(0) = F(0);\
1540 d->elem(1) = F(1);\
1541 if (num > 2) {\
1542 d->elem(2) = F(2);\
1543 d->elem(3) = F(3);\
1544 if (num > 4) {\
1545 d->elem(4) = F(4);\
1546 d->elem(5) = F(5);\
1547 d->elem(6) = F(6);\
1548 d->elem(7) = F(7);\
1549 }\
1550 }\
1551}
1552
1553SSE_HELPER_F(helper_pmovsxbw, W, 8, (int8_t) s->B)
1554SSE_HELPER_F(helper_pmovsxbd, L, 4, (int8_t) s->B)
1555SSE_HELPER_F(helper_pmovsxbq, Q, 2, (int8_t) s->B)
1556SSE_HELPER_F(helper_pmovsxwd, L, 4, (int16_t) s->W)
1557SSE_HELPER_F(helper_pmovsxwq, Q, 2, (int16_t) s->W)
1558SSE_HELPER_F(helper_pmovsxdq, Q, 2, (int32_t) s->L)
1559SSE_HELPER_F(helper_pmovzxbw, W, 8, s->B)
1560SSE_HELPER_F(helper_pmovzxbd, L, 4, s->B)
1561SSE_HELPER_F(helper_pmovzxbq, Q, 2, s->B)
1562SSE_HELPER_F(helper_pmovzxwd, L, 4, s->W)
1563SSE_HELPER_F(helper_pmovzxwq, Q, 2, s->W)
1564SSE_HELPER_F(helper_pmovzxdq, Q, 2, s->L)
1565
1566void glue(helper_pmuldq, SUFFIX) (Reg *d, Reg *s)
1567{
1568 d->Q(0) = (int64_t) (int32_t) d->L(0) * (int32_t) s->L(0);
1569 d->Q(1) = (int64_t) (int32_t) d->L(2) * (int32_t) s->L(2);
1570}
1571
1572#define FCMPEQQ(d, s) d == s ? -1 : 0
1573SSE_HELPER_Q(helper_pcmpeqq, FCMPEQQ)
1574
1575void glue(helper_packusdw, SUFFIX) (Reg *d, Reg *s)
1576{
1577 d->W(0) = satuw((int32_t) d->L(0));
1578 d->W(1) = satuw((int32_t) d->L(1));
1579 d->W(2) = satuw((int32_t) d->L(2));
1580 d->W(3) = satuw((int32_t) d->L(3));
1581 d->W(4) = satuw((int32_t) s->L(0));
1582 d->W(5) = satuw((int32_t) s->L(1));
1583 d->W(6) = satuw((int32_t) s->L(2));
1584 d->W(7) = satuw((int32_t) s->L(3));
1585}
1586
1587#define FMINSB(d, s) MIN((int8_t) d, (int8_t) s)
1588#define FMINSD(d, s) MIN((int32_t) d, (int32_t) s)
1589#define FMAXSB(d, s) MAX((int8_t) d, (int8_t) s)
1590#define FMAXSD(d, s) MAX((int32_t) d, (int32_t) s)
1591SSE_HELPER_B(helper_pminsb, FMINSB)
1592SSE_HELPER_L(helper_pminsd, FMINSD)
1593SSE_HELPER_W(helper_pminuw, MIN)
1594SSE_HELPER_L(helper_pminud, MIN)
1595SSE_HELPER_B(helper_pmaxsb, FMAXSB)
1596SSE_HELPER_L(helper_pmaxsd, FMAXSD)
1597SSE_HELPER_W(helper_pmaxuw, MAX)
1598SSE_HELPER_L(helper_pmaxud, MAX)
1599
1600#define FMULLD(d, s) (int32_t) d * (int32_t) s
1601SSE_HELPER_L(helper_pmulld, FMULLD)
1602
1603void glue(helper_phminposuw, SUFFIX) (Reg *d, Reg *s)
1604{
1605 int idx = 0;
1606
1607 if (s->W(1) < s->W(idx))
1608 idx = 1;
1609 if (s->W(2) < s->W(idx))
1610 idx = 2;
1611 if (s->W(3) < s->W(idx))
1612 idx = 3;
1613 if (s->W(4) < s->W(idx))
1614 idx = 4;
1615 if (s->W(5) < s->W(idx))
1616 idx = 5;
1617 if (s->W(6) < s->W(idx))
1618 idx = 6;
1619 if (s->W(7) < s->W(idx))
1620 idx = 7;
1621
1622 d->Q(1) = 0;
1623 d->L(1) = 0;
1624 d->W(1) = idx;
1625 d->W(0) = s->W(idx);
1626}
1627
1628void glue(helper_roundps, SUFFIX) (Reg *d, Reg *s, uint32_t mode)
1629{
1630 signed char prev_rounding_mode;
1631
1632 prev_rounding_mode = env->sse_status.float_rounding_mode;
1633 if (!(mode & (1 << 2)))
1634 switch (mode & 3) {
1635 case 0:
1636 set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
1637 break;
1638 case 1:
1639 set_float_rounding_mode(float_round_down, &env->sse_status);
1640 break;
1641 case 2:
1642 set_float_rounding_mode(float_round_up, &env->sse_status);
1643 break;
1644 case 3:
1645 set_float_rounding_mode(float_round_to_zero, &env->sse_status);
1646 break;
1647 }
1648
1649 d->L(0) = float64_round_to_int(s->L(0), &env->sse_status);
1650 d->L(1) = float64_round_to_int(s->L(1), &env->sse_status);
1651 d->L(2) = float64_round_to_int(s->L(2), &env->sse_status);
1652 d->L(3) = float64_round_to_int(s->L(3), &env->sse_status);
1653
1654#if 0 /* TODO */
1655 if (mode & (1 << 3))
1656 set_float_exception_flags(
1657 get_float_exception_flags(&env->sse_status) &
1658 ~float_flag_inexact,
1659 &env->sse_status);
1660#endif
1661 env->sse_status.float_rounding_mode = prev_rounding_mode;
1662}
1663
1664void glue(helper_roundpd, SUFFIX) (Reg *d, Reg *s, uint32_t mode)
1665{
1666 signed char prev_rounding_mode;
1667
1668 prev_rounding_mode = env->sse_status.float_rounding_mode;
1669 if (!(mode & (1 << 2)))
1670 switch (mode & 3) {
1671 case 0:
1672 set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
1673 break;
1674 case 1:
1675 set_float_rounding_mode(float_round_down, &env->sse_status);
1676 break;
1677 case 2:
1678 set_float_rounding_mode(float_round_up, &env->sse_status);
1679 break;
1680 case 3:
1681 set_float_rounding_mode(float_round_to_zero, &env->sse_status);
1682 break;
1683 }
1684
1685 d->Q(0) = float64_round_to_int(s->Q(0), &env->sse_status);
1686 d->Q(1) = float64_round_to_int(s->Q(1), &env->sse_status);
1687
1688#if 0 /* TODO */
1689 if (mode & (1 << 3))
1690 set_float_exception_flags(
1691 get_float_exception_flags(&env->sse_status) &
1692 ~float_flag_inexact,
1693 &env->sse_status);
1694#endif
1695 env->sse_status.float_rounding_mode = prev_rounding_mode;
1696}
1697
1698void glue(helper_roundss, SUFFIX) (Reg *d, Reg *s, uint32_t mode)
1699{
1700 signed char prev_rounding_mode;
1701
1702 prev_rounding_mode = env->sse_status.float_rounding_mode;
1703 if (!(mode & (1 << 2)))
1704 switch (mode & 3) {
1705 case 0:
1706 set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
1707 break;
1708 case 1:
1709 set_float_rounding_mode(float_round_down, &env->sse_status);
1710 break;
1711 case 2:
1712 set_float_rounding_mode(float_round_up, &env->sse_status);
1713 break;
1714 case 3:
1715 set_float_rounding_mode(float_round_to_zero, &env->sse_status);
1716 break;
1717 }
1718
1719 d->L(0) = float64_round_to_int(s->L(0), &env->sse_status);
1720
1721#if 0 /* TODO */
1722 if (mode & (1 << 3))
1723 set_float_exception_flags(
1724 get_float_exception_flags(&env->sse_status) &
1725 ~float_flag_inexact,
1726 &env->sse_status);
1727#endif
1728 env->sse_status.float_rounding_mode = prev_rounding_mode;
1729}
1730
1731void glue(helper_roundsd, SUFFIX) (Reg *d, Reg *s, uint32_t mode)
1732{
1733 signed char prev_rounding_mode;
1734
1735 prev_rounding_mode = env->sse_status.float_rounding_mode;
1736 if (!(mode & (1 << 2)))
1737 switch (mode & 3) {
1738 case 0:
1739 set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
1740 break;
1741 case 1:
1742 set_float_rounding_mode(float_round_down, &env->sse_status);
1743 break;
1744 case 2:
1745 set_float_rounding_mode(float_round_up, &env->sse_status);
1746 break;
1747 case 3:
1748 set_float_rounding_mode(float_round_to_zero, &env->sse_status);
1749 break;
1750 }
1751
1752 d->Q(0) = float64_round_to_int(s->Q(0), &env->sse_status);
1753
1754#if 0 /* TODO */
1755 if (mode & (1 << 3))
1756 set_float_exception_flags(
1757 get_float_exception_flags(&env->sse_status) &
1758 ~float_flag_inexact,
1759 &env->sse_status);
1760#endif
1761 env->sse_status.float_rounding_mode = prev_rounding_mode;
1762}
1763
1764#define FBLENDP(d, s, m) m ? s : d
1765SSE_HELPER_I(helper_blendps, L, 4, FBLENDP)
1766SSE_HELPER_I(helper_blendpd, Q, 2, FBLENDP)
1767SSE_HELPER_I(helper_pblendw, W, 8, FBLENDP)
1768
1769void glue(helper_dpps, SUFFIX) (Reg *d, Reg *s, uint32_t mask)
1770{
1771 float32 iresult = 0 /*float32_zero*/;
1772
1773 if (mask & (1 << 4))
1774 iresult = float32_add(iresult,
1775 float32_mul(d->L(0), s->L(0), &env->sse_status),
1776 &env->sse_status);
1777 if (mask & (1 << 5))
1778 iresult = float32_add(iresult,
1779 float32_mul(d->L(1), s->L(1), &env->sse_status),
1780 &env->sse_status);
1781 if (mask & (1 << 6))
1782 iresult = float32_add(iresult,
1783 float32_mul(d->L(2), s->L(2), &env->sse_status),
1784 &env->sse_status);
1785 if (mask & (1 << 7))
1786 iresult = float32_add(iresult,
1787 float32_mul(d->L(3), s->L(3), &env->sse_status),
1788 &env->sse_status);
1789 d->L(0) = (mask & (1 << 0)) ? iresult : 0 /*float32_zero*/;
1790 d->L(1) = (mask & (1 << 1)) ? iresult : 0 /*float32_zero*/;
1791 d->L(2) = (mask & (1 << 2)) ? iresult : 0 /*float32_zero*/;
1792 d->L(3) = (mask & (1 << 3)) ? iresult : 0 /*float32_zero*/;
1793}
1794
1795void glue(helper_dppd, SUFFIX) (Reg *d, Reg *s, uint32_t mask)
1796{
1797 float64 iresult = 0 /*float64_zero*/;
1798
1799 if (mask & (1 << 4))
1800 iresult = float64_add(iresult,
1801 float64_mul(d->Q(0), s->Q(0), &env->sse_status),
1802 &env->sse_status);
1803 if (mask & (1 << 5))
1804 iresult = float64_add(iresult,
1805 float64_mul(d->Q(1), s->Q(1), &env->sse_status),
1806 &env->sse_status);
1807 d->Q(0) = (mask & (1 << 0)) ? iresult : 0 /*float64_zero*/;
1808 d->Q(1) = (mask & (1 << 1)) ? iresult : 0 /*float64_zero*/;
1809}
1810
1811void glue(helper_mpsadbw, SUFFIX) (Reg *d, Reg *s, uint32_t offset)
1812{
1813 int s0 = (offset & 3) << 2;
1814 int d0 = (offset & 4) << 0;
1815 int i;
1816 Reg r;
1817
1818 for (i = 0; i < 8; i++, d0++) {
1819 r.W(i) = 0;
1820 r.W(i) += abs1(d->B(d0 + 0) - s->B(s0 + 0));
1821 r.W(i) += abs1(d->B(d0 + 1) - s->B(s0 + 1));
1822 r.W(i) += abs1(d->B(d0 + 2) - s->B(s0 + 2));
1823 r.W(i) += abs1(d->B(d0 + 3) - s->B(s0 + 3));
1824 }
1825
1826 *d = r;
1827}
1828
1829/* SSE4.2 op helpers */
1830/* it's unclear whether signed or unsigned */
1831#define FCMPGTQ(d, s) d > s ? -1 : 0
1832SSE_HELPER_Q(helper_pcmpgtq, FCMPGTQ)
1833
/* Explicit operand length for PCMPESTRx, taken from a general register
 * (EAX/RAX or EDX/RDX) and clamped to the element capacity of an XMM
 * register: 8 words or 16 bytes depending on ctrl bit 0. */
static inline int pcmp_elen(int reg, uint32_t ctrl)
{
    int val;

    /* Presence of REX.W is indicated by a bit higher than 7 set */
    if (ctrl >> 8)
        val = abs1((int64_t) env->regs[reg]);  /* 64-bit register value */
    else
        val = abs1((int32_t) env->regs[reg]);  /* 32-bit register value */

    /* clamp to the number of elements that fit in the register */
    if (ctrl & 1) {
        if (val > 8)
            return 8;
    } else
        if (val > 16)
            return 16;

    return val;
}
1853
1854static inline int pcmp_ilen(Reg *r, uint8_t ctrl)
1855{
1856 int val = 0;
1857
1858 if (ctrl & 1) {
1859 while (val < 8 && r->W(val))
1860 val++;
1861 } else
1862 while (val < 16 && r->B(val))
1863 val++;
1864
1865 return val;
1866}
1867
1868static inline int pcmp_val(Reg *r, uint8_t ctrl, int i)
1869{
1870 switch ((ctrl >> 0) & 3) {
1871 case 0:
1872 return r->B(i);
1873 case 1:
1874 return r->W(i);
1875 case 2:
1876 return (int8_t) r->B(i);
1877 case 3:
1878 default:
1879 return (int16_t) r->W(i);
1880 }
1881}
1882
/* Shared core of the SSE4.2 PCMPxSTRx family.  ctrl is the immediate
 * control byte; valids/validd are the valid element counts of the
 * source (s) and destination (d) operands.  Returns the comparison bit
 * mask (element i <-> bit i) and sets ZF/SF here plus CF/OF at the end
 * in CC_SRC. */
static inline unsigned pcmpxstrx(Reg *d, Reg *s,
                                 int8_t ctrl, int valids, int validd)
{
    unsigned int res = 0;
    int v;
    int j, i;
    int upper = (ctrl & 1) ? 7 : 15;  /* last element index: words/bytes */

    /* convert element counts to last-valid-element indices */
    valids--;
    validd--;

    /* ZF: source shorter than the register; SF: likewise for dest */
    CC_SRC = (valids < upper ? CC_Z : 0) | (validd < upper ? CC_S : 0);

    /* aggregation operation, ctrl[3:2] */
    switch ((ctrl >> 2) & 3) {
    case 0:
        /* equal any: bit j set if s[j] equals any valid element of d */
        for (j = valids; j >= 0; j--) {
            res <<= 1;
            v = pcmp_val(s, ctrl, j);
            for (i = validd; i >= 0; i--)
                res |= (v == pcmp_val(d, ctrl, i));
        }
        break;
    case 1:
        /* ranges: d holds (lower, upper) pairs; bit j set if s[j]
         * falls within any pair */
        for (j = valids; j >= 0; j--) {
            res <<= 1;
            v = pcmp_val(s, ctrl, j);
            for (i = ((validd - 1) | 1); i >= 0; i -= 2)
                res |= (pcmp_val(d, ctrl, i - 0) <= v &&
                        pcmp_val(d, ctrl, i - 1) >= v);
        }
        break;
    case 2:
        /* equal each: element-wise compare; positions beyond both
         * strings count as equal, beyond only one as unequal */
        res = (2 << (upper - MAX(valids, validd))) - 1;
        res <<= MAX(valids, validd) - MIN(valids, validd);
        for (i = MIN(valids, validd); i >= 0; i--) {
            res <<= 1;
            v = pcmp_val(s, ctrl, i);
            res |= (v == pcmp_val(d, ctrl, i));
        }
        break;
    case 3:
        /* equal ordered: substring search — bit j set if d occurs in s
         * starting at element j */
        for (j = valids - validd; j >= 0; j--) {
            res <<= 1;
            res |= 1;
            for (i = MIN(upper - j, validd); i >= 0; i--)
                res &= (pcmp_val(s, ctrl, i + j) == pcmp_val(d, ctrl, i));
        }
        break;
    }

    /* polarity, ctrl[5:4]: 1 = negate all bits, 3 = negate valid only */
    switch ((ctrl >> 4) & 3) {
    case 1:
        res ^= (2 << upper) - 1;
        break;
    case 3:
        res ^= (2 << valids) - 1;
        break;
    }

    /* CF: any set bit in the result; OF: result bit 0 */
    if (res)
        CC_SRC |= CC_C;
    if (res & 1)
        CC_SRC |= CC_O;

    return res;
}
1949
/* Reverse find-first-set: 1-based index of the MOST significant set bit
 * of val (1 when val == 0; callers only use it for val != 0). */
static inline int rffs1(unsigned int val)
{
    int pos = 1;
    int span = sizeof(val) * 4;

    while (span) {
        if (val >> span) {
            val >>= span;
            pos += span;
        }
        span /= 2;
    }

    return pos;
}
1962
/* Find-first-set: 1-based index of the LEAST significant set bit of
 * val (POSIX ffs semantics; 1 when val == 0, but callers check for a
 * non-zero result first).  The previous implementation shifted in the
 * wrong direction and effectively returned 32 - ctz(val), so the
 * PCMPxSTRI "least significant index" case (ctrl bit 6 clear), which
 * computes ffs1(res) - 1, produced a mirrored index; modern QEMU uses
 * ctz32() here. */
static inline int ffs1(unsigned int val)
{
    int ret = 1, hi;

    for (hi = sizeof(val) * 4; hi; hi /= 2)
        if (!(val & ((1u << hi) - 1))) {
            val >>= hi;
            ret += hi;
        }

    return ret;
}
1975
/* PCMPESTRI: explicit-length string compare.  ECX receives an index
 * derived from the result mask — via rffs1 (most significant set bit)
 * when ctrl bit 6 is set, via ffs1 otherwise — or the element count
 * (16 bytes / 8 words) when there is no match. */
void glue(helper_pcmpestri, SUFFIX) (Reg *d, Reg *s, uint32_t ctrl)
{
    unsigned int res = pcmpxstrx(d, s, ctrl,
                                 pcmp_elen(R_EDX, ctrl),
                                 pcmp_elen(R_EAX, ctrl));

    if (res)
#ifndef VBOX
        env->regs[R_ECX] = ((ctrl & (1 << 6)) ? rffs1 : ffs1)(res) - 1;
#else
        /* same computation without taking the helpers' addresses */
        env->regs[R_ECX] = ((ctrl & (1 << 6)) ? rffs1(res) : ffs1(res)) - 1;
#endif
    else
        env->regs[R_ECX] = 16 >> (ctrl & (1 << 0));
}
1991
1992void glue(helper_pcmpestrm, SUFFIX) (Reg *d, Reg *s, uint32_t ctrl)
1993{
1994 int i;
1995 unsigned int res = pcmpxstrx(d, s, ctrl,
1996 pcmp_elen(R_EDX, ctrl),
1997 pcmp_elen(R_EAX, ctrl));
1998
1999 if ((ctrl >> 6) & 1) {
2000#ifndef VBOX
2001 if (ctrl & 1)
2002 for (i = 0; i <= 8; i--, res >>= 1)
2003 d->W(i) = (res & 1) ? ~0 : 0;
2004 else
2005 for (i = 0; i <= 16; i--, res >>= 1)
2006 d->B(i) = (res & 1) ? ~0 : 0;
2007#else
2008 if (ctrl & 1)
2009 for (i = 0; i < 8; i++, res >>= 1) {
2010 d->W(i) = (res & 1) ? ~0 : 0;
2011 }
2012 else
2013 for (i = 0; i < 16; i++, res >>= 1) {
2014 d->B(i) = (res & 1) ? ~0 : 0;
2015 }
2016#endif
2017 } else {
2018 d->Q(1) = 0;
2019 d->Q(0) = res;
2020 }
2021}
2022
/* PCMPISTRI: implicit-length (NUL-terminated) string compare.  ECX
 * receives an index derived from the result mask — via rffs1 (most
 * significant set bit) when ctrl bit 6 is set, via ffs1 otherwise —
 * or the element count (16 bytes / 8 words) when there is no match. */
void glue(helper_pcmpistri, SUFFIX) (Reg *d, Reg *s, uint32_t ctrl)
{
    unsigned int res = pcmpxstrx(d, s, ctrl,
                                 pcmp_ilen(s, ctrl),
                                 pcmp_ilen(d, ctrl));

    if (res)
        env->regs[R_ECX] = ((ctrl & (1 << 6)) ? rffs1 : ffs1)(res) - 1;
    else
        env->regs[R_ECX] = 16 >> (ctrl & (1 << 0));
}
2034
2035void glue(helper_pcmpistrm, SUFFIX) (Reg *d, Reg *s, uint32_t ctrl)
2036{
2037 int i;
2038 unsigned int res = pcmpxstrx(d, s, ctrl,
2039 pcmp_ilen(s, ctrl),
2040 pcmp_ilen(d, ctrl));
2041
2042 if ((ctrl >> 6) & 1) {
2043#ifndef VBOX
2044 if (ctrl & 1)
2045 for (i = 0; i <= 8; i--, res >>= 1)
2046 d->W(i) = (res & 1) ? ~0 : 0;
2047 else
2048 for (i = 0; i <= 16; i--, res >>= 1)
2049 d->B(i) = (res & 1) ? ~0 : 0;
2050#else
2051 if (ctrl & 1)
2052 for (i = 0; i < 8; i++, res >>= 1) {
2053 d->W(i) = (res & 1) ? ~0 : 0;
2054 }
2055 else
2056 for (i = 0; i < 16; i++, res >>= 1) {
2057 d->B(i) = (res & 1) ? ~0 : 0;
2058 }
2059#endif
2060 } else {
2061 d->Q(1) = 0;
2062 d->Q(0) = res;
2063 }
2064}
2065
#define CRCPOLY 0x1edc6f41        /* CRC-32C (Castagnoli) polynomial */
#define CRCPOLY_BITREV 0x82f63b78 /* bit-reversed form used below */
/* CRC32 helper: fold the low LEN bits of MSG into CRC1, one bit per
 * iteration, using the bit-reversed CRC-32C polynomial.
 * NOTE(review): assumes 0 < len <= TARGET_LONG_BITS — len == 0 would
 * shift by the full word width (undefined behavior); confirm the
 * translator only passes 8/16/32/64 here. */
target_ulong helper_crc32(uint32_t crc1, target_ulong msg, uint32_t len)
{
    target_ulong crc = (msg & ((target_ulong) -1 >>
                        (TARGET_LONG_BITS - len))) ^ crc1;

    while (len--)
        crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_BITREV : 0);

    return crc;
}
2078
/* SWAR population count: POPMASK(i) is the alternating mask for
 * 2^i-bit fields; each POPCOUNT step adds adjacent fields together. */
#define POPMASK(i) ((target_ulong) -1 / ((1LL << (1 << i)) + 1))
#define POPCOUNT(n, i) (n & POPMASK(i)) + ((n >> (1 << i)) & POPMASK(i))
/* POPCNT helper.  TYPE presumably encodes the operand width (1 and,
 * on x86-64, 2 take only the low byte of the partial fold) — confirm
 * against the translator.  ZF is set when the input is zero. */
target_ulong helper_popcnt(target_ulong n, uint32_t type)
{
    CC_SRC = n ? 0 : CC_Z;  /* flags reflect the *input*, computed first */

    n = POPCOUNT(n, 0);
    n = POPCOUNT(n, 1);
    n = POPCOUNT(n, 2);
    n = POPCOUNT(n, 3);
    if (type == 1)
        return n & 0xff;

    n = POPCOUNT(n, 4);
#ifndef TARGET_X86_64
    return n;
#else
    if (type == 2)
        return n & 0xff;

    return POPCOUNT(n, 5);
#endif
}
2102#endif
2103
2104#undef SHIFT
2105#undef XMM_ONLY
2106#undef Reg
2107#undef B
2108#undef W
2109#undef L
2110#undef Q
2111#undef SUFFIX
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette