VirtualBox

source: vbox/trunk/src/recompiler_new/tcg/i386/tcg-target.c@ 14925

Last change on this file since 14925 was 14916, checked in by vboxsync, 16 years ago

fixed math-related failures; automatic stack-alignment fixes

File size: 42.9 KB
Line 
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24/*
25 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
26 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
27 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
28 * a choice of LGPL license versions is made available with the language indicating
29 * that LGPLv2 or any later version may be used, or where a choice of which version
30 * of the LGPL is applied is otherwise unspecified.
31 */
#ifndef NDEBUG
/* Human-readable names of the eight i386 GPRs, indexed by TCG_REG_*;
   used only by debug output of the register allocator. */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%eax",
    "%ecx",
    "%edx",
    "%ebx",
    "%esp",
    "%ebp",
    "%esi",
    "%edi",
};
#endif
44
/* Preference order in which the register allocator hands out registers.
   Under VBOX, ESI is excluded from allocation -- NOTE(review): the reason is
   not visible in this chunk; presumably ESI is reserved (e.g. as TCG_AREG0,
   the CPU env pointer) -- confirm against the VBOX tcg-target.h. */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_EAX,
    TCG_REG_EDX,
    TCG_REG_ECX,
    TCG_REG_EBX,
#ifndef VBOX
    TCG_REG_ESI,
#endif
    TCG_REG_EDI,
    TCG_REG_EBP,
};
56
/* Registers used for the first three integer arguments under the
   gcc regparm(3) calling convention (in order: EAX, EDX, ECX). */
static const int tcg_target_call_iarg_regs[3] = { TCG_REG_EAX, TCG_REG_EDX, TCG_REG_ECX };
/* Registers holding an integer return value (EDX:EAX for 64-bit results). */
static const int tcg_target_call_oarg_regs[2] = { TCG_REG_EAX, TCG_REG_EDX };

/* Address of the common epilogue; INDEX_op_exit_tb jumps here. */
static uint8_t *tb_ret_addr;
61
62static void patch_reloc(uint8_t *code_ptr, int type,
63 tcg_target_long value, tcg_target_long addend)
64{
65 value += addend;
66 switch(type) {
67 case R_386_32:
68 *(uint32_t *)code_ptr = value;
69 break;
70 case R_386_PC32:
71 *(uint32_t *)code_ptr = value - (long)code_ptr;
72 break;
73 default:
74 tcg_abort();
75 }
76}
77
/* maximum number of register used for input function arguments */
#ifndef VBOX
static inline int tcg_target_get_call_iarg_regs_count(int flags)
#else /* VBOX */
DECLINLINE(int) tcg_target_get_call_iarg_regs_count(int flags)
#endif /* VBOX */
{
    flags &= TCG_CALL_TYPE_MASK;
    switch(flags) {
    case TCG_CALL_TYPE_STD:
        /* plain cdecl: all arguments on the stack */
        return 0;
    case TCG_CALL_TYPE_REGPARM_1:
    case TCG_CALL_TYPE_REGPARM_2:
    case TCG_CALL_TYPE_REGPARM:
        /* the REGPARM_* constants are consecutive, so this maps them
           to 1, 2 and 3 register arguments respectively */
        return flags - TCG_CALL_TYPE_REGPARM_1 + 1;
    default:
        tcg_abort();
    }
}
97
98/* parse target specific constraints */
99static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
100{
101 const char *ct_str;
102
103 ct_str = *pct_str;
104 switch(ct_str[0]) {
105 case 'a':
106 ct->ct |= TCG_CT_REG;
107 tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
108 break;
109 case 'b':
110 ct->ct |= TCG_CT_REG;
111 tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
112 break;
113 case 'c':
114 ct->ct |= TCG_CT_REG;
115 tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
116 break;
117 case 'd':
118 ct->ct |= TCG_CT_REG;
119 tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
120 break;
121 case 'S':
122 ct->ct |= TCG_CT_REG;
123 tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
124 break;
125 case 'D':
126 ct->ct |= TCG_CT_REG;
127 tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
128 break;
129 case 'q':
130 ct->ct |= TCG_CT_REG;
131 tcg_regset_set32(ct->u.regs, 0, 0xf);
132 break;
133 case 'r':
134 ct->ct |= TCG_CT_REG;
135 tcg_regset_set32(ct->u.regs, 0, 0xff);
136 break;
137
138 /* qemu_ld/st address constraint */
139 case 'L':
140 ct->ct |= TCG_CT_REG;
141 tcg_regset_set32(ct->u.regs, 0, 0xff);
142 tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX);
143 tcg_regset_reset_reg(ct->u.regs, TCG_REG_EDX);
144 break;
145 default:
146 return -1;
147 }
148 ct_str++;
149 *pct_str = ct_str;
150 return 0;
151}
152
153/* test if a constant matches the constraint */
154#ifndef VBOX
155static inline int tcg_target_const_match(tcg_target_long val,
156#else /* VBOX */
157DECLINLINE(int) tcg_target_const_match(tcg_target_long val,
158#endif /* VBOX */
159 const TCGArgConstraint *arg_ct)
160{
161 int ct;
162 ct = arg_ct->ct;
163 if (ct & TCG_CT_CONST)
164 return 1;
165 else
166 return 0;
167}
168
/* ALU operation selectors: the ModRM /r extension for the 0x81/0x83
   immediate group, or shifted left 3 and OR-ed into 0x00..0x38 for the
   register-register forms. */
#define ARITH_ADD 0
#define ARITH_OR  1
#define ARITH_ADC 2
#define ARITH_SBB 3
#define ARITH_AND 4
#define ARITH_SUB 5
#define ARITH_XOR 6
#define ARITH_CMP 7

/* Shift-group (0xc1/0xd3) ModRM /r extensions. */
#define SHIFT_SHL 4
#define SHIFT_SHR 5
#define SHIFT_SAR 7

/* Condition-code nibbles for Jcc: 0x70+cc (rel8) or 0x0f 0x80+cc (rel32).
   JCC_JMP (-1) requests an unconditional jump instead. */
#define JCC_JMP (-1)
#define JCC_JO  0x0
#define JCC_JNO 0x1
#define JCC_JB  0x2
#define JCC_JAE 0x3
#define JCC_JE  0x4
#define JCC_JNE 0x5
#define JCC_JBE 0x6
#define JCC_JA  0x7
#define JCC_JS  0x8
#define JCC_JNS 0x9
#define JCC_JP  0xa
#define JCC_JNP 0xb
#define JCC_JL  0xc
#define JCC_JGE 0xd
#define JCC_JLE 0xe
#define JCC_JG  0xf

#define P_EXT 0x100 /* 0x0f opcode prefix */
201
/* Map TCG comparison conditions to x86 Jcc condition codes.  MSVC lacks
   C99 designated initializers, so the VBOX/MSVC variant lists the values
   positionally -- it is only correct because the TCG_COND_* enumerators
   happen to be declared in exactly this order. */
#if !defined(VBOX) || !defined(_MSC_VER)
static const uint8_t tcg_cond_to_jcc[10] = {
    [TCG_COND_EQ] = JCC_JE,
    [TCG_COND_NE] = JCC_JNE,
    [TCG_COND_LT] = JCC_JL,
    [TCG_COND_GE] = JCC_JGE,
    [TCG_COND_LE] = JCC_JLE,
    [TCG_COND_GT] = JCC_JG,
    [TCG_COND_LTU] = JCC_JB,
    [TCG_COND_GEU] = JCC_JAE,
    [TCG_COND_LEU] = JCC_JBE,
    [TCG_COND_GTU] = JCC_JA,
};
#else
/* Fortunately, ordering is right */
static const uint8_t tcg_cond_to_jcc[10] = {
    JCC_JE,
    JCC_JNE,
    JCC_JL,
    JCC_JGE,
    JCC_JLE,
    JCC_JG,
    JCC_JB,
    JCC_JAE,
    JCC_JBE,
    JCC_JA,
};
#endif
230
/* Emit an opcode byte; P_EXT in opc requests the two-byte 0x0f-prefixed form. */
#ifndef VBOX
static inline void tcg_out_opc(TCGContext *s, int opc)
#else /* VBOX */
DECLINLINE(void) tcg_out_opc(TCGContext *s, int opc)
#endif /* VBOX */
{
    if (opc & P_EXT)
        tcg_out8(s, 0x0f);
    tcg_out8(s, opc);
}
241
/* Emit opcode plus a register-direct ModRM byte (mod=11, reg=r, rm=rm). */
#ifndef VBOX
static inline void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
#else /* VBOX */
DECLINLINE(void) tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
#endif /* VBOX */
{
    tcg_out_opc(s, opc);
    tcg_out8(s, 0xc0 | (r << 3) | rm);
}
251
/* rm == -1 means no register index */
/* Emit opcode plus a memory-operand ModRM (and SIB/displacement) for
   [rm + offset], choosing the shortest encoding: absolute disp32 when
   rm == -1, no displacement when offset is 0 (except EBP, whose mod=00
   form means disp32-absolute), disp8 when it fits, else disp32.  ESP as
   a base always requires the 0x24 SIB escape byte. */
#ifndef VBOX
static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm,
#else /* VBOX */
DECLINLINE(void) tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm,
#endif /* VBOX */
                                        int32_t offset)
{
    tcg_out_opc(s, opc);
    if (rm == -1) {
        /* mod=00 rm=101: absolute 32-bit address */
        tcg_out8(s, 0x05 | (r << 3));
        tcg_out32(s, offset);
    } else if (offset == 0 && rm != TCG_REG_EBP) {
        if (rm == TCG_REG_ESP) {
            tcg_out8(s, 0x04 | (r << 3));
            tcg_out8(s, 0x24);          /* SIB: base=ESP, no index */
        } else {
            tcg_out8(s, 0x00 | (r << 3) | rm);
        }
    } else if ((int8_t)offset == offset) {
        /* mod=01: 8-bit displacement */
        if (rm == TCG_REG_ESP) {
            tcg_out8(s, 0x44 | (r << 3));
            tcg_out8(s, 0x24);
        } else {
            tcg_out8(s, 0x40 | (r << 3) | rm);
        }
        tcg_out8(s, offset);
    } else {
        /* mod=10: 32-bit displacement */
        if (rm == TCG_REG_ESP) {
            tcg_out8(s, 0x84 | (r << 3));
            tcg_out8(s, 0x24);
        } else {
            tcg_out8(s, 0x80 | (r << 3) | rm);
        }
        tcg_out32(s, offset);
    }
}
289
290#ifndef VBOX
291static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
292#else /* VBOX */
293DECLINLINE(void) tcg_out_mov(TCGContext *s, int ret, int arg)
294#endif /* VBOX */
295{
296 if (arg != ret)
297 tcg_out_modrm(s, 0x8b, ret, arg);
298}
299
300#ifndef VBOX
301static inline void tcg_out_movi(TCGContext *s, TCGType type,
302#else /* VBOX */
303DECLINLINE(void) tcg_out_movi(TCGContext *s, TCGType type,
304#endif /* VBOX */
305 int ret, int32_t arg)
306{
307 if (arg == 0) {
308 /* xor r0,r0 */
309 tcg_out_modrm(s, 0x01 | (ARITH_XOR << 3), ret, ret);
310 } else {
311 tcg_out8(s, 0xb8 + ret);
312 tcg_out32(s, arg);
313 }
314}
315
/* Emit push %reg (single-byte 0x50+reg encoding). */
#ifndef VBOX
static inline void tcg_out_push(TCGContext *s, int reg)
#else /* VBOX */
DECLINLINE(void) tcg_out_push(TCGContext *s, int reg)
#endif /* VBOX */
{
    tcg_out_opc(s, 0x50 + reg);
}
324
/* Emit pop %reg (single-byte 0x58+reg encoding). */
#ifndef VBOX
static inline void tcg_out_pop(TCGContext *s, int reg)
#else /* VBOX */
DECLINLINE(void) tcg_out_pop(TCGContext *s, int reg)
#endif /* VBOX */
{
    tcg_out_opc(s, 0x58 + reg);
}
333
/* Load a 32-bit value from arg2(%arg1) into ret (type is unused: only
   TCG_TYPE_I32 exists on this 32-bit target). */
#ifndef VBOX
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
#else /* VBOX */
DECLINLINE(void) tcg_out_ld(TCGContext *s, TCGType type, int ret,
#endif /* VBOX */
                              int arg1, tcg_target_long arg2)
{
    /* movl */
    tcg_out_modrm_offset(s, 0x8b, ret, arg1, arg2);
}
344
/* Store the 32-bit register arg to arg2(%arg1) (type is unused on i386). */
#ifndef VBOX
static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
#else /* VBOX */
DECLINLINE(void) tcg_out_st(TCGContext *s, TCGType type, int arg,
#endif /* VBOX */
                              int arg1, tcg_target_long arg2)
{
    /* movl */
    tcg_out_modrm_offset(s, 0x89, arg, arg1, arg2);
}
355
/* Emit an ALU-immediate instruction (c = ARITH_*) on register r0, using
   the sign-extended imm8 form (0x83) when val fits, else imm32 (0x81). */
#ifndef VBOX
static inline void tgen_arithi(TCGContext *s, int c, int r0, int32_t val)
#else /* VBOX */
DECLINLINE(void) tgen_arithi(TCGContext *s, int c, int r0, int32_t val)
#endif /* VBOX */
{
    if (val == (int8_t)val) {
        tcg_out_modrm(s, 0x83, c, r0);
        tcg_out8(s, val);
    } else {
        tcg_out_modrm(s, 0x81, c, r0);
        tcg_out32(s, val);
    }
}
370
371void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
372{
373 if (val != 0)
374 tgen_arithi(s, ARITH_ADD, reg, val);
375}
376
/* Emit a jump to a TCG label: conditional when opc is a JCC_* code,
   unconditional when opc == -1 (JCC_JMP).  A bound label close enough
   gets the 2-byte rel8 form; otherwise the rel32 form is used.  An
   unbound (forward) label always gets the rel32 form plus a R_386_PC32
   relocation to be patched when the label is defined. */
static void tcg_out_jxx(TCGContext *s, int opc, int label_index)
{
    int32_t val, val1;
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        val = l->u.value - (tcg_target_long)s->code_ptr;
        val1 = val - 2;  /* displacement measured past the 2-byte short form */
        if ((int8_t)val1 == val1) {
            if (opc == -1)
                tcg_out8(s, 0xeb);       /* jmp rel8 */
            else
                tcg_out8(s, 0x70 + opc); /* jcc rel8 */
            tcg_out8(s, val1);
        } else {
            /* long forms are 5 bytes (jmp) or 6 bytes (jcc) */
            if (opc == -1) {
                tcg_out8(s, 0xe9);
                tcg_out32(s, val - 5);
            } else {
                tcg_out8(s, 0x0f);
                tcg_out8(s, 0x80 + opc);
                tcg_out32(s, val - 6);
            }
        }
    } else {
        if (opc == -1) {
            tcg_out8(s, 0xe9);
        } else {
            tcg_out8(s, 0x0f);
            tcg_out8(s, 0x80 + opc);
        }
        /* reserve the 4 displacement bytes and record a relocation */
        tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
        s->code_ptr += 4;
    }
}
412
/* Compare arg1 against arg2 (register or constant) and emit a conditional
   jump to label_index.  Comparing against constant 0 uses the shorter
   "test r,r" form, which sets the same flags as "cmp $0,r". */
static void tcg_out_brcond(TCGContext *s, int cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           int label_index)
{
    if (const_arg2) {
        if (arg2 == 0) {
            /* test r, r */
            tcg_out_modrm(s, 0x85, arg1, arg1);
        } else {
            tgen_arithi(s, ARITH_CMP, arg1, arg2);
        }
    } else {
        tcg_out_modrm(s, 0x01 | (ARITH_CMP << 3), arg2, arg1);
    }
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index);
}
429
#ifdef VBOX
/* Emit a direct call (E8 rel32) to an arbitrary host address; the
   displacement is relative to the end of the 5-byte instruction. */
DECLINLINE(void)
tcg_out_long_call(TCGContext *s, void* dst)
{
    intptr_t disp = (uintptr_t)dst - (uintptr_t)s->code_ptr - 5;
    tcg_out8(s, 0xe8); /* call disp32 */
    tcg_out32(s, disp); /* disp32 */
}
/* Emit a direct jump (E9 rel32) to an arbitrary host address. */
DECLINLINE(void)
tcg_out_long_jmp(TCGContext *s, void* dst)
{
    intptr_t disp = (uintptr_t)dst - (uintptr_t)s->code_ptr - 5;
    tcg_out8(s, 0xe9); /* jmp disp32 */
    tcg_out32(s, disp); /* disp32 */
}
#endif /* VBOX */
446
447
/* XXX: we implement it at the target level to avoid having to
   handle cross basic blocks temporaries */
/* Emit a 64-bit conditional branch from 32-bit register pairs:
   args[0]/args[1] are the low/high words of operand A, args[2]/args[3]
   of operand B, args[4] the condition, args[5] the target label.
   Inequalities compare the high words with the signed/unsigned condition
   first, fall through to label_next when the high words are equal, and
   then compare the low words unsigned.  EQ/NE need no fall-through
   chaining, but label_next is still defined at the end in every case. */
static void tcg_out_brcond2(TCGContext *s,
                            const TCGArg *args, const int *const_args)
{
    int label_next;
    label_next = gen_new_label();
    switch(args[4]) {
    case TCG_COND_EQ:
        /* low words differ -> not equal, skip; else decide on high words */
        tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2], label_next);
        tcg_out_brcond(s, TCG_COND_EQ, args[1], args[3], const_args[3], args[5]);
        break;
    case TCG_COND_NE:
        /* either half differing suffices */
        tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2], args[5]);
        tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], args[5]);
        break;
    case TCG_COND_LT:
        tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);    /* high words unequal: done */
        tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_LE:
        tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_GT:
        tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_GE:
        tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_LTU:
        tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_LEU:
        tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_GTU:
        tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_GEU:
        tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2], args[5]);
        break;
    default:
        tcg_abort();
    }
    tcg_out_label(s, label_next, (tcg_target_long)s->code_ptr);
}
509
#if defined(CONFIG_SOFTMMU)

#include "../../softmmu_defs.h"

/* Slow-path memory helpers for the softmmu TLB-miss case,
   indexed by log2 of the access size (1, 2, 4, 8 bytes). */
static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif
528
#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
/* REM physical-memory accessors used when the TLB holds physical
   addresses: first four entries are the unsigned 1/2/4/8-byte reads
   (indexed by opc & 3), the next four the sign-extending variants
   (indexed with the sign bit, opc | 4). */
static void *vbox_ld_helpers[] = {
    remR3PhysReadU8,
    remR3PhysReadU16,
    remR3PhysReadU32,
    remR3PhysReadU64,
    remR3PhysReadS8,
    remR3PhysReadS16,
    remR3PhysReadS32,
    remR3PhysReadS64,
};

/* Write accessors, indexed by log2 of the access size. */
static void *vbox_st_helpers[] = {
    remR3PhysWriteU8,
    remR3PhysWriteU16,
    remR3PhysWriteU32,
    remR3PhysWriteU64
};
547
#ifdef RT_OS_DARWIN

/* could be any register, not used for argument passing */
#define BIAS_REG TCG_REG_EDI
/* Call-out stack alignment; must be a power of 2 for the mask below to be right
   (Darwin's ABI requires 16-byte alignment at call sites). */
#define CALLOUT_FRAME_ALIGN 16
#define CALLOUT_FRAME_MASK ~(CALLOUT_FRAME_ALIGN - 1)
555
556static void tcg_align_frame_pre(TCGContext* s, int args_size)
557{
558 int8_t bias;
559
560 bias = CALLOUT_FRAME_ALIGN - args_size;
561
562 while (bias <= 0)
563 bias += CALLOUT_FRAME_ALIGN;
564
565 tcg_out_push(s, BIAS_REG);
566 tcg_out_opc(s, 0x8d); tcg_out8(s, 0x74 | (BIAS_REG <<3)); tcg_out8(s, 0x20 | TCG_REG_ESP); tcg_out8(s, -args_size-4); /* lea -(args_size+4)(%esp),%bias */
567 tcg_out8(s, 0x83); tcg_out8(s, 0xe0 | BIAS_REG); tcg_out8(s, CALLOUT_FRAME_MASK); /* andl $CALLOUT_FRAME_MASK, %bias */
568 tcg_out_modrm(s, 0x01 | (ARITH_SUB << 3), TCG_REG_ESP, BIAS_REG); /* sub %esp,%bias */
569 tcg_out8(s, 0x8d); tcg_out8(s, 0x64); tcg_out8(s, TCG_REG_ESP |(BIAS_REG << 3)); tcg_out8(s, args_size+4);/* lea args_size+4(%esp,%bias),%esp */
570 tcg_out_push(s, BIAS_REG);
571 /* restore old bias_reg value, so nobody notices */
572 tcg_out8(s, 0xf7); tcg_out8(s, 0xd8 | BIAS_REG); /* neg bias_reg */
573 tcg_out8(s, 0x8b); tcg_out8(s, 0x44 | (BIAS_REG << 3)); tcg_out8(s, TCG_REG_ESP |(BIAS_REG << 3)); tcg_out8(s, -args_size);/* mov -args_size(%esp,%bias),%bias */
574
575 /* how stack is ready for args to be pushed */
576}
577
/* Undo the stack re-alignment done by tcg_align_frame_pre(): reload the
   saved adjustment from just above the (already-popped) argument area,
   subtract it from ESP, and restore BIAS_REG's caller value. */
static void tcg_align_frame_post(TCGContext* s, int args_size)
{
    tcg_out8(s, 0x8b); tcg_out8(s, 0x44 | (BIAS_REG << 3)); tcg_out8(s, TCG_REG_ESP |(TCG_REG_ESP << 3)); tcg_out8(s, args_size);/* mov args_size(%esp),%bias */
    tcg_out_modrm(s, 0x01 | (ARITH_SUB << 3), BIAS_REG, TCG_REG_ESP); /* sub %bias, %esp */
    tcg_out_pop(s, BIAS_REG);
}
584
/* Emit a call to the REM physical-read helper for access kind `index`
   (see vbox_ld_helpers), with Darwin stack alignment around the call.
   The helper is cdecl: the 64-bit physical address is pushed (zero-
   extended from the 32-bit addr_reg), and the result comes back in
   EAX (and EDX for 64-bit reads, copied to data_reg2). */
static void tcg_out_vbox_phys_read(TCGContext *s, int index,
                                   int addr_reg,
                                   int data_reg, int data_reg2)
{
    int useReg2 = ((index & 3) == 3);   /* 64-bit access uses two regs */

    /** @todo: should we make phys addess accessors fastcalls - probably not a big deal */
    /* out parameter (address), note that phys address is always 64-bit */
    AssertMsg(sizeof(RTGCPHYS) == 8, ("Physical address must be 64-bits, update caller\n"));
    tcg_align_frame_pre(s, 8);

    /* push arguments: high dword 0, then the 32-bit address */
    tcg_out8(s, 0x6a); tcg_out8(s, 0x00); /* push $0 */
    tcg_out_push(s, addr_reg);

    tcg_out_long_call(s, vbox_ld_helpers[index]);
    tcg_align_frame_post(s, 8);   /* also discards the pushed arguments */

    tcg_out_mov(s, data_reg, TCG_REG_EAX);
    /* returned 64-bit value */
    if (useReg2)
        tcg_out_mov(s, data_reg2, TCG_REG_EDX);
}
/* Emit a call to the REM physical-write helper for access kind `index`
   (see vbox_st_helpers), with Darwin stack alignment around the call.
   Pushed cdecl arguments, last first: value (two dwords for 64-bit
   stores) then the zero-extended 64-bit physical address. */
static void tcg_out_vbox_phys_write(TCGContext *s, int index,
                                    int addr_reg,
                                    int val_reg, int val_reg2) {
    int use_reg2 = ((index & 3) == 3);            /* 64-bit store */
    int args_size = 8 + (use_reg2 ? 8 : 4);       /* addr (8) + value (4/8) */
    int temp_val_reg = 0;                         /* NOTE(review): unused */

    /** @todo: should we make phys addess accessors fastcalls - probably not a big deal */
    tcg_align_frame_pre(s, args_size);
    /* out parameter (value2) */
    if (use_reg2)
        tcg_out_push(s, val_reg2);
    /* out parameter (value) */
    tcg_out_push(s, val_reg);
    /* out parameter (address), note that phys address is always 64-bit */
    AssertMsg(sizeof(RTGCPHYS) == 8, ("Physical address must be 64-bits, update caller\n"));
    tcg_out8(s, 0x6a); tcg_out8(s, 0x00); /* push $0 */
    tcg_out_push(s, addr_reg);

    /* call it */
    tcg_out_long_call(s, vbox_st_helpers[index]);

    tcg_align_frame_post(s, args_size);
}
632
633#else
/* Non-Darwin variant: same REM physical-read call as above, but without
   stack re-alignment; the pushed arguments are popped explicitly with an
   add to ESP after the call. */
static void tcg_out_vbox_phys_read(TCGContext *s, int index,
                                   int addr_reg,
                                   int data_reg, int data_reg2)
{
    int useReg2 = ((index & 3) == 3);   /* 64-bit access uses two regs */

    /** @todo: should we make phys addess accessors fastcalls - probably not a big deal */
    /* out parameter (address), note that phys address is always 64-bit */
    AssertMsg(sizeof(RTGCPHYS) == 8, ("Physical address must be 64-bits, update caller\n"));
    tcg_out8(s, 0x6a); tcg_out8(s, 0x00); /* push $0 */
    tcg_out_push(s, addr_reg);

    tcg_out_long_call(s, vbox_ld_helpers[index]);

    /* mov %eax, data_reg */
    tcg_out_mov(s, data_reg, TCG_REG_EAX);

    /* returned 64-bit value */
    if (useReg2)
        tcg_out_mov(s, data_reg2, TCG_REG_EDX);

    /* clean stack after us */
    tcg_out_addi(s, TCG_REG_ESP, 8);
}
658
/* Non-Darwin variant of the physical write call-out: cdecl push of value
   (one or two dwords) then the zero-extended 64-bit address, followed by
   explicit caller stack cleanup. */
static void tcg_out_vbox_phys_write(TCGContext *s, int index,
                                    int addr_reg,
                                    int val_reg, int val_reg2) {
    int useReg2 = ((index & 3) == 3);   /* 64-bit store */

    /** @todo: should we make phys addess accessors fastcalls - probably not a big deal */
    /* out parameter (value2) */
    if (useReg2)
        tcg_out_push(s, val_reg2);
    /* out parameter (value) */
    tcg_out_push(s, val_reg);
    /* out parameter (address), note that phys address is always 64-bit */
    AssertMsg(sizeof(RTGCPHYS) == 8, ("Physical address must be 64-bits, update caller\n"));
    tcg_out8(s, 0x6a); tcg_out8(s, 0x00); /* push $0 */
    tcg_out_push(s, addr_reg);

    /* call it */
    tcg_out_long_call(s, vbox_st_helpers[index]);

    /* clean stack after us: 8 bytes of address + 4/8 bytes of value */
    tcg_out_addi(s, TCG_REG_ESP, 8 + (useReg2 ? 8 : 4));
}
681#endif
682
683#endif
684
/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
   EAX. It will be useful once fixed registers globals are less
   common. */
/* Emit a guest-memory load.  args holds data_reg (+ data_reg2 for 64-bit),
   addr_reg (+ addr_reg2 when the guest address is 64-bit), then mem_index.
   opc bits 0..1 give log2 of the access size, bit 2 requests sign
   extension.  With CONFIG_SOFTMMU an inline TLB probe is emitted, calling
   qemu_ld_helpers[] on miss; the fast path loads through the TLB addend. */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
#if defined(CONFIG_SOFTMMU)
    uint8_t *label1_ptr, *label2_ptr;
#endif
#if TARGET_LONG_BITS == 64
#if defined(CONFIG_SOFTMMU)
    uint8_t *label3_ptr;
#endif
    int addr_reg2;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;    /* destination for the high 32 bits */
    else
        data_reg2 = 0;
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
    mem_index = *args;
    s_bits = opc & 3;           /* log2 of the access size */

    r0 = TCG_REG_EAX;
    r1 = TCG_REG_EDX;

#if defined(CONFIG_SOFTMMU)
    tcg_out_mov(s, r1, addr_reg);

    tcg_out_mov(s, r0, addr_reg);

    /* r1 = TLB index field of the address */
    tcg_out_modrm(s, 0xc1, 5, r1); /* shr $x, r1 */
    tcg_out8(s, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    /* r0 = page-aligned address, with low bits set if the access would
       cross into the next page (forces a comparison mismatch -> slow path) */
    tcg_out_modrm(s, 0x81, 4, r0); /* andl $x, r0 */
    tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */
    tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

#ifndef VBOX
    tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
    tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
    tcg_out8(s, (5 << 3) | r1);
    tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_read));
#else
    tcg_out_opc(s, 0x8d); /* lea offset(r1, env), r1 */
    tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
    tcg_out8(s, (TCG_AREG0 << 3) | r1);
    tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_read));
#endif

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, 0x3b, r0, r1, 0);

    tcg_out_mov(s, r0, addr_reg);

#if TARGET_LONG_BITS == 32
    /* je label1 */
    tcg_out8(s, 0x70 + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;      /* rel8 byte patched when label1 is bound below */
#else
    /* jne label3 */
    tcg_out8(s, 0x70 + JCC_JNE);
    label3_ptr = s->code_ptr;
    s->code_ptr++;

    /* cmp 4(r1), addr_reg2 */
    tcg_out_modrm_offset(s, 0x3b, addr_reg2, r1, 4);

    /* je label1 */
    tcg_out8(s, 0x70 + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* label3: */
    *label3_ptr = s->code_ptr - label3_ptr - 1;
#endif

    /* TLB miss: call the load helper (regparm: EAX = address [already
       there], then mem_index / the high address word). */
    /* XXX: move that code at the end of the TB */
#if TARGET_LONG_BITS == 32
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EDX, mem_index);
#else
    tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
#endif
    tcg_out8(s, 0xe8);  /* call rel32 */
    tcg_out32(s, (tcg_target_long)qemu_ld_helpers[s_bits] -
              (tcg_target_long)s->code_ptr - 4);

    /* move the helper result (EAX, or EDX:EAX) into the destination,
       applying sign extension when opc requests it */
    switch(opc) {
    case 0 | 4:
        /* movsbl */
        tcg_out_modrm(s, 0xbe | P_EXT, data_reg, TCG_REG_EAX);
        break;
    case 1 | 4:
        /* movswl */
        tcg_out_modrm(s, 0xbf | P_EXT, data_reg, TCG_REG_EAX);
        break;
    case 0:
    case 1:
    case 2:
    default:
        tcg_out_mov(s, data_reg, TCG_REG_EAX);
        break;
    case 3:
        /* if data_reg is EDX, copying EAX first would clobber the high
           half; exchange instead */
        if (data_reg == TCG_REG_EDX) {
            tcg_out_opc(s, 0x90 + TCG_REG_EDX); /* xchg %edx, %eax */
            tcg_out_mov(s, data_reg2, TCG_REG_EAX);
        } else {
            tcg_out_mov(s, data_reg, TCG_REG_EAX);
            tcg_out_mov(s, data_reg2, TCG_REG_EDX);
        }
        break;
    }

    /* jmp label2 */
    tcg_out8(s, 0xeb);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: TLB hit */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* add x(r1), r0 -- apply the host/guest address bias */
    tcg_out_modrm_offset(s, 0x03, r0, r1, offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_read));
#else
    r0 = addr_reg;
#endif

#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    /* fast path: direct load through r0, byte-swapping for a
       big-endian guest */
    switch(opc) {
    case 0:
        /* movzbl */
        tcg_out_modrm_offset(s, 0xb6 | P_EXT, data_reg, r0, 0);
        break;
    case 0 | 4:
        /* movsbl */
        tcg_out_modrm_offset(s, 0xbe | P_EXT, data_reg, r0, 0);
        break;
    case 1:
        /* movzwl */
        tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, 0);
        if (bswap) {
            /* rolw $8, data_reg */
            tcg_out8(s, 0x66);
            tcg_out_modrm(s, 0xc1, 0, data_reg);
            tcg_out8(s, 8);
        }
        break;
    case 1 | 4:
        /* movswl */
        tcg_out_modrm_offset(s, 0xbf | P_EXT, data_reg, r0, 0);
        if (bswap) {
            /* swap the two bytes, then sign-extend the result again */
            /* rolw $8, data_reg */
            tcg_out8(s, 0x66);
            tcg_out_modrm(s, 0xc1, 0, data_reg);
            tcg_out8(s, 8);

            /* movswl data_reg, data_reg */
            tcg_out_modrm(s, 0xbf | P_EXT, data_reg, data_reg);
        }
        break;
    case 2:
        /* movl (r0), data_reg */
        tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);
        if (bswap) {
            /* bswap */
            tcg_out_opc(s, (0xc8 + data_reg) | P_EXT);
        }
        break;
    case 3:
        /* XXX: could be nicer */
        if (r0 == data_reg) {
            /* the first half-load would clobber the base address; move
               it to a register distinct from both destinations first */
            r1 = TCG_REG_EDX;
            if (r1 == data_reg)
                r1 = TCG_REG_EAX;
            tcg_out_mov(s, r1, r0);
            r0 = r1;
        }
        if (!bswap) {
            tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);
            tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, 4);
        } else {
            /* big-endian guest: the halves are cross-loaded and each
               byte-swapped */
            tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 4);
            tcg_out_opc(s, (0xc8 + data_reg) | P_EXT);

            tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, 0);
            /* bswap */
            tcg_out_opc(s, (0xc8 + data_reg2) | P_EXT);
        }
        break;
    default:
        tcg_abort();
    }
#else /* VBOX */
    /* TLB holds physical addresses: go through the REM read helpers */
    tcg_out_vbox_phys_read(s, opc, r0, data_reg, data_reg2);
#endif


#if defined(CONFIG_SOFTMMU)
    /* label2: */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#endif
}
903
904
/* Emit a guest-memory store.  args layout matches tcg_out_qemu_ld:
   data_reg (+ data_reg2 for 64-bit), addr_reg (+ addr_reg2), mem_index;
   opc is log2 of the access size (0..3, never sign-extended for stores).
   With CONFIG_SOFTMMU an inline TLB probe against addr_write is emitted,
   calling qemu_st_helpers[] on miss; the fast path stores through the
   TLB addend. */
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
#if defined(CONFIG_SOFTMMU)
    uint8_t *label1_ptr, *label2_ptr;
#endif
#if TARGET_LONG_BITS == 64
#if defined(CONFIG_SOFTMMU)
    uint8_t *label3_ptr;
#endif
    int addr_reg2;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;    /* high 32 bits of the value to store */
    else
        data_reg2 = 0;
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
    mem_index = *args;

    s_bits = opc;   /* stores have no sign bit: opc is already log2(size) */

    r0 = TCG_REG_EAX;
    r1 = TCG_REG_EDX;

#if defined(CONFIG_SOFTMMU)
    tcg_out_mov(s, r1, addr_reg);

    tcg_out_mov(s, r0, addr_reg);

    /* r1 = TLB index field of the address */
    tcg_out_modrm(s, 0xc1, 5, r1); /* shr $x, r1 */
    tcg_out8(s, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    /* r0 = page-aligned address; low bits force a mismatch (slow path)
       when the access would span a page boundary */
    tcg_out_modrm(s, 0x81, 4, r0); /* andl $x, r0 */
    tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */
    tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

#ifndef VBOX
    tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
    tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
    tcg_out8(s, (5 << 3) | r1);
    tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_write));
#else
    tcg_out_opc(s, 0x8d); /* lea offset(r1, env), r1 */
    tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
    tcg_out8(s, (TCG_AREG0 << 3) | r1);
    tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_write));
#endif

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, 0x3b, r0, r1, 0);

    tcg_out_mov(s, r0, addr_reg);

#if TARGET_LONG_BITS == 32
    /* je label1 */
    tcg_out8(s, 0x70 + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;      /* rel8 byte patched when label1 is bound below */
#else
    /* jne label3 */
    tcg_out8(s, 0x70 + JCC_JNE);
    label3_ptr = s->code_ptr;
    s->code_ptr++;

    /* cmp 4(r1), addr_reg2 */
    tcg_out_modrm_offset(s, 0x3b, addr_reg2, r1, 4);

    /* je label1 */
    tcg_out8(s, 0x70 + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* label3: */
    *label3_ptr = s->code_ptr - label3_ptr - 1;
#endif

    /* TLB miss: call the store helper.  Register args follow regparm
       (EAX = address already set); arguments that do not fit in the
       three registers are pushed and popped around the call. */
    /* XXX: move that code at the end of the TB */
#if TARGET_LONG_BITS == 32
    if (opc == 3) {
        tcg_out_mov(s, TCG_REG_EDX, data_reg);
        tcg_out_mov(s, TCG_REG_ECX, data_reg2);
        tcg_out8(s, 0x6a); /* push Ib */
        tcg_out8(s, mem_index);
        tcg_out8(s, 0xe8);
        tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
                  (tcg_target_long)s->code_ptr - 4);
        tcg_out_addi(s, TCG_REG_ESP, 4);
    } else {
        /* narrow the value to the access size before the call */
        switch(opc) {
        case 0:
            /* movzbl */
            tcg_out_modrm(s, 0xb6 | P_EXT, TCG_REG_EDX, data_reg);
            break;
        case 1:
            /* movzwl */
            tcg_out_modrm(s, 0xb7 | P_EXT, TCG_REG_EDX, data_reg);
            break;
        case 2:
            tcg_out_mov(s, TCG_REG_EDX, data_reg);
            break;
        }
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
        tcg_out8(s, 0xe8);
        tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
                  (tcg_target_long)s->code_ptr - 4);
    }
#else
    if (opc == 3) {
        tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
        tcg_out8(s, 0x6a); /* push Ib */
        tcg_out8(s, mem_index);
        tcg_out_opc(s, 0x50 + data_reg2); /* push */
        tcg_out_opc(s, 0x50 + data_reg); /* push */
        tcg_out8(s, 0xe8);
        tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
                  (tcg_target_long)s->code_ptr - 4);
        tcg_out_addi(s, TCG_REG_ESP, 12);
    } else {
        tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
        switch(opc) {
        case 0:
            /* movzbl */
            tcg_out_modrm(s, 0xb6 | P_EXT, TCG_REG_ECX, data_reg);
            break;
        case 1:
            /* movzwl */
            tcg_out_modrm(s, 0xb7 | P_EXT, TCG_REG_ECX, data_reg);
            break;
        case 2:
            tcg_out_mov(s, TCG_REG_ECX, data_reg);
            break;
        }
        tcg_out8(s, 0x6a); /* push Ib */
        tcg_out8(s, mem_index);
        tcg_out8(s, 0xe8);
        tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
                  (tcg_target_long)s->code_ptr - 4);
        tcg_out_addi(s, TCG_REG_ESP, 4);
    }
#endif

    /* jmp label2 */
    tcg_out8(s, 0xeb);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: TLB hit */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* add x(r1), r0 -- apply the host/guest address bias */
    tcg_out_modrm_offset(s, 0x03, r0, r1, offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_write));
#else
    r0 = addr_reg;
#endif

#if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB)
#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    /* fast path: direct store through r0, byte-swapping a copy of the
       value (in r1) for big-endian guests so data_reg stays intact */
    switch(opc) {
    case 0:
        /* movb */
        tcg_out_modrm_offset(s, 0x88, data_reg, r0, 0);
        break;
    case 1:
        if (bswap) {
            tcg_out_mov(s, r1, data_reg);
            tcg_out8(s, 0x66); /* rolw $8, %ecx */
            tcg_out_modrm(s, 0xc1, 0, r1);
            tcg_out8(s, 8);
            data_reg = r1;
        }
        /* movw */
        tcg_out8(s, 0x66);
        tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
        break;
    case 2:
        if (bswap) {
            tcg_out_mov(s, r1, data_reg);
            /* bswap data_reg */
            tcg_out_opc(s, (0xc8 + r1) | P_EXT);
            data_reg = r1;
        }
        /* movl */
        tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
        break;
    case 3:
        if (bswap) {
            /* store each half byte-swapped into the opposite slot */
            tcg_out_mov(s, r1, data_reg2);
            /* bswap data_reg */
            tcg_out_opc(s, (0xc8 + r1) | P_EXT);
            tcg_out_modrm_offset(s, 0x89, r1, r0, 0);
            tcg_out_mov(s, r1, data_reg);
            /* bswap data_reg */
            tcg_out_opc(s, (0xc8 + r1) | P_EXT);
            tcg_out_modrm_offset(s, 0x89, r1, r0, 4);
        } else {
            tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
            tcg_out_modrm_offset(s, 0x89, data_reg2, r0, 4);
        }
        break;
    default:
        tcg_abort();
    }
#else
    /* TLB holds physical addresses: go through the REM write helpers */
    tcg_out_vbox_phys_write(s, opc, r0, data_reg, data_reg2);
#endif

#if defined(CONFIG_SOFTMMU)
    /* label2: */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#endif
}
1129
/* Emit x86 machine code for one TCG opcode.
 *
 * s          - TCG context; bytes are appended at s->code_ptr.
 * opc        - the INDEX_op_* opcode to translate.
 * args       - opcode operands (registers, immediates, label ids).
 * const_args - parallel flags: non-zero when args[i] is an immediate
 *              constant instead of a register.
 */
#ifndef VBOX
static inline void tcg_out_op(TCGContext *s, int opc,
#else /* VBOX */
DECLINLINE(void) tcg_out_op(TCGContext *s, int opc,
#endif /* VBOX */
                              const TCGArg *args, const int *const_args)
{
    int c;

    switch(opc) {
    case INDEX_op_exit_tb:
        /* Load the TB return value into EAX, then jump back to the
           common epilogue emitted by tcg_target_qemu_prologue(). */
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EAX, args[0]);
        tcg_out8(s, 0xe9); /* jmp tb_ret_addr */
        tcg_out32(s, tb_ret_addr - s->code_ptr - 4);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method: emit jmp with a zero displacement
               placeholder; the offset is recorded so the displacement
               can be patched when TBs are chained later. */
            tcg_out8(s, 0xe9); /* jmp im */
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
        } else {
            /* indirect jump method */
            /* jmp Ev -- jump through the tb_next pointer slot */
            tcg_out_modrm_offset(s, 0xff, 4, -1,
                                 (tcg_target_long)(s->tb_next + args[0]));
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            /* call im: displacement is relative to the next instruction,
               hence the -4 for the displacement field itself. */
            tcg_out8(s, 0xe8);
            tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
        } else {
            /* call *reg */
            tcg_out_modrm(s, 0xff, 2, args[0]);
        }
        break;
    case INDEX_op_jmp:
        if (const_args[0]) {
            /* jmp im (rel32) */
            tcg_out8(s, 0xe9);
            tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
        } else {
            /* jmp *reg */
            tcg_out_modrm(s, 0xff, 4, args[0]);
        }
        break;
    case INDEX_op_br:
        tcg_out_jxx(s, JCC_JMP, args[0]);
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    /* Loads below: args[0] = dest reg, args[1] = base reg,
       args[2] = displacement. */
    case INDEX_op_ld8u_i32:
        /* movzbl */
        tcg_out_modrm_offset(s, 0xb6 | P_EXT, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        /* movsbl */
        tcg_out_modrm_offset(s, 0xbe | P_EXT, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        /* movzwl */
        tcg_out_modrm_offset(s, 0xb7 | P_EXT, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        /* movswl */
        tcg_out_modrm_offset(s, 0xbf | P_EXT, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        /* movl */
        tcg_out_modrm_offset(s, 0x8b, args[0], args[1], args[2]);
        break;
    /* Stores below: args[0] = source reg, args[1] = base reg,
       args[2] = displacement. */
    case INDEX_op_st8_i32:
        /* movb */
        tcg_out_modrm_offset(s, 0x88, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        /* movw (0x66 is the operand-size override prefix) */
        tcg_out8(s, 0x66);
        tcg_out_modrm_offset(s, 0x89, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        /* movl */
        tcg_out_modrm_offset(s, 0x89, args[0], args[1], args[2]);
        break;
    case INDEX_op_sub_i32:
        c = ARITH_SUB;
        goto gen_arith;
    case INDEX_op_and_i32:
        c = ARITH_AND;
        goto gen_arith;
    case INDEX_op_or_i32:
        c = ARITH_OR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_add_i32:
        c = ARITH_ADD;
    gen_arith:
        /* Two-operand x86 form: args[0] is both destination and first
           source (the op_defs constraint "0" guarantees this);
           args[2] is either an immediate or a register. */
        if (const_args[2]) {
            tgen_arithi(s, c, args[0], args[2]);
        } else {
            tcg_out_modrm(s, 0x01 | (c << 3), args[2], args[0]);
        }
        break;
    case INDEX_op_mul_i32:
        if (const_args[2]) {
            int32_t val;
            val = args[2];
            if (val == (int8_t)val) {
                /* imul r, r/m, imm8 (imm8 is sign-extended) */
                tcg_out_modrm(s, 0x6b, args[0], args[0]);
                tcg_out8(s, val);
            } else {
                /* imul r, r/m, imm32 */
                tcg_out_modrm(s, 0x69, args[0], args[0]);
                tcg_out32(s, val);
            }
        } else {
            /* imul r, r/m */
            tcg_out_modrm(s, 0xaf | P_EXT, args[0], args[2]);
        }
        break;
    case INDEX_op_mulu2_i32:
        /* mul r/m: the op_defs constraints pin the implicit operands,
           EDX:EAX receives the 64-bit product. */
        tcg_out_modrm(s, 0xf7, 4, args[3]);
        break;
    case INDEX_op_div2_i32:
        /* idiv r/m (signed divide of EDX:EAX) */
        tcg_out_modrm(s, 0xf7, 7, args[4]);
        break;
    case INDEX_op_divu2_i32:
        /* div r/m (unsigned divide of EDX:EAX) */
        tcg_out_modrm(s, 0xf7, 6, args[4]);
        break;
    case INDEX_op_shl_i32:
        c = SHIFT_SHL;
    gen_shift32:
        if (const_args[2]) {
            if (args[2] == 1) {
                /* shift by 1: dedicated shorter encoding */
                tcg_out_modrm(s, 0xd1, c, args[0]);
            } else {
                /* shift by imm8 */
                tcg_out_modrm(s, 0xc1, c, args[0]);
                tcg_out8(s, args[2]);
            }
        } else {
            /* shift by %cl (constraint "ci" forces the count into ECX) */
            tcg_out_modrm(s, 0xd3, c, args[0]);
        }
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SHR;
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SAR;
        goto gen_shift32;

    case INDEX_op_add2_i32:
        /* 64-bit add as an add/adc pair; nothing may be emitted between
           the two instructions that would clobber the carry flag. */
        if (const_args[4])
            tgen_arithi(s, ARITH_ADD, args[0], args[4]);
        else
            tcg_out_modrm(s, 0x01 | (ARITH_ADD << 3), args[4], args[0]);
        if (const_args[5])
            tgen_arithi(s, ARITH_ADC, args[1], args[5]);
        else
            tcg_out_modrm(s, 0x01 | (ARITH_ADC << 3), args[5], args[1]);
        break;
    case INDEX_op_sub2_i32:
        /* 64-bit subtract as a sub/sbb pair; same carry-flag caveat. */
        if (const_args[4])
            tgen_arithi(s, ARITH_SUB, args[0], args[4]);
        else
            tcg_out_modrm(s, 0x01 | (ARITH_SUB << 3), args[4], args[0]);
        if (const_args[5])
            tgen_arithi(s, ARITH_SBB, args[1], args[5]);
        else
            tcg_out_modrm(s, 0x01 | (ARITH_SBB << 3), args[5], args[1]);
        break;
    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args, const_args);
        break;

    /* Guest memory accesses: the low two bits of the second argument
       encode log2(size), bit 2 requests sign extension. */
    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32u:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    default:
        /* Any opcode not listed in x86_op_defs is a front-end bug. */
        tcg_abort();
    }
}
1343
/* Operand-constraint table consumed by tcg_add_target_add_op_defs().
 * One entry per supported opcode; each string constrains one operand.
 * Letters seen here: "r" = any allocatable register, "q" = a register
 * with a byte-addressable low part, "a"/"d" = fixed EAX/EDX, "i" = an
 * immediate is acceptable, digits "0"/"1" alias an output to the
 * same-numbered input, "L" = register usable across the qemu_ld/st
 * slow path, "c"/"cb" = ECX (resp. ECX-or-EBX) -- exact letter
 * semantics live in target_parse_constraint(); verify there.
 */
static const TCGTargetOpDef x86_op_defs[] = {
    { INDEX_op_exit_tb, {"", "" } },
    { INDEX_op_goto_tb, {"", "" } },
    { INDEX_op_call, { "ri", "", } },
    { INDEX_op_jmp, { "ri", ""} },
    { INDEX_op_br, {"", "" } },
    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    /* st8 needs a byte-addressable source register, hence "q". */
    { INDEX_op_st8_i32, { "q", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* Two-operand x86 arithmetic: destination must equal input 1 ("0"). */
    { INDEX_op_add_i32, { "r", "0", "ri" } },
    { INDEX_op_sub_i32, { "r", "0", "ri" } },
    { INDEX_op_mul_i32, { "r", "0", "ri" } },
    /* mul/div use the implicit EDX:EAX register pair. */
    { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },
    { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } },
    { INDEX_op_and_i32, { "r", "0", "ri" } },
    { INDEX_op_or_i32, { "r", "0", "ri" } },
    { INDEX_op_xor_i32, { "r", "0", "ri" } },

    /* Variable shift counts must land in ECX ("c"). */
    { INDEX_op_shl_i32, { "r", "0", "ci" } },
    { INDEX_op_shr_i32, { "r", "0", "ci" } },
    { INDEX_op_sar_i32, { "r", "0", "ci" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },

    { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } },
    { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } },

/* 64-bit guest addresses need an extra operand for the high word. */
#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "cb", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif
/* Terminator entry; the VBOX build fully initializes it to keep
   stricter compilers quiet. */
#ifndef VBOX
    { -1 },
#else
    { -1, {"", "", "", ""} },
#endif
};
1412
/* Callee-saved registers pushed by the TB prologue and popped (in
 * reverse order) by the epilogue in tcg_target_qemu_prologue().
 * The register reserved for the global CPU-state pointer (EBP in the
 * upstream build, ESI in the VBOX build) is deliberately left out --
 * see the inline comments below. */
static int tcg_target_callee_save_regs[] = {
#ifndef VBOX
    /* TCG_REG_EBP, */ /* currently used for the global env, so no
                          need to save */
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
#else
    TCG_REG_EBP,
    TCG_REG_EBX,
    /* TCG_REG_ESI, */ /* currently used for the global env, so no
                          need to save */
    TCG_REG_EDI,
#endif
};
1428
1429/* Generate global QEMU prologue and epilogue code */
1430void tcg_target_qemu_prologue(TCGContext *s)
1431{
1432 int i, frame_size, push_size, stack_addend;
1433
1434 /* TB prologue */
1435 /* save all callee saved registers */
1436 for(i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
1437 tcg_out_push(s, tcg_target_callee_save_regs[i]);
1438 }
1439 /* reserve some stack space */
1440 push_size = 4 + ARRAY_SIZE(tcg_target_callee_save_regs) * 4;
1441 frame_size = push_size + TCG_STATIC_CALL_ARGS_SIZE;
1442 frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) &
1443 ~(TCG_TARGET_STACK_ALIGN - 1);
1444 stack_addend = frame_size - push_size;
1445 tcg_out_addi(s, TCG_REG_ESP, -stack_addend);
1446
1447 tcg_out_modrm(s, 0xff, 4, TCG_REG_EAX); /* jmp *%eax */
1448
1449 /* TB epilogue */
1450 tb_ret_addr = s->code_ptr;
1451 tcg_out_addi(s, TCG_REG_ESP, stack_addend);
1452 for(i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) {
1453 tcg_out_pop(s, tcg_target_callee_save_regs[i]);
1454 }
1455 tcg_out8(s, 0xc3); /* ret */
1456}
1457
1458void tcg_target_init(TCGContext *s)
1459{
1460 /* fail safe */
1461 if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
1462 tcg_abort();
1463
1464 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff);
1465 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
1466 (1 << TCG_REG_EAX) |
1467 (1 << TCG_REG_EDX) |
1468 (1 << TCG_REG_ECX));
1469
1470 tcg_regset_clear(s->reserved_regs);
1471 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ESP);
1472
1473 tcg_add_target_add_op_defs(x86_op_defs);
1474}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette