Changeset 8826 in vbox
- Timestamp: May 15, 2008 1:11:24 AM (17 years ago)
- svn:sync-xref-src-repo-rev: 30833
- File: 1 edited
trunk/src/recompiler/target-i386/translate.c
Diff from r6726 to r8826 (lines added in r8826 are prefixed with "+").

The great majority of the hunks in this diff are whitespace-only: trailing blanks are stripped from lines throughout the file (enum member lists, "} else" lines preceding #endif, operator tables, comments, and the like) with no functional effect. Three hunks change behavior; they are reproduced below with the r8826 line numbers.

Hunk 1, new lines 3213-3354, inserted just before disas_insn(): a VBOX-only validator for LOCK-prefixed instruction sequences. Note that the actual check is compiled out with #if 0 pending testing, so the function currently accepts every sequence.

+#ifdef VBOX
+/* Checks if it's an invalid lock sequence. Only a few instructions
+   can be used together with the lock prefix and of those only the
+   form that write a memory operand. So, this is kind of annoying
+   work to do...
+   The AMD manual lists the following instructions.
+        ADC
+        ADD
+        AND
+        BTC
+        BTR
+        BTS
+        CMPXCHG
+        CMPXCHG8B
+        CMPXCHG16B
+        DEC
+        INC
+        NEG
+        NOT
+        OR
+        SBB
+        SUB
+        XADD
+        XCHG
+        XOR */
+static bool is_invalid_lock_sequence(DisasContext *s, target_ulong pc_start, int b)
+{
+#if 0 /** @todo test this properly! */
+    target_ulong pc = s->pc;
+    int modrm, mod, op;
+
+    /* X={8,16,32,64} Y={16,32,64} */
+    switch (b)
+    {
+        /* /2: ADC reg/memX, immX */
+        /* /0: ADD reg/memX, immX */
+        /* /4: AND reg/memX, immX */
+        /* /1: OR  reg/memX, immX */
+        /* /3: SBB reg/memX, immX */
+        /* /5: SUB reg/memX, immX */
+        /* /6: XOR reg/memX, immX */
+        case 0x80:
+        case 0x81:
+        case 0x83:
+            modrm = ldub_code(pc++);
+            op = (modrm >> 3) & 7;
+            if (op == 7) /* /7: CMP */
+                break;
+            mod = (modrm >> 6) & 3;
+            if (mod == 3) /* register destination */
+                break;
+            return false;
+
+        case 0x10: /* /r: ADC reg/mem8, reg8 */
+        case 0x11: /* /r: ADC reg/memX, regY */
+        case 0x00: /* /r: ADD reg/mem8, reg8 */
+        case 0x01: /* /r: ADD reg/memX, regY */
+        case 0x20: /* /r: AND reg/mem8, reg8 */
+        case 0x21: /* /r: AND reg/memY, regY */
+        case 0x08: /* /r: OR reg/mem8, reg8 */
+        case 0x09: /* /r: OR reg/memY, regY */
+        case 0x18: /* /r: SBB reg/mem8, reg8 */
+        case 0x19: /* /r: SBB reg/memY, regY */
+        case 0x28: /* /r: SUB reg/mem8, reg8 */
+        case 0x29: /* /r: SUB reg/memY, regY */
+        case 0x86: /* /r: XCHG reg/mem8, reg8 or XCHG reg8, reg/mem8 */
+        case 0x87: /* /r: XCHG reg/memY, regY or XCHG regY, reg/memY */
+        case 0x30: /* /r: XOR reg/mem8, reg8 */
+        case 0x31: /* /r: XOR reg/memY, regY */
+            modrm = ldub_code(pc++);
+            mod = (modrm >> 6) & 3;
+            if (mod == 3) /* register destination */
+                break;
+            return false;
+
+        /* /1: DEC reg/memX */
+        /* /0: INC reg/memX */
+        case 0xfe:
+        case 0xff:
+            modrm = ldub_code(pc++);
+            mod = (modrm >> 6) & 3;
+            if (mod == 3) /* register destination */
+                break;
+            return false;
+
+        /* /3: NEG reg/memX */
+        /* /2: NOT reg/memX */
+        case 0xf6:
+        case 0xf7:
+            modrm = ldub_code(pc++);
+            mod = (modrm >> 6) & 3;
+            if (mod == 3) /* register destination */
+                break;
+            return false;
+
+        case 0x0f:
+            b = ldub_code(pc++);
+            switch (b)
+            {
+                /* /7: BTC reg/memY, imm8 */
+                /* /6: BTR reg/memY, imm8 */
+                /* /5: BTS reg/memY, imm8 */
+                case 0xba:
+                    modrm = ldub_code(pc++);
+                    op = (modrm >> 3) & 7;
+                    if (op < 5)
+                        break;
+                    mod = (modrm >> 6) & 3;
+                    if (mod == 3) /* register destination */
+                        break;
+                    return false;
+
+                case 0xbb: /* /r: BTC reg/memY, regY */
+                case 0xb3: /* /r: BTR reg/memY, regY */
+                case 0xab: /* /r: BTS reg/memY, regY */
+                case 0xb0: /* /r: CMPXCHG reg/mem8, reg8 */
+                case 0xb1: /* /r: CMPXCHG reg/memY, regY */
+                case 0xc0: /* /r: XADD reg/mem8, reg8 */
+                case 0xc1: /* /r: XADD reg/memY, regY */
+                    modrm = ldub_code(pc++);
+                    mod = (modrm >> 6) & 3;
+                    if (mod == 3) /* register destination */
+                        break;
+                    return false;
+
+                /* /1: CMPXCHG8B mem64 or CMPXCHG16B mem128 */
+                case 0xc7:
+                    return false;
+            }
+            break;
+    }
+
+    /* illegal sequence. */
+    Log(("illegal lock sequence %VGv (b=%#x)\n", pc_start, b));
+    s->pc = pc; /* XXX: What's the correct value here? */
+    return true;
+#else
+    return false;
+#endif
+}
+#endif /* VBOX */
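Every case in the validator repeats the same ModRM field extraction. As a quick aside, here is a minimal stand-alone C demo of that decoding (a hypothetical illustration, not part of the changeset):

#include <stdio.h>

int main(void)
{
    /* Example byte 0xC8 = 11 001 000; with opcode 0x81 this encodes
       "or eax, imm32". */
    unsigned char modrm = 0xC8;
    int mod = (modrm >> 6) & 3;   /* addressing mode; 3 = register operand */
    int op  = (modrm >> 3) & 7;   /* /digit opcode extension (or second register) */
    int rm  = modrm & 7;          /* destination register or memory base */

    /* mod == 3 is a register destination, exactly the form the
       validator refuses to combine with LOCK. */
    printf("mod=%d op=/%d rm=%d -> LOCK %s\n",
           mod, op, rm, mod == 3 ? "illegal" : "possible");
    return 0;
}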
Hunk 2, around line 3491 in disas_insn(): lock-prefix handling. On VBOX, the validator above is consulted first, and an illegal sequence raises #UD (EXCP06_ILLOP) instead of unconditionally emitting gen_op_lock().

     /* lock generation */
+#ifndef VBOX
     if (prefixes & PREFIX_LOCK)
         gen_op_lock();
+#else /* VBOX */
+    if (prefixes & PREFIX_LOCK) {
+        if (is_invalid_lock_sequence(s, pc_start, b)) {
+            gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
+            return s->pc;
+        }
+        gen_op_lock();
+    }
+#endif /* VBOX */

Hunk 3, new lines 4211-4214, in the mov Ev, Gv case: with the general validator still compiled out, a targeted "dtrace hot fix" rejects a LOCK prefix on this opcode pair directly.

     case 0x8a:
     case 0x8b: /* mov Ev, Gv */
+#ifdef VBOX /* dtrace hot fix */
+        if (prefixes & PREFIX_LOCK)
+            goto illegal_op;
+#endif
         if ((b & 1) == 0)

All remaining hunks, through gen_intermediate_code_internal() at the end of the file, are whitespace-only.
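Taken together, the rule these hunks encode is: LOCK is valid only with the read-modify-write opcodes in the AMD list, and only when the destination operand is memory (ModRM mod != 3). A hypothetical stand-alone sketch of that rule, covering just the one-byte /r arithmetic and XCHG forms (lock_ok is a made-up helper name, not from the changeset):

#include <stdbool.h>
#include <stdio.h>

static bool lock_ok(unsigned char opcode, unsigned char modrm)
{
    switch (opcode) {
    case 0x00: case 0x01:   /* ADD r/m, r */
    case 0x08: case 0x09:   /* OR  r/m, r */
    case 0x10: case 0x11:   /* ADC r/m, r */
    case 0x18: case 0x19:   /* SBB r/m, r */
    case 0x20: case 0x21:   /* AND r/m, r */
    case 0x28: case 0x29:   /* SUB r/m, r */
    case 0x30: case 0x31:   /* XOR r/m, r */
    case 0x86: case 0x87:   /* XCHG r/m, r */
        return ((modrm >> 6) & 3) != 3;   /* memory destination only */
    default:
        return false;       /* e.g. 0x8b (mov Gv, Ev) is never lockable */
    }
}

int main(void)
{
    printf("lock add [ebx], eax: %s\n", lock_ok(0x01, 0x03) ? "ok" : "#UD");
    printf("lock add ebx, eax:   %s\n", lock_ok(0x01, 0xc3) ? "ok" : "#UD");
    printf("lock mov eax, [ebx]: %s\n", lock_ok(0x8b, 0x03) ? "ok" : "#UD");
    return 0;
}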