Changeset r1588

Timestamp: Mar 20, 2007 11:10:58 PM
Location:  trunk/src/recompiler/new
Files:     6 edited
trunk/src/recompiler/new/VBoxRecompiler.c (r1200 → r1588)

@@ -171 +171 @@
     remR3HandlerWriteU32
 };
 
 
-#if 0 /* exec.c:99 */
-/*
- * Instance stuff.
- */
-/** Pointer to the cpu state. */
-CPUState *cpu_single_env;
-#endif
 
 
@@ -1210 +1201 @@
     if (!(env->eflags & IF_MASK))
     {
-#ifdef VBOX_RAW_V86
-        if(!(fFlags & VM_MASK))
-            return false;
-#else
        STAM_COUNTER_INC(&gStatRefuseIF0);
        Log2(("raw mode refused: IF (RawR3)\n"));
        return false;
-#endif
    }
@@ -2028 +2014 @@
         Log2(("REMR3State: trap=%02x errcd=%VGv cr2=%VGv nexteip=%VGv%s\n", pVM->rem.s.Env.exception_index, pVM->rem.s.Env.error_code,
              pVM->rem.s.Env.cr[2], pVM->rem.s.Env.exception_next_eip, pVM->rem.s.Env.exception_is_int ? " software" : ""));
-        //if (pVM->rem.s.Env.eip == 0x40005a2f)
-        //    pVM->rem.s.Env.state |= CPU_EMULATE_SINGLE_STEP | CPU_RAW_MODE_DISABLED | CPU_RAWR0_MODE_DISABLED;
     }
@@ -2765 +2749 @@
         && off < PGM_DYNAMIC_CHUNK_SIZE)
     {
-        Log(("remR3HCVirt2GCPhys %x -> %x\n", addr, pVM->rem.s.paHCVirtToGCPhys[idx].GCPhys1 + off));
+        Log2(("remR3HCVirt2GCPhys %x -> %x\n", addr, pVM->rem.s.paHCVirtToGCPhys[idx].GCPhys1 + off));
         return pVM->rem.s.paHCVirtToGCPhys[idx].GCPhys1 + off;
     }
@@ -2773 +2757 @@
         && off < PGM_DYNAMIC_CHUNK_SIZE)
     {
-        Log(("remR3HCVirt2GCPhys %x -> %x\n", addr, pVM->rem.s.paHCVirtToGCPhys[idx].GCPhys2 + off));
+        Log2(("remR3HCVirt2GCPhys %x -> %x\n", addr, pVM->rem.s.paHCVirtToGCPhys[idx].GCPhys2 + off));
         return pVM->rem.s.paHCVirtToGCPhys[idx].GCPhys2 + off;
     }
@@ -2783 +2767 @@
         if (off < pVM->rem.s.aPhysReg[i].cb)
         {
-            Log(("remR3HCVirt2GCPhys %x -> %x\n", addr, pVM->rem.s.aPhysReg[i].GCPhys + off));
+            Log2(("remR3HCVirt2GCPhys %x -> %x\n", addr, pVM->rem.s.aPhysReg[i].GCPhys + off));
             return pVM->rem.s.aPhysReg[i].GCPhys + off;
         }
trunk/src/recompiler/new/cpu-exec.c (r644 → r1588)

@@ -498 +498 @@
     if(env->eflags & VM_MASK)
     {
-        Log(("EMV86: %08X IF=%d TF=%d CPL=%d CR0=%08X\n", env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, env->cr[0]));
+        Log(("EMV86: %04X:%04X IF=%d TF=%d CPL=%d CR0=%08X\n", env->segs[R_CS].selector, env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, env->cr[0]));
     }
     else
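A side note on this log format fix: in virtual-8086 mode the CPU addresses code through the real-mode CS:IP pair, so a trace that prints only EIP is ambiguous. A minimal sketch of the addressing rule (not part of the changeset; v86_linear_addr is a name invented for this example):

    #include <stdint.h>

    /* Real-mode/V86 addressing: linear = selector * 16 + offset.
       Logging only EIP cannot distinguish, say, 0100:0010 from
       0000:1010 -- both map to linear address 0x00001010. */
    static uint32_t v86_linear_addr(uint16_t sel, uint16_t off)
    {
        return ((uint32_t)sel << 4) + off;
    }

Hence the new format string prints the CS selector alongside EIP.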
trunk/src/recompiler/new/target-i386/exec.h (r644 → r1588)

@@ -182 +182 @@
 void OPPROTO op_movl_eflags_T0(void);
 void OPPROTO op_movl_T0_eflags(void);
+#ifdef VBOX
+void OPPROTO op_movl_T0_eflags_vme(void);
+void OPPROTO op_movw_eflags_T0_vme(void);
+void OPPROTO op_cli_vme(void);
+void OPPROTO op_sti_vme(void);
+#endif
 void helper_divl_EAX_T0(void);
 void helper_idivl_EAX_T0(void);
trunk/src/recompiler/new/target-i386/helper.c (r1094 → r1588)

@@ -816 +816 @@
     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
 }
+
+#ifdef VBOX
+
+/* check if VME interrupt redirection is enabled in TSS */
+static inline bool is_vme_irq_redirected(int intno)
+{
+    int io_offset, intredir_offset;
+    unsigned char val, mask;
+
+    /* TSS must be a valid 32 bit one */
+    if (!(env->tr.flags & DESC_P_MASK) ||
+        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
+        env->tr.limit < 103)
+        goto fail;
+    io_offset = lduw_kernel(env->tr.base + 0x66);
+    /* the virtual interrupt redirection bitmap is located below the io bitmap */
+    intredir_offset = io_offset - 0x20;
+
+    intredir_offset += (intno >> 3);
+    if ((intredir_offset) > env->tr.limit)
+        goto fail;
+
+    val = ldub_kernel(env->tr.base + intredir_offset);
+    mask = 1 << (unsigned char)(intno & 7);
+
+    /* bit set means no redirection. */
+    if ((val & mask) != 0) {
+        return false;
+    }
+    return true;
+
+fail:
+    raise_exception_err(EXCP0D_GPF, 0);
+    return true;
+}
+
+/* V86 mode software interrupt with CR4.VME=1 */
+static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
+{
+    target_ulong ptr, ssp;
+    int selector;
+    uint32_t offset, esp;
+    uint32_t old_cs, old_eflags;
+    uint32_t iopl;
+
+    iopl = ((env->eflags >> IOPL_SHIFT) & 3);
+
+    if (!is_vme_irq_redirected(intno))
+    {
+        if (iopl == 3)
+            /* normal protected mode handler call */
+            return do_interrupt_protected(intno, 1, error_code, next_eip, 0);
+        else
+            raise_exception_err(EXCP0D_GPF, 0);
+    }
+
+    /* virtual mode idt is at linear address 0 */
+    ptr = 0 + intno * 4;
+    offset = lduw_kernel(ptr);
+    selector = lduw_kernel(ptr + 2);
+    esp = ESP;
+    ssp = env->segs[R_SS].base;
+    old_cs = env->segs[R_CS].selector;
+
+    old_eflags = compute_eflags();
+    if (iopl < 3)
+    {
+        /* copy VIF into IF and set IOPL to 3 */
+        if (env->eflags & VIF_MASK)
+            old_eflags |= IF_MASK;
+        else
+            old_eflags &= ~IF_MASK;
+
+        old_eflags |= (3 << IOPL_SHIFT);
+    }
+
+    /* XXX: use SS segment size ? */
+    PUSHW(ssp, esp, 0xffff, old_eflags);
+    PUSHW(ssp, esp, 0xffff, old_cs);
+    PUSHW(ssp, esp, 0xffff, next_eip);
+
+    /* update processor state */
+    ESP = (ESP & ~0xffff) | (esp & 0xffff);
+    env->eip = offset;
+    env->segs[R_CS].selector = selector;
+    env->segs[R_CS].base = (selector << 4);
+    env->eflags &= ~(TF_MASK | RF_MASK);
+
+    if (iopl < 3)
+        env->eflags &= ~IF_MASK;
+    else
+        env->eflags &= ~VIF_MASK;
+}
+#endif /* VBOX */
 
 #ifdef TARGET_X86_64
@@ -1245 +1339 @@
 #endif
     {
-        do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
+#ifdef VBOX
+        /* int xx *, v86 code and VME enabled? */
+        if (    (env->eflags & VM_MASK)
+            &&  (env->cr[4] & CR4_VME_MASK)
+            &&  is_int
+            &&  !is_hw
+            &&  env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
+           )
+            do_soft_interrupt_vme(intno, error_code, next_eip);
+        else
+#endif /* VBOX */
+            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
     }
 } else {
@@ -2356 +2461 @@
     target_ulong ssp;
     int eflags_mask;
-
 #ifdef VBOX
+    bool fVME = false;
+
     remR3TrapClear(env->pVM);
-#endif
+#endif /* VBOX */
 
     sp_mask = 0xffff; /* XXXX: use SS segment size ? */
@@ -2376 +2482 @@
         POPW(ssp, sp, sp_mask, new_eflags);
     }
+#ifdef VBOX
+    if (    (env->eflags & VM_MASK)
+        &&  ((env->eflags >> IOPL_SHIFT) & 3) != 3
+        &&  (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
+    {
+        fVME = true;
+        /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
+        /* if TF will be set -> #GP */
+        if (    ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
+            ||  (new_eflags & TF_MASK))
+            raise_exception(EXCP0D_GPF);
+    }
+#endif /* VBOX */
+
     ESP = (ESP & ~sp_mask) | (sp & sp_mask);
     load_seg_vm(R_CS, new_cs);
     env->eip = new_eip;
+#ifdef VBOX
+    if (fVME)
+        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
+    else
+#endif
     if (env->eflags & VM_MASK)
         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
…
         eflags_mask &= 0xffff;
     load_eflags(new_eflags, eflags_mask);
+
+#ifdef VBOX
+    if (fVME)
+    {
+        if (new_eflags & IF_MASK)
+            env->eflags |= VIF_MASK;
+        else
+            env->eflags &= ~VIF_MASK;
+    }
+#endif /* VBOX */
 }
@@ -2621 +2756 @@
         eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
         if (cpl == 0)
+#ifdef VBOX
+            eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
+#else
             eflags_mask |= IOPL_MASK;
+#endif
         iopl = (env->eflags >> IOPL_SHIFT) & 3;
         if (cpl <= iopl)
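The redirection-bitmap walk in is_vme_irq_redirected() follows the layout of the 32-bit TSS: the word at offset 0x66 holds the I/O permission bitmap base, and the 32-byte (256-bit) interrupt redirection bitmap sits immediately below it. A condensed sketch of just that lookup, with the TSS flattened to a byte array for illustration (tss and io_bitmap_base are hypothetical inputs; the real code reads them via lduw_kernel/ldub_kernel and validates the TSS first):

    #include <stdbool.h>
    #include <stdint.h>

    static bool vme_vector_redirected(const uint8_t *tss,
                                      uint16_t io_bitmap_base,
                                      int intno)
    {
        /* redirection bitmap starts 0x20 bytes below the I/O bitmap */
        unsigned base = io_bitmap_base - 0x20;
        uint8_t  byte = tss[base + (intno >> 3)];

        /* clear bit: deliver through the virtual-8086 IVT at linear 0;
           set bit: fall back to the protected-mode IDT */
        return (byte & (1u << (intno & 7))) == 0;
    }

do_soft_interrupt_vme() then mirrors a real-mode INT: it pushes FLAGS (with VIF folded into IF and IOPL forced to 3 when IOPL < 3), CS and IP on the 16-bit stack, and vectors through the IVT at linear address 0.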
trunk/src/recompiler/new/target-i386/op.c (r644 → r1588)

@@ -492 +492 @@
     uint32_t idx = (PARAM1 - offsetof(CPUX86State,segs[0].base)) / sizeof(SegmentCache);
 
-    if (env->segs[idx].newselector && !(env->eflags & VM_MASK))
+    if (env->segs[idx].newselector && !(env->eflags & VM_MASK)) {
         sync_seg(env, idx, env->segs[idx].newselector);
+    }
+    /* Loading a null selector into a segment register is valid, but using it is most definitely not! */
+    if (    (env->cr[0] & (CR0_PE_MASK|CR0_PG_MASK)) == (CR0_PE_MASK|CR0_PG_MASK)
+        &&  !(env->eflags & VM_MASK)
+        &&  env->segs[idx].selector == 0) {
+        raise_exception(EXCP0D_GPF);
+    }
     A0 = (uint32_t)env->segs[idx].base;
+    FORCE_RET();
 #else /* !VBOX */
     A0 = (uint32_t)*(target_ulong *)((char *)env + PARAM1);
@@ -505 +513 @@
     uint32_t idx = (PARAM1 - offsetof(CPUX86State,segs[0].base)) / sizeof(SegmentCache);
 
-    if (env->segs[idx].newselector && !(env->eflags & VM_MASK))
+    if (env->segs[idx].newselector && !(env->eflags & VM_MASK)) {
         sync_seg(env, idx, env->segs[idx].newselector);
+    }
+    /* Loading a null selector into a segment register is valid, but using it is most definitely not! */
+    if (    (env->cr[0] & (CR0_PE_MASK|CR0_PG_MASK)) == (CR0_PE_MASK|CR0_PG_MASK)
+        &&  !(env->eflags & VM_MASK)
+        &&  env->segs[idx].selector == 0) {
+        raise_exception(EXCP0D_GPF);
+    }
     A0 = (uint32_t)(A0 + env->segs[idx].base);
+    FORCE_RET();
 #else /* !VBOX */
     A0 = (uint32_t)(A0 + *(target_ulong *)((char *)env + PARAM1));
@@ -718 +734 @@
 }
 
+#ifndef VBOX
 #if 0
 /* vm86plus instructions */
…
 }
 #endif
+
+#else /* VBOX */
+void OPPROTO op_cli_vme(void)
+{
+    env->eflags &= ~VIF_MASK;
+}
+
+void OPPROTO op_sti_vme(void)
+{
+    /* First check, then change eflags according to the AMD manual */
+    if (env->eflags & VIP_MASK) {
+        raise_exception(EXCP0D_GPF);
+    }
+    env->eflags |= VIF_MASK;
+    FORCE_RET();
+}
+#endif /* VBOX */
 
 void OPPROTO op_boundw(void)
@@ -1465 +1499 @@
 
 /* XXX: clear VIF/VIP in all ops ? */
+#ifdef VBOX
+/* XXX: AMD docs say they remain unchanged. */
+#endif
 
 void OPPROTO op_movl_eflags_T0(void)
@@ -1496 +1533 @@
 }
 
+#ifndef VBOX
 #if 0
 /* vm86plus version */
…
 #endif
 
+#else /* VBOX */
+/* IOPL != 3, CR4.VME=1 */
+void OPPROTO op_movw_eflags_T0_vme(void)
+{
+    unsigned int new_eflags = T0;
+
+    /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
+    /* if TF will be set -> #GP */
+    if (    ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
+        ||  (new_eflags & TF_MASK)) {
+        raise_exception(EXCP0D_GPF);
+    } else {
+        load_eflags(new_eflags, (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
+
+        if (new_eflags & IF_MASK) {
+            env->eflags |= VIF_MASK;
+        } else {
+            env->eflags &= ~VIF_MASK;
+        }
+    }
+
+    FORCE_RET();
+}
+#endif /* VBOX */
 
 /* XXX: compute only O flag */
 void OPPROTO op_movb_eflags_T0(void)
@@ -1555 +1618 @@
 
 /* vm86plus version */
-#if 0
-void OPPROTO op_movl_T0_eflags_vm(void)
+#ifdef VBOX /* #if 0 */
+void OPPROTO op_movl_T0_eflags_vme(void)
 {
     int eflags;
…
     T0 = eflags;
 }
-#endif
+#endif /* VBOX / 0 */
 
 void OPPROTO op_cld(void)
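The new cli/sti ops capture the core of VME interrupt virtualization: at IOPL < 3 the guest's CLI/STI toggle the virtual interrupt flag (VIF, EFLAGS bit 19) instead of IF, and STI faults when the virtual-interrupt-pending bit (VIP, bit 20) is set so the monitor gets a chance to deliver the queued interrupt. A self-contained restatement of that protocol (names local to this sketch, not the op.c entry points):

    #include <stdint.h>

    #define VIF_MASK (1u << 19)  /* virtual interrupt flag */
    #define VIP_MASK (1u << 20)  /* virtual interrupt pending */

    /* returns 0 on success, -1 where the real op raises #GP */
    static int vme_cli(uint32_t *eflags)
    {
        *eflags &= ~VIF_MASK;        /* guest believes IF is cleared */
        return 0;
    }

    static int vme_sti(uint32_t *eflags)
    {
        if (*eflags & VIP_MASK)      /* pending interrupt: trap out */
            return -1;
        *eflags |= VIF_MASK;
        return 0;
    }

op_movw_eflags_T0_vme applies the same rule to POPF: the popped IF bit lands in VIF, and the #GP cases (VIP set while enabling, or TF set) are checked before any state changes.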
trunk/src/recompiler/new/target-i386/translate.c (r644 → r1588)

@@ -121 +121 @@
     int f_st; /* currently unused */
     int vm86; /* vm86 mode */
+#ifdef VBOX
+    int vme;  /* CR4.VME */
+#endif
     int cpl;
     int iopl;
@@ -832 +835 @@
     gen_op_check_external_event();
 }
+
+static inline void gen_update_eip(target_ulong pc)
+{
+#ifdef TARGET_X86_64
+    if (pc == (uint32_t)pc) {
+        gen_op_movl_eip_im(pc);
+    } else if (pc == (int32_t)pc) {
+        gen_op_movq_eip_im(pc);
+    } else {
+        gen_op_movq_eip_im64(pc >> 32, pc);
+    }
+#else
+    gen_op_movl_eip_im(pc);
+#endif
+}
+
 #endif /* VBOX */
@@ -3215 +3234 @@
 #endif
     s->rip_offset = 0; /* for relative ip address */
+
+#ifdef VBOX
+    /* Always update EIP. Otherwise one must be very careful with generated code that can raise exceptions. */
+    gen_update_eip(pc_start - s->cs_base);
+#endif
+
  next_byte:
     b = ldub_code(s->pc);
@@ -5060 +5085 @@
         s->cc_op = CC_OP_EFLAGS;
     } else if (s->vm86) {
+#ifdef VBOX
+        if (s->iopl != 3 && (!s->vme || s->dflag)) {
+#else
         if (s->iopl != 3) {
+#endif
             gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
         } else {
@@ -5176 +5205 @@
     /* flags */
     case 0x9c: /* pushf */
+#ifdef VBOX
+        if (s->vm86 && s->iopl != 3 && (!s->vme || s->dflag)) {
+#else
         if (s->vm86 && s->iopl != 3) {
+#endif
             gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
         } else {
             if (s->cc_op != CC_OP_DYNAMIC)
                 gen_op_set_cc_op(s->cc_op);
-            gen_op_movl_T0_eflags();
+#ifdef VBOX
+            if (s->vm86 && s->vme && s->iopl != 3)
+                gen_op_movl_T0_eflags_vme();
+            else
+#endif
+                gen_op_movl_T0_eflags();
             gen_push_T0(s);
         }
         break;
     case 0x9d: /* popf */
+#ifdef VBOX
+        if (s->vm86 && s->iopl != 3 && (!s->vme || s->dflag)) {
+#else
         if (s->vm86 && s->iopl != 3) {
+#endif
             gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
         } else {
…
                 gen_op_movl_eflags_T0();
             } else {
-                gen_op_movw_eflags_T0();
+#ifdef VBOX
+                if (s->vm86 && s->vme)
+                    gen_op_movw_eflags_T0_vme();
+                else
+#endif
+                    gen_op_movw_eflags_T0();
             }
         }
@@ -5408 +5455 @@
         break;
     case 0xcc: /* int3 */
-        gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
+#ifdef VBOX
+        if (s->vm86 && s->iopl != 3 && !s->vme) {
+            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+        } else
+#endif
+            gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
         break;
     case 0xcd: /* int N */
         val = ldub_code(s->pc++);
+#ifdef VBOX
+        if (s->vm86 && s->iopl != 3 && !s->vme) {
+#else
         if (s->vm86 && s->iopl != 3) {
+#endif
             gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
         } else {
@@ -5445 +5501 @@
         if (s->iopl == 3) {
             gen_op_cli();
+#ifdef VBOX
+        } else if (s->iopl != 3 && s->vme) {
+            gen_op_cli_vme();
+#endif
         } else {
             gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
@@ -5469 +5529 @@
         if (s->iopl == 3) {
             goto gen_sti;
+#ifdef VBOX
+        } else if (s->iopl != 3 && s->vme) {
+            gen_op_sti_vme();
+            /* give a chance to handle pending irqs */
+            gen_jmp_im(s->pc - s->cs_base);
+            gen_eob(s);
+#endif
         } else {
             gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
@@ -6525 +6592 @@
     dc->f_st = 0;
     dc->vm86 = (flags >> VM_SHIFT) & 1;
+#ifdef VBOX
+    dc->vme = !!(env->cr[4] & CR4_VME_MASK);
+#endif
     dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
     dc->iopl = (flags >> IOPL_SHIFT) & 3;
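Taken together, the translate.c hunks make one decision per IOPL-sensitive instruction in V86 mode. A compact restatement of that check for PUSHF/POPF/IRET (a sketch; dflag stands for the 32-bit operand-size flag as in DisasContext, and the return codes are invented here):

    /* -1: raise #GP, 0: execute normally, 1: emit the *_vme op */
    static int v86_flag_insn_action(int iopl, int vme, int dflag)
    {
        if (iopl == 3)
            return 0;        /* fully privileged in V86 terms */
        if (vme && !dflag)
            return 1;        /* 16-bit form is virtualized by VME */
        return -1;           /* 32-bit form (or no VME) still faults */
    }

CLI/STI and INT n have no operand size, so their checks drop the dflag term; and per the comment in the do_interrupt hunk above, single-byte INT3 (0xCC) bypasses VME redirection and goes straight to the protected-mode handler, so only the two-byte CD xx encoding takes the do_soft_interrupt_vme path.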