Changeset 47568 in vbox for trunk/src/VBox
Timestamp: Aug 7, 2013, 3:11:58 AM
Location:  trunk/src/VBox/VMM
Files:     6 edited
Legend: unchanged lines are shown with a leading space, added lines with a leading '+', removed lines with a leading '-'; '…' marks skipped lines.
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r47548 -> r47568)

  #include <VBox/vmm/tm.h>
  #include <VBox/vmm/dbgf.h>
 +#include <VBox/vmm/dbgftrace.h>
  #ifdef VBOX_WITH_RAW_MODE_NOT_R0
  # include <VBox/vmm/patm.h>
 …
      if (!pIemCpu->fInPatchCode)
          CPUMRawLeave(pVCpu, CPUMCTX2CORE(pCtx), VINF_SUCCESS);
 +#endif
 +
 +#ifdef DBGFTRACE_ENABLED
 +    switch (enmMode)
 +    {
 +        case IEMMODE_64BIT:
 +            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pIemCpu->uCpl, pCtx->rip);
 +            break;
 +        case IEMMODE_32BIT:
 +            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
 +            break;
 +        case IEMMODE_16BIT:
 +            RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip);
 +            break;
 +    }
  #endif
  }
 …
          uErr = 0;
      }
 +#ifdef DBGFTRACE_ENABLED
 +    RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
 +                      pIemCpu->cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
 +                      pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
 +#endif
 
      /*
 …
  /**
   * Defers the rest of the instruction emulation to a C implementation routine
 - * and returns, taking two arguments in addition to the standard ones.
 + * and returns, taking three arguments in addition to the standard ones.
   *
   * @param a_pfnCImpl The pointer to the C routine.
 …
   */
  #define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2)
 +
 +/**
 + * Defers the rest of the instruction emulation to a C implementation routine
 + * and returns, taking four arguments in addition to the standard ones.
 + *
 + * @param a_pfnCImpl The pointer to the C routine.
 + * @param a0 The first extra argument.
 + * @param a1 The second extra argument.
 + * @param a2 The third extra argument.
 + * @param a3 The fourth extra argument.
 + */
 +#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pIemCpu, pIemCpu->offOpcode, a0, a1, a2, a3)
 …
       * Enable verification and/or logging.
       */
 -    pIemCpu->fNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
 -    if (   pIemCpu->fNoRem
 +    bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */;
 +    if (   fNewNoRem
          && (   0
  #if 0 /* auto enable on first paged protected mode interrupt */
 …
          || (pOrgCtx->rip == 0x000000000215e240)
  #endif
 -#if 1 /* DOS's size-overridden iret to v8086. */
 +#if 0 /* DOS's size-overridden iret to v8086. */
          || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
 +#endif
 +#if 1 /* Win3.1: port 64 interception in v8086 mode */
 +        || (pOrgCtx->rip == 0xe9d6 && pOrgCtx->cs.Sel == 0xf000 && pOrgCtx->eflags.Bits.u1VM
 +            && pOrgCtx->tr.u64Base == 0x80049e8c && pOrgCtx->tr.u32Limit == 0x2069)
  #endif
          )
 …
          RTLogGroupSettings(NULL, "iem.eo.l6.l2");
          RTLogFlags(NULL, "enabled");
 -        pIemCpu->fNoRem = false;
 +        fNewNoRem = false;
 +    }
 +    if (fNewNoRem != pIemCpu->fNoRem)
 +    {
 +        pIemCpu->fNoRem = fNewNoRem;
 +        if (!fNewNoRem)
 +        {
 +            LogAlways(("Enabling verification mode!\n"));
 +            CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
 +        }
 +        else
 +            LogAlways(("Disabling verification mode!\n"));
      }
 …
  {
      iemInitDecoder(&pVCpu->iem.s, false);
 +#ifdef DBGFTRACE_ENABLED
 +    RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
 +                      u8TrapNo, enmType, uErrCode, uCr2);
 +#endif
 
      uint32_t fFlags;
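Most of the IEMAll.cpp changes wire interpreter entry points to the DBGF trace buffer: each record is a printf-style message tagged with CPU mode and CPL. For readers who haven't met RTTraceBufAddMsgF, here is a minimal standalone sketch of the underlying idea, a fixed-size message ring with a formatted-append call. All names in the sketch are invented for illustration; this is not the IPRT implementation.

    /* Minimal sketch of a fixed-size message ring like the one the
       DBGFTRACE_ENABLED blocks above write into.  Hypothetical names. */
    #include <stdarg.h>
    #include <stdio.h>
    #include <string.h>

    #define TRACE_SLOTS   8
    #define TRACE_MSG_MAX 64

    typedef struct DemoTraceBuf
    {
        char     aszMsgs[TRACE_SLOTS][TRACE_MSG_MAX];
        unsigned iNext;   /* next slot to overwrite */
    } DemoTraceBuf;

    static void demoTraceAddMsgF(DemoTraceBuf *pBuf, const char *pszFmt, ...)
    {
        va_list va;
        va_start(va, pszFmt);
        vsnprintf(pBuf->aszMsgs[pBuf->iNext % TRACE_SLOTS], TRACE_MSG_MAX, pszFmt, va);
        va_end(va);
        pBuf->iNext++;
    }

    int main(void)
    {
        DemoTraceBuf Buf;
        memset(&Buf, 0, sizeof(Buf));
        /* Mirrors the "I16/%u %04x:%04x" record emitted by iemInitDecoder. */
        demoTraceAddMsgF(&Buf, "I16/%u %04x:%04x", 0 /*cpl*/, 0xf000, 0xe9d6);
        for (unsigned i = 0; i < Buf.iNext && i < TRACE_SLOTS; i++)
            printf("%s\n", Buf.aszMsgs[i]);
        return 0;
    }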
trunk/src/VBox/VMM/VMMAll/IEMAllAImplC.cpp (r47173 -> r47568)

  #endif /* RT_ARCH_X86 */
 
 +
 +IEM_DECL_IMPL_DEF(void, iemAImpl_arpl,(uint16_t *pu16Dst, uint16_t u16Src, uint32_t *pEFlags))
 +{
 +    if ((*pu16Dst & X86_SEL_RPL) < (u16Src & X86_SEL_RPL))
 +    {
 +        *pu16Dst &= X86_SEL_MASK_OFF_RPL;
 +        *pu16Dst |= u16Src & X86_SEL_RPL;
 +
 +        *pEFlags |= X86_EFL_ZF;
 +    }
 +    else
 +        *pEFlags &= ~X86_EFL_ZF;
 +}
 +
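The new iemAImpl_arpl worker implements the classic ARPL semantics: if the destination selector's RPL (its low two bits) is numerically lower than the source's, the RPL is copied over and ZF is set; otherwise ZF is cleared. A self-contained sketch of the same logic, using the standard x86 encodings (RPL in selector bits 0-1, ZF at EFLAGS bit 6) in place of the VBox constants:

    /* Standalone illustration of the ARPL semantics implemented above.
       The constants mirror the usual x86 layout, not VBox headers. */
    #include <stdint.h>
    #include <stdio.h>

    #define SEL_RPL          UINT16_C(0x0003)   /* X86_SEL_RPL */
    #define SEL_MASK_OFF_RPL UINT16_C(0xfffc)   /* X86_SEL_MASK_OFF_RPL */
    #define EFL_ZF           UINT32_C(0x0040)   /* X86_EFL_ZF */

    static void demo_arpl(uint16_t *pu16Dst, uint16_t u16Src, uint32_t *pEFlags)
    {
        if ((*pu16Dst & SEL_RPL) < (u16Src & SEL_RPL))
        {
            *pu16Dst = (uint16_t)((*pu16Dst & SEL_MASK_OFF_RPL) | (u16Src & SEL_RPL));
            *pEFlags |= EFL_ZF;      /* RPL was raised -> ZF = 1 */
        }
        else
            *pEFlags &= ~EFL_ZF;     /* already high enough -> ZF = 0 */
    }

    int main(void)
    {
        uint16_t uSel = 0x0018;      /* ring-0 selector handed in by a user program */
        uint32_t fEfl = 0;
        demo_arpl(&uSel, 0x0023 /* caller CS, RPL = 3 */, &fEfl);
        printf("sel=%#06x zf=%u\n", uSel, !!(fEfl & EFL_ZF)); /* sel=0x001b zf=1 */
        return 0;
    }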
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (r47565 -> r47568)

      if (rcStrict != VINF_SUCCESS)
          return rcStrict;
 +#ifdef DBGFTRACE_ENABLED
 +    RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/rm %04x:%04x -> %04x:%04x %x %04llx",
 +                      pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewRsp);
 +#endif
 +
      pCtx->rip = uNewEip;
      pCtx->cs.Sel = uNewCs;
 …
       * Commit the operation.
       */
 +    uNewFlags &= X86_EFL_LIVE_MASK;
 +    uNewFlags |= X86_EFL_RA1_MASK;
 +#ifdef DBGFTRACE_ENABLED
 +    RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/p/v %04x:%08x -> %04x:%04x %x %04x:%04x",
 +                      pCtx->cs.Sel, pCtx->eip, uNewCs, uNewEip, uNewFlags, uNewSs, uNewEsp);
 +#endif
 +
 +    IEMMISC_SET_EFL(pIemCpu, pCtx, uNewFlags);
      iemCImplCommonV8086LoadSeg(&pCtx->cs, uNewCs);
      iemCImplCommonV8086LoadSeg(&pCtx->ss, uNewSs);
 …
      pCtx->rip = uNewEip;
      pCtx->rsp = uNewEsp;
 -    uNewFlags &= X86_EFL_LIVE_MASK;
 -    uNewFlags |= X86_EFL_RA1_MASK;
 -    IEMMISC_SET_EFL(pIemCpu, pCtx, uNewFlags);
      pIemCpu->uCpl = 3;
 …
      }
 
 +    uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
 +                         | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
 +    if (enmEffOpSize != IEMMODE_16BIT)
 +        fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
 +    if (pIemCpu->uCpl == 0)
 +        fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
 +    else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
 +        fEFlagsMask |= X86_EFL_IF;
 +    uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
 +    fEFlagsNew &= ~fEFlagsMask;
 +    fEFlagsNew |= uNewFlags & fEFlagsMask;
 +#ifdef DBGFTRACE_ENABLED
 +    RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%up%u %04x:%08x -> %04x:%04x %x %04x:%04x",
 +                      pIemCpu->uCpl, uNewCs & X86_SEL_RPL, pCtx->cs.Sel, pCtx->eip,
 +                      uNewCs, uNewEip, uNewFlags, uNewSS, uNewESP);
 +#endif
 +
 +    IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
      pCtx->rip = uNewEip;
      pCtx->cs.Sel = uNewCs;
 …
      pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
 
 -    uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
 -                         | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
 -    if (enmEffOpSize != IEMMODE_16BIT)
 -        fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
 -    if (pIemCpu->uCpl == 0)
 -        fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is 0 */
 -    else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
 -        fEFlagsMask |= X86_EFL_IF;
 -    uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
 -    fEFlagsNew &= ~fEFlagsMask;
 -    fEFlagsNew |= uNewFlags & fEFlagsMask;
 -    IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
 -
      pIemCpu->uCpl = uNewCs & X86_SEL_RPL;
      iemHlpAdjustSelectorForNewCpl(pIemCpu, uNewCs & X86_SEL_RPL, &pCtx->ds);
 …
          DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
      }
 -
 -    pCtx->rip = uNewEip;
 -    pCtx->cs.Sel = uNewCs;
 -    pCtx->cs.ValidSel = uNewCs;
 -    pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
 -    pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
 -    pCtx->cs.u32Limit = cbLimitCS;
 -    pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
 -    pCtx->rsp = uNewRsp;
 
      X86EFLAGS NewEfl;
 …
      NewEfl.u &= ~fEFlagsMask;
      NewEfl.u |= fEFlagsMask & uNewFlags;
 +#ifdef DBGFTRACE_ENABLED
 +    RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%up %04x:%08x -> %04x:%04x %x %04x:%04llx",
 +                      pIemCpu->uCpl, pCtx->cs.Sel, pCtx->eip,
 +                      uNewCs, uNewEip, uNewFlags, pCtx->ss.Sel, uNewRsp);
 +#endif
 +
      IEMMISC_SET_EFL(pIemCpu, pCtx, NewEfl.u);
 +    pCtx->rip = uNewEip;
 +    pCtx->cs.Sel = uNewCs;
 +    pCtx->cs.ValidSel = uNewCs;
 +    pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
 +    pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
 +    pCtx->cs.u32Limit = cbLimitCS;
 +    pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
 +    pCtx->rsp = uNewRsp;
      /* Done! */
 …
      }
 
 +    uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
 +                         | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
 +    if (enmEffOpSize != IEMMODE_16BIT)
 +        fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
 +    if (pIemCpu->uCpl == 0)
 +        fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
 +    else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
 +        fEFlagsMask |= X86_EFL_IF;
 +    uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
 +    fEFlagsNew &= ~fEFlagsMask;
 +    fEFlagsNew |= uNewFlags & fEFlagsMask;
 +#ifdef DBGFTRACE_ENABLED
 +    RTTraceBufAddMsgF(IEMCPU_TO_VM(pIemCpu)->CTX_SUFF(hTraceBuf), "iret/%ul%u %08llx -> %04x:%04llx %llx %04x:%04llx",
 +                      pIemCpu->uCpl, uNewCpl, pCtx->rip, uNewCs, uNewRip, uNewFlags, uNewSs, uNewRsp);
 +#endif
 +
 +    IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
      pCtx->rip = uNewRip;
      pCtx->cs.Sel = uNewCs;
 …
          Log2(("iretq new SS: base=%#RX64 lim=%#x attr=%#x\n", pCtx->ss.u64Base, pCtx->ss.u32Limit, pCtx->ss.Attr.u));
      }
 -
 -    uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
 -                         | X86_EFL_TF | X86_EFL_DF | X86_EFL_OF | X86_EFL_NT;
 -    if (enmEffOpSize != IEMMODE_16BIT)
 -        fEFlagsMask |= X86_EFL_RF | X86_EFL_AC | X86_EFL_ID;
 -    if (pIemCpu->uCpl == 0)
 -        fEFlagsMask |= X86_EFL_IF | X86_EFL_IOPL | X86_EFL_VIF | X86_EFL_VIP; /* VM is ignored */
 -    else if (pIemCpu->uCpl <= pCtx->eflags.Bits.u2IOPL)
 -        fEFlagsMask |= X86_EFL_IF;
 -    uint32_t fEFlagsNew = IEMMISC_GET_EFL(pIemCpu, pCtx);
 -    fEFlagsNew &= ~fEFlagsMask;
 -    fEFlagsNew |= uNewFlags & fEFlagsMask;
 -    IEMMISC_SET_EFL(pIemCpu, pCtx, fEFlagsNew);
 
      if (pIemCpu->uCpl != uNewCpl)
 …
 -/**
 - * Implements verr (fWrite = false) and verw (fWrite = true).
 - */
 -IEM_CIMPL_DEF_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite)
 -{
 -    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
 -    Assert(!IEM_IS_REAL_OR_V86_MODE(pIemCpu));
 -
 -    /** @todo figure whether the accessed bit is set or not. */
 -
 -    bool fAccessible = true;
 -    if (!(uSel & X86_SEL_MASK_OFF_RPL))
 -        fAccessible = false; /** @todo test this on 64-bit. */
 -    else
 -    {
 -        /* Fetch the descriptor. */
 -        RTGCPTR GCPtrBase;
 -        if (uSel & X86_SEL_LDT)
 -        {
 -            if (   !pCtx->ldtr.Attr.n.u1Present
 -                || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
 -                fAccessible = false;
 -            GCPtrBase = pCtx->ldtr.u64Base;
 -        }
 -        else
 -        {
 -            if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
 -                fAccessible = false;
 -            GCPtrBase = pCtx->gdtr.pGdt;
 -        }
 -        if (fAccessible)
 -        {
 -            IEMSELDESC Desc;
 -            VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &Desc.Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
 -            if (rcStrict != VINF_SUCCESS)
 -                return rcStrict;
 -
 -            /* Check the descriptor, order doesn't matter much here. */
 -            if (   !Desc.Legacy.Gen.u1DescType
 -                || !Desc.Legacy.Gen.u1Present)
 -                fAccessible = false;
 -            else
 -            {
 -                if (  fWrite
 -                    ? (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE
 -                    : (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
 -                    fAccessible = false;
 -
 -                /** @todo testcase for the conforming behavior. */
 -                if (   (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
 -                    != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
 -                {
 -                    if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
 -                        fAccessible = false;
 -                    else if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
 -                        fAccessible = false;
 -                }
 -            }
 -        }
 -    }
 -
 -    /* commit */
 -    pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1ZF = fAccessible;
 -
 -    iemRegAddToRip(pIemCpu, cbInstr);
 -    return VINF_SUCCESS;
 -}
 +/**
 + * Helper for VERR, VERW, LAR and LSL; fetches the descriptor for a selector
 + * and returns it in *pDesc.
 + *
 + * @retval VINF_SUCCESS on success.
 + * @retval VINF_IEM_SELECTOR_NOT_OK if the selector isn't ok.
 + * @retval iemMemFetchSysU64 return value.
 + *
 + * @param pIemCpu The IEM state of the calling EMT.
 + * @param uSel The selector value.
 + * @param fAllowSysDesc Whether system descriptors are OK or not.
 + * @param pDesc Where to return the descriptor on success.
 + */
 +static VBOXSTRICTRC iemCImpl_LoadDescHelper(PIEMCPU pIemCpu, uint16_t uSel, bool fAllowSysDesc, PIEMSELDESC pDesc)
 +{
 +    pDesc->Long.au64[0] = 0;
 +    pDesc->Long.au64[1] = 0;
 +
 +    if (!(uSel & X86_SEL_MASK_OFF_RPL)) /** @todo test this on 64-bit. */
 +        return VINF_IEM_SELECTOR_NOT_OK;
 +
 +    /* Within the table limits? */
 +    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);
 +    RTGCPTR GCPtrBase;
 +    if (uSel & X86_SEL_LDT)
 +    {
 +        if (   !pCtx->ldtr.Attr.n.u1Present
 +            || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
 +            return VINF_IEM_SELECTOR_NOT_OK;
 +        GCPtrBase = pCtx->ldtr.u64Base;
 +    }
 +    else
 +    {
 +        if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
 +            return VINF_IEM_SELECTOR_NOT_OK;
 +        GCPtrBase = pCtx->gdtr.pGdt;
 +    }
 +
 +    /* Fetch the descriptor. */
 +    VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
 +    if (rcStrict != VINF_SUCCESS)
 +        return rcStrict;
 +    if (!pDesc->Legacy.Gen.u1DescType)
 +    {
 +        if (!fAllowSysDesc)
 +            return VINF_IEM_SELECTOR_NOT_OK;
 +        if (CPUMIsGuestInLongModeEx(pCtx))
 +        {
 +            rcStrict = iemMemFetchSysU64(pIemCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 8);
 +            if (rcStrict != VINF_SUCCESS)
 +                return rcStrict;
 +        }
 +    }
 +
 +    return VINF_SUCCESS;
 +}
 +
 +
 +/**
 + * Implements verr (fWrite = false) and verw (fWrite = true).
 + */
 +IEM_CIMPL_DEF_2(iemCImpl_VerX, uint16_t, uSel, bool, fWrite)
 +{
 +    Assert(!IEM_IS_REAL_OR_V86_MODE(pIemCpu));
 +
 +    /** @todo figure whether the accessed bit is set or not. */
 +
 +    bool fAccessible = true;
 +    IEMSELDESC Desc;
 +    VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pIemCpu, uSel, false /*fAllowSysDesc*/, &Desc);
 +    if (rcStrict == VINF_SUCCESS)
 +    {
 +        /* Check the descriptor, order doesn't matter much here. */
 +        if (   !Desc.Legacy.Gen.u1DescType
 +            || !Desc.Legacy.Gen.u1Present)
 +            fAccessible = false;
 +        else
 +        {
 +            if (  fWrite
 +                ? (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE)) != X86_SEL_TYPE_WRITE
 +                : (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
 +                fAccessible = false;
 +
 +            /** @todo testcase for the conforming behavior. */
 +            if (   (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
 +                != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
 +            {
 +                if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
 +                    fAccessible = false;
 +                else if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
 +                    fAccessible = false;
 +            }
 +        }
 +    }
 +    else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
 +        fAccessible = false;
 +    else
 +        return rcStrict;
 +
 +    /* commit */
 +    pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1ZF = fAccessible;
 +
 +    iemRegAddToRip(pIemCpu, cbInstr);
 +    return VINF_SUCCESS;
 +}
 +
 +
 +/**
 + * Implements LAR and LSL with 64-bit operand size.
 + *
 + * @returns VINF_SUCCESS.
 + * @param pu64Dst Pointer to the destination register.
 + * @param uSel The selector to load details for.
 + * @param pEFlags Pointer to the eflags register.
 + * @param fIsLar true = LAR, false = LSL.
 + */
 +IEM_CIMPL_DEF_4(iemCImpl_LarLsl_u64, uint64_t *, pu64Dst, uint16_t, uSel, uint32_t *, pEFlags, bool, fIsLar)
 +{
 +    Assert(!IEM_IS_REAL_OR_V86_MODE(pIemCpu));
 +
 +    /** @todo figure whether the accessed bit is set or not. */
 +
 +    bool fDescOk = true;
 +    IEMSELDESC Desc;
 +    VBOXSTRICTRC rcStrict = iemCImpl_LoadDescHelper(pIemCpu, uSel, false /*fAllowSysDesc*/, &Desc);
 +    if (rcStrict == VINF_SUCCESS)
 +    {
 +        /*
 +         * Check the descriptor type.
 +         */
 +        if (!Desc.Legacy.Gen.u1DescType)
 +        {
 +            if (CPUMIsGuestInLongModeEx(pIemCpu->CTX_SUFF(pCtx)))
 +            {
 +                if (Desc.Long.Gen.u5Zeros)
 +                    fDescOk = false;
 +                else
 +                    switch (Desc.Long.Gen.u4Type)
 +                    {
 +                        /** @todo Intel lists 0 as valid for LSL, verify whether that's correct */
 +                        case AMD64_SEL_TYPE_SYS_TSS_AVAIL:
 +                        case AMD64_SEL_TYPE_SYS_TSS_BUSY:
 +                        case AMD64_SEL_TYPE_SYS_LDT: /** @todo Intel lists this as invalid for LAR, AMD and 32-bit does otherwise. */
 +                            break;
 +                        case AMD64_SEL_TYPE_SYS_CALL_GATE:
 +                            fDescOk = fIsLar;
 +                            break;
 +                        default:
 +                            fDescOk = false;
 +                            break;
 +                    }
 +            }
 +            else
 +            {
 +                switch (Desc.Long.Gen.u4Type)
 +                {
 +                    case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
 +                    case X86_SEL_TYPE_SYS_286_TSS_BUSY:
 +                    case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
 +                    case X86_SEL_TYPE_SYS_386_TSS_BUSY:
 +                    case X86_SEL_TYPE_SYS_LDT:
 +                        break;
 +                    case X86_SEL_TYPE_SYS_286_CALL_GATE:
 +                    case X86_SEL_TYPE_SYS_TASK_GATE:
 +                    case X86_SEL_TYPE_SYS_386_CALL_GATE:
 +                        fDescOk = fIsLar;
 +                        break;
 +                    default:
 +                        fDescOk = false;
 +                        break;
 +                }
 +            }
 +        }
 +        if (fDescOk)
 +        {
 +            /*
 +             * Check the RPL/DPL/CPL interaction.
 +             */
 +            /** @todo testcase for the conforming behavior. */
 +            if (   (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)) != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF)
 +                || !Desc.Legacy.Gen.u1DescType)
 +            {
 +                if ((unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl)
 +                    fDescOk = false;
 +                else if (pIemCpu->uCpl > Desc.Legacy.Gen.u2Dpl)
 +                    fDescOk = false;
 +            }
 +        }
 +
 +        if (fDescOk)
 +        {
 +            /*
 +             * All fine, start committing the result.
 +             */
 +            if (fIsLar)
 +                *pu64Dst = Desc.Legacy.au32[1] & UINT32_C(0x00ffff00);
 +            else
 +                *pu64Dst = X86DESC_LIMIT_G(&Desc.Legacy);
 +        }
 +    }
 +    else if (rcStrict == VINF_IEM_SELECTOR_NOT_OK)
 +        fDescOk = false;
 +    else
 +        return rcStrict;
 +
 +    /* commit flags value and advance rip. */
 +    pIemCpu->CTX_SUFF(pCtx)->eflags.Bits.u1ZF = fDescOk;
 +    iemRegAddToRip(pIemCpu, cbInstr);
 +
 +    return VINF_SUCCESS;
 +}
 +
 +
 +/**
 + * Implements LAR and LSL with 16-bit operand size.
 + *
 + * @returns VINF_SUCCESS.
 + * @param pu16Dst Pointer to the destination register.
 + * @param uSel The selector to load details for.
 + * @param pEFlags Pointer to the eflags register.
 + * @param fIsLar true = LAR, false = LSL.
 + */
 +IEM_CIMPL_DEF_4(iemCImpl_LarLsl_u16, uint16_t *, pu16Dst, uint16_t, uSel, uint32_t *, pEFlags, bool, fIsLar)
 +{
 +    uint64_t u64TmpDst = *pu16Dst;
 +    IEM_CIMPL_CALL_4(iemCImpl_LarLsl_u64, &u64TmpDst, uSel, pEFlags, fIsLar);
 +    *pu16Dst = (uint16_t)u64TmpDst;
 +    return VINF_SUCCESS;
 +}
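The recurring fEFlagsMask computation above encodes the IRET privilege rule: the arithmetic flags always come from the popped value, the 32-bit-only flags (RF, AC, ID) only with a wide operand size, IF only when CPL <= IOPL, and IOPL/VIF/VIP only at CPL 0. A standalone sketch of just that mask computation, with the flag constants written out as their standard EFLAGS bit positions instead of the VBox names:

    /* Sketch of the IRET flag-restoration mask used above.  The bit
       positions are the architectural EFLAGS layout; names shortened. */
    #include <stdint.h>
    #include <stdio.h>

    #define FL_CF (1u<<0)
    #define FL_PF (1u<<2)
    #define FL_AF (1u<<4)
    #define FL_ZF (1u<<6)
    #define FL_SF (1u<<7)
    #define FL_TF (1u<<8)
    #define FL_IF (1u<<9)
    #define FL_DF (1u<<10)
    #define FL_OF (1u<<11)
    #define FL_IOPL (3u<<12)
    #define FL_NT (1u<<14)
    #define FL_RF (1u<<16)
    #define FL_AC (1u<<18)
    #define FL_VIF (1u<<19)
    #define FL_VIP (1u<<20)
    #define FL_ID (1u<<21)

    static uint32_t iret_eflags_mask(unsigned cpl, unsigned iopl, int f16BitOpSize)
    {
        uint32_t fMask = FL_CF | FL_PF | FL_AF | FL_ZF | FL_SF
                       | FL_TF | FL_DF | FL_OF | FL_NT;
        if (!f16BitOpSize)
            fMask |= FL_RF | FL_AC | FL_ID;       /* only visible with 32/64-bit pops */
        if (cpl == 0)
            fMask |= FL_IF | FL_IOPL | FL_VIF | FL_VIP;
        else if (cpl <= iopl)
            fMask |= FL_IF;
        return fMask;
    }

    int main(void)
    {
        /* Ring 0 may restore everything including IOPL; ring 3 with IOPL=0 may not touch IF. */
        printf("cpl0: %#x\n", iret_eflags_mask(0, 0, 0));
        printf("cpl3: %#x\n", iret_eflags_mask(3, 0, 0));
        return 0;
    }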
trunk/src/VBox/VMM/VMMAll/IEMAllInstructions.cpp.h (r47562 -> r47568)

  }
 
 +/** Common worker for opcodes 0x0f 0x02 (lar) and 0x0f 0x03 (lsl). */
 +FNIEMOP_DEF_1(iemOpCommonLarLsl_Gv_Ew, bool, fIsLar)
 +{
 +    IEMOP_HLP_NO_REAL_OR_V86_MODE();
 +    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 +
 +    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
 +    {
 +        IEMOP_HLP_DECODED_NL_2(fIsLar ? OP_LAR : OP_LSL, IEMOPFORM_RM_REG, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
 +        switch (pIemCpu->enmEffOpSize)
 +        {
 +            case IEMMODE_16BIT:
 +            {
 +                IEM_MC_BEGIN(4, 0);
 +                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
 +                IEM_MC_ARG(uint16_t, u16Sel, 1);
 +                IEM_MC_ARG(uint32_t *, pEFlags, 2);
 +                IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3);
 +
 +                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
 +                IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
 +                IEM_MC_REF_EFLAGS(pEFlags);
 +                IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u16, pu16Dst, u16Sel, pEFlags, fIsLarArg);
 +
 +                IEM_MC_END();
 +                return VINF_SUCCESS;
 +            }
 +
 +            case IEMMODE_32BIT:
 +            case IEMMODE_64BIT:
 +            {
 +                IEM_MC_BEGIN(4, 0);
 +                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
 +                IEM_MC_ARG(uint16_t, u16Sel, 1);
 +                IEM_MC_ARG(uint32_t *, pEFlags, 2);
 +                IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3);
 +
 +                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
 +                IEM_MC_FETCH_GREG_U16(u16Sel, (bRm & X86_MODRM_RM_MASK) | pIemCpu->uRexB);
 +                IEM_MC_REF_EFLAGS(pEFlags);
 +                IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u64, pu64Dst, u16Sel, pEFlags, fIsLarArg);
 +
 +                IEM_MC_END();
 +                return VINF_SUCCESS;
 +            }
 +
 +            IEM_NOT_REACHED_DEFAULT_CASE_RET();
 +        }
 +    }
 +    else
 +    {
 +        switch (pIemCpu->enmEffOpSize)
 +        {
 +            case IEMMODE_16BIT:
 +            {
 +                IEM_MC_BEGIN(4, 1);
 +                IEM_MC_ARG(uint16_t *, pu16Dst, 0);
 +                IEM_MC_ARG(uint16_t, u16Sel, 1);
 +                IEM_MC_ARG(uint32_t *, pEFlags, 2);
 +                IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3);
 +                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
 +
 +                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 +                IEMOP_HLP_DECODED_NL_2(fIsLar ? OP_LAR : OP_LSL, IEMOPFORM_RM_MEM, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
 +
 +                IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
 +                IEM_MC_REF_GREG_U16(pu16Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
 +                IEM_MC_REF_EFLAGS(pEFlags);
 +                IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u16, pu16Dst, u16Sel, pEFlags, fIsLarArg);
 +
 +                IEM_MC_END();
 +                return VINF_SUCCESS;
 +            }
 +
 +            case IEMMODE_32BIT:
 +            case IEMMODE_64BIT:
 +            {
 +                IEM_MC_BEGIN(4, 1);
 +                IEM_MC_ARG(uint64_t *, pu64Dst, 0);
 +                IEM_MC_ARG(uint16_t, u16Sel, 1);
 +                IEM_MC_ARG(uint32_t *, pEFlags, 2);
 +                IEM_MC_ARG_CONST(bool, fIsLarArg, fIsLar, 3);
 +                IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
 +
 +                IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 +                IEMOP_HLP_DECODED_NL_2(fIsLar ? OP_LAR : OP_LSL, IEMOPFORM_RM_MEM, OP_PARM_Gv, OP_PARM_Ew, DISOPTYPE_DANGEROUS | DISOPTYPE_PRIVILEGED_NOTRAP);
 +
 +                IEM_MC_FETCH_MEM_U16(u16Sel, pIemCpu->iEffSeg, GCPtrEffSrc);
 +                IEM_MC_REF_GREG_U64(pu64Dst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pIemCpu->uRexReg);
 +                IEM_MC_REF_EFLAGS(pEFlags);
 +                IEM_MC_CALL_CIMPL_4(iemCImpl_LarLsl_u64, pu64Dst, u16Sel, pEFlags, fIsLarArg);
 +
 +                IEM_MC_END();
 +                return VINF_SUCCESS;
 +            }
 +
 +            IEM_NOT_REACHED_DEFAULT_CASE_RET();
 +        }
 +    }
 +}
 +
 +
  /** Opcode 0x0f 0x02. */
 -FNIEMOP_STUB(iemOp_lar_Gv_Ew);
 +FNIEMOP_DEF(iemOp_lar_Gv_Ew)
 +{
 +    IEMOP_MNEMONIC("lar Gv,Ew");
 +    return FNIEMOP_CALL_1(iemOpCommonLarLsl_Gv_Ew, true);
 +}
 +
 
  /** Opcode 0x0f 0x03. */
 -FNIEMOP_STUB(iemOp_lsl_Gv_Ew);
 +FNIEMOP_DEF(iemOp_lsl_Gv_Ew)
 +{
 +    IEMOP_MNEMONIC("lsl Gv,Ew");
 +    return FNIEMOP_CALL_1(iemOpCommonLarLsl_Gv_Ew, false);
 +}
 …
  FNIEMOP_STUB(iemOp_bound_Gv_Ma);
 
 +
  /** Opcode 0x63 - non-64-bit modes. */
 -FNIEMOP_STUB(iemOp_arpl_Ew_Gw);
 +FNIEMOP_DEF(iemOp_arpl_Ew_Gw)
 +{
 +    IEMOP_MNEMONIC("arpl Ew,Gw");
 +    IEMOP_HLP_NO_REAL_OR_V86_MODE();
 +    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
 +
 +    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
 +    {
 +        /* Register */
 +        IEMOP_HLP_DECODED_NL_2(OP_ARPL, IEMOPFORM_MR_REG, OP_PARM_Ew, OP_PARM_Gw, DISOPTYPE_HARMLESS);
 +        IEM_MC_BEGIN(3, 0);
 +        IEM_MC_ARG(uint16_t *, pu16Dst, 0);
 +        IEM_MC_ARG(uint16_t, u16Src, 1);
 +        IEM_MC_ARG(uint32_t *, pEFlags, 2);
 +
 +        IEM_MC_FETCH_GREG_U16(u16Src, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
 +        IEM_MC_REF_GREG_U16(pu16Dst, (bRm & X86_MODRM_RM_MASK));
 +        IEM_MC_REF_EFLAGS(pEFlags);
 +        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_arpl, pu16Dst, u16Src, pEFlags);
 +
 +        IEM_MC_ADVANCE_RIP();
 +        IEM_MC_END();
 +    }
 +    else
 +    {
 +        /* Memory */
 +        IEM_MC_BEGIN(3, 2);
 +        IEM_MC_ARG(uint16_t *, pu16Dst, 0);
 +        IEM_MC_ARG(uint16_t, u16Src, 1);
 +        IEM_MC_ARG_LOCAL_EFLAGS(pEFlags, EFlags, 2);
 +        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
 +
 +        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 +        IEMOP_HLP_DECODED_NL_2(OP_ARPL, IEMOPFORM_MR_REG, OP_PARM_Ew, OP_PARM_Gw, DISOPTYPE_HARMLESS);
 +        IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pIemCpu->iEffSeg, GCPtrEffDst, 0 /*arg*/);
 +        IEM_MC_FETCH_GREG_U16(u16Src, (bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
 +        IEM_MC_FETCH_EFLAGS(EFlags);
 +        IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_arpl, pu16Dst, u16Src, pEFlags);
 +
 +        IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 +        IEM_MC_COMMIT_EFLAGS(EFlags);
 +        IEM_MC_ADVANCE_RIP();
 +        IEM_MC_END();
 +    }
 +    return VINF_SUCCESS;
 +}
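On the success path the LAR/LSL worker in IEMAllCImpl.cpp.h commits either the descriptor's attribute bytes (second dword masked with 0x00ffff00) or its granularity-scaled limit. A small standalone sketch of those two computations on a raw descriptor: the bit layout used (limit in bits 0-15 and 48-51, G bit at bit 55) is the standard x86 one, and the helper names are invented for illustration.

    /* Sketch of what iemCImpl_LarLsl_u64 commits: LAR keeps only the
       attribute bytes of the descriptor's second dword; LSL returns the
       limit, scaled by 4KiB pages when the G bit is set. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t lar_result(uint32_t uDescDword1)
    {
        return uDescDword1 & UINT32_C(0x00ffff00);
    }

    static uint64_t lsl_result(uint32_t uDescDword0, uint32_t uDescDword1)
    {
        uint32_t uLimit = (uDescDword0 & 0xffff) | (uDescDword1 & UINT32_C(0x000f0000));
        if (uDescDword1 & UINT32_C(0x00800000)) /* G bit: 4KiB granularity */
            uLimit = (uLimit << 12) | 0xfff;
        return uLimit;
    }

    int main(void)
    {
        /* Flat 4GiB ring-3 code segment: base 0, limit 0xfffff, G=1, type 0xfa. */
        uint32_t dw0 = 0x0000ffff, dw1 = 0x00cffa00;
        printf("LAR -> %#010llx\n", (unsigned long long)lar_result(dw1));      /* 0x00cffa00 */
        printf("LSL -> %#010llx\n", (unsigned long long)lsl_result(dw0, dw1)); /* 0xffffffff */
        return 0;
    }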
trunk/src/VBox/VMM/include/IEMInternal.h (r47444 -> r47568)

  /** @} */
 
 +/** @name Misc.
 + * @{ */
 +FNIEMAIMPLBINU16 iemAImpl_arpl;
 +/** @} */
 +
 
  /** @name FPU operations taking a 32-bit float argument
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp (r47413 -> r47568)

  #define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) \
      do { CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2); } while (0)
 +#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) \
 +    do { CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2); CHK_CALL_ARG(a3, 3); } while (0)
  #define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) \
      do { CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2); CHK_CALL_ARG(a3, 3); CHK_CALL_ARG(a4, 4); } while (0)
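tstIEMCheckMc.cpp re-defines the IEM_MC_* macros so every instruction body can be compile-checked outside the interpreter; the new IEM_MC_CALL_CIMPL_4 line verifies that each argument was declared at the position it is passed in. The changeset does not show CHK_CALL_ARG itself, so the following is only a toy illustration of that kind of compile-time position check, with invented names:

    /* Toy version of argument-position checking: each DEMO_ARG records its
       declared index in an enum constant, and DEMO_CHK_CALL_ARG re-checks it
       with a C11 _Static_assert.  Not the real tstIEMCheckMc machinery. */
    #define DEMO_ARG(a_Type, a_Name, a_iArg) \
        a_Type a_Name; enum { a_Name##_iArg = (a_iArg) }
    #define DEMO_CHK_CALL_ARG(a_Name, a_iArg) \
        _Static_assert(a_Name##_iArg == (a_iArg), #a_Name " declared at wrong position")

    int main(void)
    {
        DEMO_ARG(int, iFirst, 0);
        DEMO_ARG(int, iSecond, 1);
        DEMO_CHK_CALL_ARG(iFirst, 0);   /* compiles */
        DEMO_CHK_CALL_ARG(iSecond, 1);  /* swapping the indices would fail to build */
        (void)iFirst; (void)iSecond;
        return 0;
    }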