Changeset 42407 in vbox for trunk/src/VBox/VMM/VMMAll

- Timestamp: Jul 26, 2012 11:41:35 AM
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 4 edited
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (r42193 → r42407)

  */
 #if defined(VBOX_WITH_RAW_MODE) && !defined(IN_RING0)
-# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg, a_fIsCS) \
+# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
     do \
     { \
-        if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pSReg)) \
-            cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg, a_fIsCS); \
+        if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg)) \
+            cpumGuestLazyLoadHiddenSelectorReg(a_pVCpu, a_pSReg); \
     } while (0)
 #else
-# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg, a_fIsCS) \
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pSReg));
+# define CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(a_pVCpu, a_pSReg) \
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(a_pVCpu, a_pSReg));
 #endif
…
  * @param   pVCpu   The current Virtual CPU.
  * @param   pSReg   The selector register to lazily load hidden parts of.
- * @param   fIsCS
  */
-static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg, bool fIsCS)
+static void cpumGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
 {
-    Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg));
+    Assert(!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
     Assert(!HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM)));
+    Assert((uintptr_t)(pSReg - &pVCpu->cpum.s.Guest.es) < X86_SREG_COUNT);
 
     if (pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
…
         /* V8086 mode - Tightly controlled environment, no question about the limit or flags. */
         pSReg->Attr.u            = 0;
+        pSReg->Attr.n.u4Type     = pSReg == &pVCpu->cpum.s.Guest.cs ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
         pSReg->Attr.n.u1DescType = 1; /* code/data segment */
+        pSReg->Attr.n.u2Dpl      = 3;
         pSReg->Attr.n.u1Present  = 1;
-        pSReg->Attr.n.u4Type     = fIsCS ? X86_SEL_TYPE_ER_ACC : X86_SEL_TYPE_RW_ACC;
         pSReg->u32Limit          = 0x0000ffff;
         pSReg->u64Base           = (uint32_t)pSReg->Sel << 4;
…
 VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenCsAndSs(PVMCPU pVCpu)
 {
-    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs, true);
-    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss, false);
+    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
+    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.ss);
 }
…
 VMM_INT_DECL(void) CPUMGuestLazyLoadHiddenSelectorReg(PVMCPU pVCpu, PCPUMSELREG pSReg)
 {
-    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg, pSReg == &pVCpu->cpum.s.Guest.cs);
+    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, pSReg);
 }
…
 VMMDECL(int) CPUMSetGuestLDTR(PVMCPU pVCpu, uint16_t ldtr)
 {
-    pVCpu->cpum.s.Guest.ldtr.Sel = ldtr;
+    pVCpu->cpum.s.Guest.ldtr.Sel      = ldtr;
+    /* The caller will set more hidden bits if it has them. */
+    pVCpu->cpum.s.Guest.ldtr.ValidSel = 0;
+    pVCpu->cpum.s.Guest.ldtr.fFlags   = 0;
     pVCpu->cpum.s.fChanged |= CPUM_CHANGED_LDTR;
     return VINF_SUCCESS;
…
     if (!CPUMIsGuestInLongMode(pVCpu))
         return false;
-    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs, true);
+    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
     return pVCpu->cpum.s.Guest.cs.Attr.n.u1Long;
 }
…
 }
 
+#ifdef VBOX_WITH_RAW_MODE_NOT_R0
+/**
+ *
+ * @returns @c true if we've entered raw-mode and selectors with RPL=1 are
+ *          really RPL=0, @c false if we've not (RPL=1 really is RPL=1).
+ * @param   pVCpu   The current virtual CPU.
+ */
+VMM_INT_DECL(bool) CPUMIsGuestInRawMode(PVMCPU pVCpu)
+{
+    return pVCpu->cpum.s.fRawEntered;
+}
+#endif
 
 #ifdef VBOX_WITH_RAW_MODE_NOT_R0
…
     if (!pVCpu->cpum.s.Guest.eflags.Bits.u1VM)
     {
-        if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(&pVCpu->cpum.s.Guest.ss))
+        if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.s.Guest.ss))
             uCpl = pVCpu->cpum.s.Guest.ss.Attr.n.u2Dpl;
         else
…
     }
 
-    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs, true);
+    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
     if (    pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
         &&  (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
…
     }
 
-    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs, true);
+    CPUMSELREG_LAZY_LOAD_HIDDEN_PARTS(pVCpu, &pVCpu->cpum.s.Guest.cs);
     if (    pVCpu->cpum.s.Guest.cs.Attr.n.u1Long
         &&  (pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA))
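The recurring edit in this file drops the a_fIsCS flag: the lazy loader can now tell which register it was handed by comparing the pointer against the context, and the new Assert derives the register index the same way. A minimal standalone sketch of that pattern, with simplified stand-in types and attribute values (not the VirtualBox source):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct SELREG { unsigned short Sel; unsigned Attr; } SELREG;
typedef struct GUESTCTX { SELREG es, cs, ss, ds, fs, gs; } GUESTCTX;
enum { SREG_COUNT = 6 };

static void lazyLoadHidden(GUESTCTX *pCtx, SELREG *pSReg)
{
    /* Register index from pointer arithmetic over the contiguous es..gs
       block, mirroring the new Assert in the changeset. */
    size_t iSReg = (size_t)(pSReg - &pCtx->es);
    assert(iSReg < SREG_COUNT);

    /* CS no longer needs a separate a_fIsCS argument: pointer identity
       answers the question.  0x9b/0x93 are stand-in attribute values
       (execute-read vs read-write, accessed). */
    bool fIsCS = (pSReg == &pCtx->cs);
    pSReg->Attr = fIsCS ? 0x9bu : 0x93u;
}

int main(void)
{
    GUESTCTX Ctx = { 0 };
    lazyLoadHidden(&Ctx, &Ctx.cs);
    lazyLoadHidden(&Ctx, &Ctx.ss);
    assert(Ctx.cs.Attr == 0x9bu && Ctx.ss.Attr == 0x93u);
    return 0;
}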
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp (r42193 → r42407)

 }
 
-    if (   (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
-        || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
-    {
-        Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #GP\n", NewSS, pDesc->Legacy.Gen.u4Type));
-        return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, NewSS);
-    }
     if (   (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
         || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
…
                   ? Idte.Gate.u16OffsetLow
                   : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
-    uint32_t cbLimitCS = X86DESC_LIMIT(DescCS.Legacy);
-    if (DescCS.Legacy.Gen.u1Granularity)
-        cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+    uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
     if (uNewEip > cbLimitCS)
     {
…
     /* Check that there is sufficient space for the stack frame. */
-    uint32_t cbLimitSS = X86DESC_LIMIT(DescSS.Legacy);
-    if (DescSS.Legacy.Gen.u1Granularity)
-        cbLimitSS = (cbLimitSS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+    uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
     AssertReturn(!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
…
     RTPTRUNION uStackFrame;
     rcStrict = iemMemMap(pIemCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
-                         uNewEsp - cbStackFrame + X86DESC_BASE(DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
+                         uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
     if (rcStrict != VINF_SUCCESS)
         return rcStrict;
…
     pCtx->ss.fFlags   = CPUMSELREG_FLAGS_VALID;
     pCtx->ss.u32Limit = cbLimitSS;
-    pCtx->ss.u64Base  = X86DESC_BASE(DescSS.Legacy);
-    pCtx->ss.Attr.u   = X86DESC_GET_HID_ATTR(DescSS.Legacy);
+    pCtx->ss.u64Base  = X86DESC_BASE(&DescSS.Legacy);
+    pCtx->ss.Attr.u   = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
     pCtx->rsp         = uNewEsp - cbStackFrame; /** @todo Is the high word cleared for 16-bit stacks and/or interrupt handlers? */
     pIemCpu->uCpl     = uNewCpl;
…
     pCtx->cs.fFlags   = CPUMSELREG_FLAGS_VALID;
     pCtx->cs.u32Limit = cbLimitCS;
-    pCtx->cs.u64Base  = X86DESC_BASE(DescCS.Legacy);
-    pCtx->cs.Attr.u   = X86DESC_GET_HID_ATTR(DescCS.Legacy);
+    pCtx->cs.u64Base  = X86DESC_BASE(&DescCS.Legacy);
+    pCtx->cs.Attr.u   = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
 
     pCtx->rip = uNewEip;
…
 }
 #ifdef VBOX_WITH_RAW_MODE_NOT_R0
-    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg))
+    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg))
         CPUMGuestLazyLoadHiddenSelectorReg(IEMCPU_TO_VMCPU(pIemCpu), pSReg);
 #else
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(IEMCPU_TO_VMCPU(pIemCpu), pSReg));
 #endif
     return pSReg;
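Several hunks here and in IEMAllCImpl.cpp.h below replace the open-coded granularity handling with the new X86DESC_LIMIT_G accessor. What the removed three lines computed is standard x86: with G=1 the 20-bit limit field counts 4 KiB pages, so it is shifted up by 12 and the low bits filled with ones. A small compilable sketch of that computation, using local stand-in constants (the real macro lives in the x86 headers):

#include <assert.h>
#include <stdint.h>

#define MY_PAGE_SHIFT       12
#define MY_PAGE_OFFSET_MASK 0xfffu

/* Byte-granular segment limit from the raw 20-bit limit field and the
   granularity bit, mirroring the removed open-coded sequence. */
static uint32_t descLimitG(uint32_t u20Limit, int fGranularity)
{
    uint32_t cbLimit = u20Limit;
    if (fGranularity)   /* G=1: limit is in 4 KiB units */
        cbLimit = (cbLimit << MY_PAGE_SHIFT) | MY_PAGE_OFFSET_MASK;
    return cbLimit;
}

int main(void)
{
    assert(descLimitG(0xfffff, 1) == UINT32_MAX); /* flat 4 GiB segment */
    assert(descLimitG(0x0ffff, 0) == 0xffffu);    /* 64 KiB byte-granular */
    return 0;
}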
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h (r41906 → r42407)

        here, but that is ruled out by offSeg being 32-bit, right?) */
     uint64_t u64Base;
-    uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
-    if (Desc.Legacy.Gen.u1Granularity)
-        cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+    uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
     if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
         u64Base = 0;
…
             return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
         }
-        u64Base = X86DESC_BASE(Desc.Legacy);
+        u64Base = X86DESC_BASE(&Desc.Legacy);
     }
…
     pCtx->cs.ValidSel = pCtx->cs.Sel;
     pCtx->cs.fFlags   = CPUMSELREG_FLAGS_VALID;
-    pCtx->cs.Attr.u   = X86DESC_GET_HID_ATTR(Desc.Legacy);
+    pCtx->cs.Attr.u   = X86DESC_GET_HID_ATTR(&Desc.Legacy);
     pCtx->cs.u32Limit = cbLimit;
     pCtx->cs.u64Base  = u64Base;
…
     /* Limit / canonical check. */
     uint64_t u64Base;
-    uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
-    if (Desc.Legacy.Gen.u1Granularity)
-        cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
-
+    uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
     if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
     {
…
             return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
         }
-        u64Base = X86DESC_BASE(Desc.Legacy);
+        u64Base = X86DESC_BASE(&Desc.Legacy);
     }
…
     pCtx->cs.ValidSel = pCtx->cs.Sel;
     pCtx->cs.fFlags   = CPUMSELREG_FLAGS_VALID;
-    pCtx->cs.Attr.u   = X86DESC_GET_HID_ATTR(Desc.Legacy);
+    pCtx->cs.Attr.u   = X86DESC_GET_HID_ATTR(&Desc.Legacy);
     pCtx->cs.u32Limit = cbLimit;
     pCtx->cs.u64Base  = u64Base;
…
     /* Calc SS limit.*/
-    uint32_t cbLimitSs = X86DESC_LIMIT(DescSs.Legacy);
-    if (DescSs.Legacy.Gen.u1Granularity)
-        cbLimitSs = (cbLimitSs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+    uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSs.Legacy);
 
     /* Is RIP canonical or within CS.limit? */
     uint64_t u64Base;
-    uint32_t cbLimitCs = X86DESC_LIMIT(DescCs.Legacy);
-    if (DescCs.Legacy.Gen.u1Granularity)
-        cbLimitCs = (cbLimitCs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+    uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
 
     if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
…
             return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
         }
-        u64Base = X86DESC_BASE(DescCs.Legacy);
+        u64Base = X86DESC_BASE(&DescCs.Legacy);
     }
…
     pCtx->cs.ValidSel = uNewCs;
     pCtx->cs.fFlags   = CPUMSELREG_FLAGS_VALID;
-    pCtx->cs.Attr.u   = X86DESC_GET_HID_ATTR(DescCs.Legacy);
+    pCtx->cs.Attr.u   = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
     pCtx->cs.u32Limit = cbLimitCs;
     pCtx->cs.u64Base  = u64Base;
…
     pCtx->ss.ValidSel = uNewOuterSs;
     pCtx->ss.fFlags   = CPUMSELREG_FLAGS_VALID;
-    pCtx->ss.Attr.u   = X86DESC_GET_HID_ATTR(DescSs.Legacy);
+    pCtx->ss.Attr.u   = X86DESC_GET_HID_ATTR(&DescSs.Legacy);
     pCtx->ss.u32Limit = cbLimitSs;
     if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
         pCtx->ss.u64Base = 0;
     else
-        pCtx->ss.u64Base = X86DESC_BASE(DescSs.Legacy);
+        pCtx->ss.u64Base = X86DESC_BASE(&DescSs.Legacy);
 
     pIemCpu->uCpl = (uNewCs & X86_SEL_RPL);
…
     /* Limit / canonical check. */
     uint64_t u64Base;
-    uint32_t cbLimitCs = X86DESC_LIMIT(DescCs.Legacy);
-    if (DescCs.Legacy.Gen.u1Granularity)
-        cbLimitCs = (cbLimitCs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+    uint32_t cbLimitCs = X86DESC_LIMIT_G(&DescCs.Legacy);
 
     if (pIemCpu->enmCpuMode == IEMMODE_64BIT)
…
             return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uNewCs);
         }
-        u64Base = X86DESC_BASE(DescCs.Legacy);
+        u64Base = X86DESC_BASE(&DescCs.Legacy);
     }
…
     pCtx->cs.ValidSel = uNewCs;
     pCtx->cs.fFlags   = CPUMSELREG_FLAGS_VALID;
-    pCtx->cs.Attr.u   = X86DESC_GET_HID_ATTR(DescCs.Legacy);
+    pCtx->cs.Attr.u   = X86DESC_GET_HID_ATTR(&DescCs.Legacy);
     pCtx->cs.u32Limit = cbLimitCs;
     pCtx->cs.u64Base  = u64Base;
…
     }
 
-    uint32_t cbLimitCS = X86DESC_LIMIT(DescCS.Legacy);
-    if (DescCS.Legacy.Gen.u1Granularity)
-        cbLimitCS = (cbLimitCS << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+    uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
 
     /*
…
     }
 
-    uint32_t cbLimitSs = X86DESC_LIMIT(DescSS.Legacy);
-    if (DescSS.Legacy.Gen.u1Granularity)
-        cbLimitSs = (cbLimitSs << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+    uint32_t cbLimitSs = X86DESC_LIMIT_G(&DescSS.Legacy);
 
     /* Check EIP. */
…
     pCtx->cs.ValidSel = uNewCs;
     pCtx->cs.fFlags   = CPUMSELREG_FLAGS_VALID;
-    pCtx->cs.Attr.u   = X86DESC_GET_HID_ATTR(DescCS.Legacy);
+    pCtx->cs.Attr.u   = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
     pCtx->cs.u32Limit = cbLimitCS;
-    pCtx->cs.u64Base  = X86DESC_BASE(DescCS.Legacy);
+    pCtx->cs.u64Base  = X86DESC_BASE(&DescCS.Legacy);
     pCtx->rsp         = uNewESP;
     pCtx->ss.Sel      = uNewSS;
     pCtx->ss.ValidSel = uNewSS;
     pCtx->ss.fFlags   = CPUMSELREG_FLAGS_VALID;
-    pCtx->ss.Attr.u   = X86DESC_GET_HID_ATTR(DescSS.Legacy);
+    pCtx->ss.Attr.u   = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
     pCtx->ss.u32Limit = cbLimitSs;
-    pCtx->ss.u64Base  = X86DESC_BASE(DescSS.Legacy);
+    pCtx->ss.u64Base  = X86DESC_BASE(&DescSS.Legacy);
 
     uint32_t fEFlagsMask = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF
…
     pCtx->cs.ValidSel = uNewCs;
     pCtx->cs.fFlags   = CPUMSELREG_FLAGS_VALID;
-    pCtx->cs.Attr.u   = X86DESC_GET_HID_ATTR(DescCS.Legacy);
+    pCtx->cs.Attr.u   = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
     pCtx->cs.u32Limit = cbLimitCS;
-    pCtx->cs.u64Base  = X86DESC_BASE(DescCS.Legacy);
+    pCtx->cs.u64Base  = X86DESC_BASE(&DescCS.Legacy);
     pCtx->rsp         = uNewRsp;
…
     if (iSegReg == X86_SREG_SS) /* SS gets different treatment */
     {
-        if (   (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
-            || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
-        {
-            Log(("load sreg SS, %#x - code or read only (%#x) -> #GP\n", uSel, Desc.Legacy.Gen.u4Type));
-            return iemRaiseGeneralProtectionFaultBySelector(pIemCpu, uSel);
-        }
         if (   (Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
             || !(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
…
     /* The base and limit. */
+    uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
     uint64_t u64Base;
-    uint32_t cbLimit = X86DESC_LIMIT(Desc.Legacy);
-    if (Desc.Legacy.Gen.u1Granularity)
-        cbLimit = (cbLimit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
-
     if (   pIemCpu->enmCpuMode == IEMMODE_64BIT
         && iSegReg < X86_SREG_FS)
         u64Base = 0;
     else
-        u64Base = X86DESC_BASE(Desc.Legacy);
+        u64Base = X86DESC_BASE(&Desc.Legacy);
…
     /* commit */
     *pSel = uSel;
-    pHid->Attr.u   = X86DESC_GET_HID_ATTR(Desc.Legacy);
+    pHid->Attr.u   = X86DESC_GET_HID_ATTR(&Desc.Legacy);
     pHid->u32Limit = cbLimit;
     pHid->u64Base  = u64Base;
…
     uint64_t u64Base;
     if (!IEM_IS_LONG_MODE(pIemCpu))
-        u64Base = X86DESC_BASE(Desc.Legacy);
+        u64Base = X86DESC_BASE(&Desc.Legacy);
     else
     {
…
         }
 
-        u64Base = X86DESC64_BASE(Desc.Long);
+        u64Base = X86DESC64_BASE(&Desc.Long);
         if (!IEM_IS_CANONICAL(u64Base))
         {
…
     pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK;
     pCtx->ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
-    pCtx->ldtr.Attr.u   = X86DESC_GET_HID_ATTR(Desc.Legacy);
-    pCtx->ldtr.u32Limit = X86DESC_LIMIT(Desc.Legacy);
+    pCtx->ldtr.Attr.u   = X86DESC_GET_HID_ATTR(&Desc.Legacy);
+    pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
     pCtx->ldtr.u64Base  = u64Base;
…
     uint64_t u64Base;
     if (!IEM_IS_LONG_MODE(pIemCpu))
-        u64Base = X86DESC_BASE(Desc.Legacy);
+        u64Base = X86DESC_BASE(&Desc.Legacy);
     else
     {
…
         }
 
-        u64Base = X86DESC64_BASE(Desc.Long);
+        u64Base = X86DESC64_BASE(&Desc.Long);
         if (!IEM_IS_CANONICAL(u64Base))
         {
…
     pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK;
     pCtx->tr.fFlags   = CPUMSELREG_FLAGS_VALID;
-    pCtx->tr.Attr.u   = X86DESC_GET_HID_ATTR(Desc.Legacy);
-    pCtx->tr.u32Limit = X86DESC_LIMIT(Desc.Legacy);
+    pCtx->tr.Attr.u   = X86DESC_GET_HID_ATTR(&Desc.Legacy);
+    pCtx->tr.u32Limit = X86DESC_LIMIT_G(&Desc.Legacy);
     pCtx->tr.u64Base  = u64Base;
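Beyond the accessor change (descriptors are now passed by pointer, &Desc.Legacy, instead of by value), the hunks above repeat one commit idiom: once a descriptor has passed its checks, Sel, ValidSel, fFlags, attributes, limit and base are written together and the entry is marked valid. A simplified sketch of that idiom with stand-in types (the real fields live in CPUMSELREG):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the hidden part of a selector register. */
typedef struct SELREGHID {
    uint16_t Sel, ValidSel;
    uint16_t fFlags;
    uint32_t Attr;
    uint32_t u32Limit;
    uint64_t u64Base;
} SELREGHID;

#define MY_SELREG_FLAGS_VALID 1

/* Commit all hidden fields at once, as the iret/far-call/mov-sreg paths
   above do after descriptor validation succeeds. */
static void commitHiddenParts(SELREGHID *pHid, uint16_t uSel,
                              uint32_t uAttr, uint32_t cbLimit, uint64_t uBase)
{
    pHid->Sel      = uSel;
    pHid->ValidSel = uSel;                  /* selector the hidden parts match */
    pHid->fFlags   = MY_SELREG_FLAGS_VALID; /* hidden parts now trustworthy */
    pHid->Attr     = uAttr;
    pHid->u32Limit = cbLimit;
    pHid->u64Base  = uBase;
}

int main(void)
{
    SELREGHID Cs;
    commitHiddenParts(&Cs, 0x08, 0xc09b, 0xffffffffu, 0);
    printf("cs=%#x base=%#llx limit=%#x\n",
           Cs.Sel, (unsigned long long)Cs.u64Base, Cs.u32Limit);
    return 0;
}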
trunk/src/VBox/VMM/VMMAll/SELMAll.cpp (r42186 → r42407)

 #include <VBox/param.h>
 #include <iprt/assert.h>
-#include <VBox/log.h>
 #include <VBox/vmm/vmm.h>
 #include <iprt/x86.h>
+
+
+/*******************************************************************************
+*   Global Variables                                                           *
+*******************************************************************************/
+#if defined(LOG_ENABLED) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
+/** Segment register names. */
+static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" };
+#endif
…
     }
 
-    return (RTGCPTR)(((RTGCUINTPTR)Addr + X86DESC_BASE(Desc)) & 0xffffffff);
+    return (RTGCPTR)(((RTGCUINTPTR)Addr + X86DESC_BASE(&Desc)) & 0xffffffff);
 }
 #endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
…
 #ifdef VBOX_WITH_RAW_MODE_NOT_R0
     /** @todo when we're in 16 bits mode, we should cut off the address as well?? */
-    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg))
+    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
         CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
-    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(&pCtxCore->cs))
+    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
         CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
 #else
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(&pCtxCore->cs));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
 #endif
…
     if (ppvGC)
     {
-        if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg))
+        if (CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
             *ppvGC = pSReg->u64Base + uFlat;
         else
…
 #ifdef VBOX_WITH_RAW_MODE_NOT_R0
-    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg))
+    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
         CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
-    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(&pCtxCore->cs))
+    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
         CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
 #else
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg));
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(&pCtxCore->cs));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
 #endif
…
     /* calc limit. */
-    uint32_t u32Limit = X86DESC_LIMIT(Desc);
-    if (Desc.Gen.u1Granularity)
-        u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+    uint32_t u32Limit = X86DESC_LIMIT_G(&Desc);
 
     /* calc address assuming straight stuff. */
-    RTGCPTR pvFlat = Addr + X86DESC_BASE(Desc);
+    RTGCPTR pvFlat = Addr + X86DESC_BASE(&Desc);
 
     /* Cut the address to 32 bits. */
…
 #ifdef VBOX_WITH_RAW_MODE_NOT_R0
+
+static void selLoadHiddenSelectorRegFromGuestTable(PVMCPU pVCpu, PCCPUMCTX pCtx, PCPUMSELREG pSReg,
+                                                   RTGCPTR GCPtrDesc, RTSEL const Sel, uint32_t const iSReg)
+{
+    /*
+     * Try read the entry.
+     */
+    X86DESC GstDesc;
+    int rc = PGMPhysReadGCPtr(pVCpu, &GstDesc, GCPtrDesc, sizeof(GstDesc));
+    if (RT_FAILURE(rc))
+    {
+        Log(("SELMLoadHiddenSelectorReg: Error reading descriptor %s=%#x: %Rrc\n", g_aszSRegNms[iSReg], Sel, rc));
+        STAM_REL_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelReadErrors);
+        return;
+    }
+
+    /*
+     * Validate it and load it.
+     */
+    if (!selmIsGstDescGoodForSReg(pVCpu, pSReg, &GstDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
+    {
+        Log(("SELMLoadHiddenSelectorReg: Guest table entry is no good (%s=%#x): %.8Rhxs\n", g_aszSRegNms[iSReg], Sel, &GstDesc));
+        STAM_REL_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelGstNoGood);
+        return;
+    }
+
+    selmLoadHiddenSRegFromGuestDesc(pVCpu, pSReg, &GstDesc);
+    Log(("SELMLoadHiddenSelectorReg: loaded %s=%#x:{b=%llx, l=%x, a=%x, vs=%x} (gst)\n",
+         g_aszSRegNms[iSReg], Sel, pSReg->u64Base, pSReg->u32Limit, pSReg->Attr.u, pSReg->ValidSel));
+    STAM_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelGst);
+}
+
+
 /**
  * CPUM helper that loads the hidden selector register from the descriptor table
…
     Assert(pVM->cCpus == 1);
 
-    RTSEL const Sel = pSReg->Sel;
-
-    /** @todo Consider loading these from the shadow tables when possible? */
-    /*
-     * Calculate descriptor table entry address.
-     */
-    RTGCPTR GCPtrDesc;
+
+    /*
+     * Get the shadow descriptor table entry and validate it.
+     * Should something go amiss, try the guest table.
+     */
+    RTSEL const    Sel   = pSReg->Sel;
+    uint32_t const iSReg = pSReg - CPUMCTX_FIRST_SREG(pCtx); Assert(iSReg < X86_SREG_COUNT);
+    PCX86DESC      pShwDesc;
     if (!(Sel & X86_SEL_LDT))
     {
-        if ((Sel & X86_SEL_MASK) >= pCtx->gdtr.cbGdt)
+        /** @todo this shall not happen, we shall check for these things when executing
+         *        LGDT */
+        AssertReturnVoid((Sel | X86_SEL_RPL | X86_SEL_LDT) <= pCtx->gdtr.cbGdt);
+
+        pShwDesc = &pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
+        if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT)
+            || !selmIsShwDescGoodForSReg(pSReg, pShwDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
         {
-            AssertFailed(); /** @todo count these. */
+            selLoadHiddenSelectorRegFromGuestTable(pVCpu, pCtx, pSReg, pCtx->gdtr.pGdt + (Sel & X86_SEL_MASK), Sel, iSReg);
             return;
         }
-        GCPtrDesc = pCtx->gdtr.pGdt + (Sel & X86_SEL_MASK);
-        /** @todo Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT]; for cases
-         *        where we don't change it too much. */
     }
     else
     {
-        if ((Sel & X86_SEL_MASK) >= pCtx->ldtr.u32Limit)
+        /** @todo this shall not happen, we shall check for these things when executing
+         *        LLDT */
+        AssertReturnVoid((Sel | X86_SEL_RPL | X86_SEL_LDT) <= pCtx->ldtr.u32Limit);
+
+        pShwDesc = (PCX86DESC)((uintptr_t)pVM->selm.s.CTX_SUFF(pvLdt) + pVM->selm.s.offLdtHyper + (Sel & X86_SEL_MASK));
+        if (   VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT)
+            || !selmIsShwDescGoodForSReg(pSReg, pShwDesc, iSReg, CPUMGetGuestCPL(pVCpu)))
         {
-            AssertFailed(); /** @todo count these. */
+            selLoadHiddenSelectorRegFromGuestTable(pVCpu, pCtx, pSReg, pCtx->ldtr.u64Base + (Sel & X86_SEL_MASK), Sel, iSReg);
             return;
         }
-        GCPtrDesc = pCtx->ldtr.u64Base + (Sel & X86_SEL_MASK);
-    }
-
-    /*
-     * Try read the entry.
-     */
-    X86DESC Desc;
-    int rc = PGMPhysReadGCPtr(pVCpu, &Desc, GCPtrDesc, sizeof(Desc));
-    if (RT_FAILURE(rc))
-    {
-        //RT_ZERO(Desc);
-        //if (!(Sel & X86_SEL_LDT))
-        //    Desc = pVM->selm.s.CTX_SUFF(paGdt)[Sel >> X86_SEL_SHIFT];
-        //if (!Desc.Gen.u1Present)
-        {
-            AssertFailed(); /** @todo count these. */
-            return;
-        }
-    }
-
-    /*
-     * Digest it and store the result.
-     */
-    if (   !Desc.Gen.u1Present
-        || !Desc.Gen.u1DescType)
-    {
-        AssertFailed(); /** @todo count these. */
-        return;
-    }
-
-    uint32_t u32Limit = X86DESC_LIMIT(Desc);
-    if (Desc.Gen.u1Granularity)
-        u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
-    pSReg->u32Limit = u32Limit;
-
-    pSReg->u64Base  = X86DESC_BASE(Desc);
-    pSReg->Attr.u   = X86DESC_GET_HID_ATTR(Desc);
-    pSReg->fFlags   = CPUMSELREG_FLAGS_VALID;
-    pSReg->ValidSel = Sel;
-}
-#endif /* VBOX_WITH_RAW_MODE */
-
+    }
+
+    /*
+     * All fine, load it.
+     */
+    selmLoadHiddenSRegFromShadowDesc(pSReg, pShwDesc);
+    STAM_COUNTER_INC(&pVCpu->CTX_SUFF(pVM)->selm.s.StatLoadHidSelShw);
+    Log(("SELMLoadHiddenSelectorReg: loaded %s=%#x:{b=%llx, l=%x, a=%x, vs=%x} (shw)\n",
+         g_aszSRegNms[iSReg], Sel, pSReg->u64Base, pSReg->u32Limit, pSReg->Attr.u, pSReg->ValidSel));
+}
+
+#endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
…
 {
     RTGCUINTPTR uFlat = Addr & 0xffff;
-    if (!pSReg || !CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSReg))
+    if (!pSReg || !CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
         uFlat += (RTGCUINTPTR)SelCS << 4;
     else
…
     /*
      * Limit check.
      */
-    uint32_t u32Limit = X86DESC_LIMIT(Desc);
-    if (Desc.Gen.u1Granularity)
-        u32Limit = (u32Limit << PAGE_SHIFT) | PAGE_OFFSET_MASK;
+    uint32_t u32Limit = X86DESC_LIMIT_G(&Desc);
     if ((RTGCUINTPTR)Addr <= u32Limit)
     {
-        *ppvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(Desc));
+        *ppvFlat = (RTGCPTR)((RTGCUINTPTR)Addr + X86DESC_BASE(&Desc));
         /* Cut the address to 32 bits. */
         *ppvFlat &= 0xffffffff;
…
         return selmValidateAndConvertCSAddrRawMode(pVCpu->CTX_SUFF(pVM), pVCpu, SelCPL, SelCS, Addr, ppvFlat, NULL);
 
-    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSRegCS))
+    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS))
         CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSRegCS);
 
…
     SelCS &= ~X86_SEL_RPL;
 #else
-    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pSRegCS));
+    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSRegCS));
     Assert(pSRegCS->Sel == SelCS);
 #endif
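The SELMAll.cpp rewrite changes the lookup order: the hidden parts are now loaded from SELM's shadow GDT/LDT when that copy is in sync and passes selmIsShwDescGoodForSReg, and only otherwise from the guest's own table via PGMPhysReadGCPtr. A control-flow sketch of that fallback, with hypothetical helper names standing in for the SELM internals:

#include <stdbool.h>
#include <stdio.h>

/* Trivial stand-ins; the real checks live in SELM (selmIsShwDescGoodForSReg
   and friends).  All names here are hypothetical. */
typedef struct DESC { int dummy; } DESC;

static bool shadowTableOutOfSync(void)            { return false; }
static bool shadowDescGoodForSReg(const DESC *pD) { (void)pD; return true; }
static void loadFromShadowDesc(const DESC *pD)    { (void)pD; puts("loaded from shadow table (fast path)"); }
static void loadFromGuestTable(void)              { puts("loaded from guest table (fallback)"); }

static void loadHiddenSelectorReg(const DESC *pShwDesc)
{
    /* Prefer the shadow descriptor; fall back to reading and validating
       the guest's own GDT/LDT entry when the shadow copy is pending a
       resync or fails validation. */
    if (shadowTableOutOfSync() || !shadowDescGoodForSReg(pShwDesc))
    {
        loadFromGuestTable();
        return;
    }
    loadFromShadowDesc(pShwDesc);
}

int main(void)
{
    DESC Shw = { 0 };
    loadHiddenSelectorReg(&Shw);
    return 0;
}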