Changeset 47652 in vbox
- Timestamp: Aug 9, 2013 2:56:17 PM (12 years ago)
- svn:sync-xref-src-repo-rev: 87854
- Location: trunk/src/VBox/VMM
- Files: 12 edited
trunk/src/VBox/VMM/Makefile.kmk
r47646 → r47652

 	VMMR0/HMR0.cpp \
 	VMMR0/HMR0A.asm \
+	VMMR0/HMVMXR0.cpp \
+	VMMR0/HMSVMR0.cpp \
 	VMMR0/PDMR0Device.cpp \
 	VMMR0/PDMR0Driver.cpp \
…
 	VMMAll/PDMAllNetShaper.cpp
 endif
-ifdef VBOX_WITH_OLD_VTX_CODE
- VMMR0_SOURCES += VMMR0/HWVMXR0.cpp
-else
- VMMR0_SOURCES += VMMR0/HMVMXR0.cpp
-endif
-ifdef VBOX_WITH_OLD_AMDV_CODE
- VMMR0_SOURCES += VMMR0/HWSVMR0.cpp
-else
- VMMR0_SOURCES += VMMR0/HMSVMR0.cpp
-endif
 VMMR0_SOURCES.amd64 = \
 	VMMR0/VMMR0JmpA-amd64.asm
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
r47328 → r47652

 VMMDECL(bool) CPUMIsGuestInPAEMode(PVMCPU pVCpu)
 {
-#ifdef VBOX_WITH_OLD_VTX_CODE
-    return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
-        && (pVCpu->cpum.s.Guest.cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
-        && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LMA);
-#else
     return (pVCpu->cpum.s.Guest.cr4 & X86_CR4_PAE)
         && (pVCpu->cpum.s.Guest.cr0 & X86_CR0_PG)
         && !(pVCpu->cpum.s.Guest.msrEFER & MSR_K6_EFER_LME);
-#endif
 }
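The surviving branch reports PAE mode when CR4.PAE and CR0.PG are set and EFER.LME is clear; the removed branch additionally required CR0.PE and tested EFER.LMA instead. As a rough aid for readers, a minimal standalone sketch of how those three bits map onto the x86 paging modes (the constants and helper below are illustrative only, not VirtualBox APIs):

    /* Illustrative only -- not part of the changeset. */
    #include <stdint.h>

    #define X86_CR0_PG       UINT64_C(0x80000000)  /* CR0 bit 31: paging enable      */
    #define X86_CR4_PAE      UINT64_C(0x00000020)  /* CR4 bit 5:  PAE enable         */
    #define MSR_K6_EFER_LME  UINT64_C(0x00000100)  /* EFER bit 8: long mode enable   */

    typedef enum { PAGING_NONE, PAGING_32BIT, PAGING_PAE, PAGING_LONG } PAGINGMODE;

    static PAGINGMODE ClassifyGuestPaging(uint64_t cr0, uint64_t cr4, uint64_t efer)
    {
        if (!(cr0 & X86_CR0_PG))
            return PAGING_NONE;     /* paging disabled                               */
        if (!(cr4 & X86_CR4_PAE))
            return PAGING_32BIT;    /* legacy 2-level paging                         */
        if (efer & MSR_K6_EFER_LME)
            return PAGING_LONG;     /* PAE + LME + PG => long mode, not "PAE mode"   */
        return PAGING_PAE;          /* the case CPUMIsGuestInPAEMode() reports       */
    }

EFER.LMA is just EFER.LME with paging actually switched on, so once CR0.PG is tested separately the LME bit is enough to exclude long mode.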
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r47472 → r47652

     guests, so we must make sure the recompiler flushes its TLB the next
     time it executes code. */
-    if (   pVM->hm.s.fNestedPaging
-#ifdef VBOX_WITH_OLD_VTX_CODE
-        && CPUMIsGuestInPagedProtectedModeEx(pCtx)
-#else
-        && CPUMIsGuestPagingEnabledEx(pCtx)
-#endif
-       )
+    if (   pVM->hm.s.fNestedPaging
+        && CPUMIsGuestPagingEnabledEx(pCtx))
     {
         CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
r47608 → r47652

 %endif

-%ifndef VBOX_WITH_OLD_VTX_CODE
- %ifdef RT_ARCH_AMD64
-  %define VBOX_SKIP_RESTORE_SEG
- %endif
+%ifdef RT_ARCH_AMD64
+ %define VBOX_SKIP_RESTORE_SEG
 %endif
trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac
r47610 → r47652

 ;

-%ifndef VBOX_WITH_OLD_VTX_CODE
- %ifdef RT_ARCH_AMD64
-  ;;
-  ; Keep these macro definitions in this file as it gets included and compiled
-  ; with RT_ARCH_AMD64 once and RT_ARCH_X86 once.
-  %define VMX_SKIP_GDTR
-  %ifndef RT_OS_DARWIN
-   ; Darwin (Mavericks) uses IDTR limit to store the CPUID so we need to restore it always. See @bugref{6875}.
-   %define VMX_SKIP_IDTR
-  %endif
-  %define VMX_SKIP_TR
- %endif
+%ifdef RT_ARCH_AMD64
+ ;;
+ ; Keep these macro definitions in this file as it gets included and compiled
+ ; with RT_ARCH_AMD64 once and RT_ARCH_X86 once.
+ %define VMX_SKIP_GDTR
+ %ifndef RT_OS_DARWIN
+  ; Darwin (Mavericks) uses IDTR limit to store the CPUID so we need to restore it always. See @bugref{6875}.
+  %define VMX_SKIP_IDTR
+ %endif
+ %define VMX_SKIP_TR
 %endif
…
     mov [ss:xDI + CPUMCTX.esi], esi
     mov [ss:xDI + CPUMCTX.ebp], ebp
-%ifndef VBOX_WITH_OLD_VTX_CODE
     mov xAX, cr2
     mov [ss:xDI + CPUMCTX.cr2], xAX
-%endif

 %ifdef RT_ARCH_AMD64
…
     jnz .cached_read
 .no_cached_reads:
-
-%ifdef VBOX_WITH_OLD_VTX_CODE
-    ; Restore CR2 into VMCS-cache field (for EPT).
-    mov xAX, cr2
-    mov [ss:xDX + VMCSCACHE.cr2], xAX
-%endif
 %endif
…
     LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
     LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
-%else
-%ifdef VBOX_WITH_OLD_VTX_CODE
-    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
-    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
-%endif
 %endif
…
     mov qword [xDI + CPUMCTX.r14], r14
     mov qword [xDI + CPUMCTX.r15], r15
-%ifndef VBOX_WITH_OLD_VTX_CODE
     mov rax, cr2
     mov qword [xDI + CPUMCTX.cr2], rax
-%endif

     pop xAX         ; The guest edi we pushed above
…
     LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
     LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
-%else
-%ifdef VBOX_WITH_OLD_VTX_CODE
-    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
-    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
-%endif
 %endif
…
     jnz .cached_read
 .no_cached_reads:
-
-%ifdef VBOX_WITH_OLD_VTX_CODE
-    ; Restore CR2 into VMCS-cache field (for EPT).
-    mov xAX, cr2
-    mov [xDX + VMCSCACHE.cr2], xAX
-%endif
 %endif
…
     LOADHOSTMSR MSR_K6_STAR
     LOADHOSTMSR MSR_K8_LSTAR
-%else
-%ifdef VBOX_WITH_OLD_VTX_CODE
-    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
-    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
-%endif
 %endif
…
     LOADHOSTMSR MSR_K6_STAR
     LOADHOSTMSR MSR_K8_LSTAR
-%else
-%ifdef VBOX_WITH_OLD_VTX_CODE
-    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
-    LOADHOSTMSR MSR_K8_KERNEL_GS_BASE
-%endif
 %endif
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r47514 → r47652

     pCtx->dr[7] &= 0xffffffff;                                            /* Upper 32 bits MBZ. */
     pCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15));  /* MBZ. */
-    pCtx->dr[7] |= 0x400;                                                 /* MB1. */
+    pCtx->dr[7] |= X86_DR7_INIT_VAL;                                      /* MB1. */

     /* Update DR6, DR7 with the guest values. */
…

     /* Clear any unused and reserved bits. */
-    pVCpu->hm.s.fContextUseFlags &= ~(  HM_CHANGED_GUEST_MSR  /* Unused (legacy). */
-                                      | HM_CHANGED_GUEST_RIP  /* Unused (loaded unconditionally). */
+    pVCpu->hm.s.fContextUseFlags &= ~(  HM_CHANGED_GUEST_RIP  /* Unused (loaded unconditionally). */
                                       | HM_CHANGED_GUEST_RSP
                                       | HM_CHANGED_GUEST_RFLAGS
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r47643 → r47652

     /* Clear any unused and reserved bits. */
-    pVCpu->hm.s.fContextUseFlags &= ~(  HM_CHANGED_GUEST_CR2
-                                      | HM_CHANGED_GUEST_MSR /* legacy */);
+    pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR2;

     AssertMsg(!pVCpu->hm.s.fContextUseFlags,
trunk/src/VBox/VMM/VMMR0/HMVMXR0.h
r47473 → r47652

 #endif

-#ifdef VBOX_WITH_OLD_VTX_CODE
-# ifdef VMX_USE_CACHED_VMCS_ACCESSES
-#  define VMXReadCachedVmcs(idxField, pVal)  VMXReadCachedVmcsEx(pVCpu, idxField##_CACHE_IDX, pVal)
-# else
-#  define VMXReadCachedVmcs                  VMXReadVmcsField
-# endif
-# define VMXReadVmcs                         VMXReadVmcsField
-#else  /* !VBOX_WITH_OLD_VTX_CODE */
 # ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
 #  define VMXReadVmcsHstN(idxField, p64Val)  HMVMX_IS_64BIT_HOST_MODE() ? \
…
 #  define VMXReadVmcsGstNByIdxVal            VMXReadVmcs64
 # endif
-#endif /* !VBOX_WITH_OLD_VTX_CODE */

 #endif /* IN_RING0 */
trunk/src/VBox/VMM/VMMR3/HM.cpp
r47635 → r47652

     RTGCPHYS GCPhys = 0;

-#ifndef VBOX_WITH_OLD_VTX_CODE
     LogRel(("HM: Using VT-x implementation 2.0!\n"));
-#endif
     LogRel(("HM: Host CR4 = %#RX64\n", pVM->hm.s.vmx.hostCR4));
     LogRel(("HM: MSR_IA32_FEATURE_CONTROL = %#RX64\n", pVM->hm.s.vmx.msr.feature_ctrl));
…
     LogRel(("HM: Ignoring VPID capabilities of CPU.\n"));

-    /** TPR patching would never have worked on Intel. Leaving it here for the old
-     *  code's sake. See @bugref{6398}. */
-#ifdef VBOX_WITH_OLD_VTX_CODE
-    /*
-     * TPR patching status logging.
-     */
-    if (pVM->hm.s.fTRPPatchingAllowed)
-    {
-        if (   (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
-            && (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
-        {
-            pVM->hm.s.fTRPPatchingAllowed = false;  /* not necessary as we have a hardware solution. */
-            LogRel(("HM: TPR Patching not required (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).\n"));
-        }
-        else
-        {
-            uint32_t u32Eax, u32Dummy;
-
-            /* TPR patching needs access to the MSR_K8_LSTAR msr. */
-            ASMCpuId(0x80000000, &u32Eax, &u32Dummy, &u32Dummy, &u32Dummy);
-            if (   u32Eax < 0x80000001
-                || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
-            {
-                pVM->hm.s.fTRPPatchingAllowed = false;
-                LogRel(("HM: TPR patching disabled (long mode not supported).\n"));
-            }
-        }
-    }
-    LogRel(("HM: TPR Patching %s.\n", (pVM->hm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));
-#endif
-
-
     /*
      * Check for preemption timer config override and log the state of it.
…
     Log(("pVM->hm.s.svm.fSupported = %d\n", pVM->hm.s.svm.fSupported));

-#ifndef VBOX_WITH_OLD_AMDV_CODE
     LogRel(("HM: Using AMD-V implementation 2.0!\n"));
-#endif

     uint32_t u32Family;
…
     {
         PVMCPU pVCpu = &pVM->aCpus[i];
-
-        pVCpu->hm.s.enmShadowMode = PGMGetShadowMode(pVCpu);
-#ifdef VBOX_WITH_OLD_VTX_CODE
-        Assert(pVCpu->hm.s.vmx.enmCurrGuestMode == PGMGetGuestMode(pVCpu));
-        pVCpu->hm.s.vmx.enmCurrGuestMode = PGMGetGuestMode(pVCpu);
-#endif
+        pVCpu->hm.s.enmShadowMode = PGMGetShadowMode(pVCpu);
     }
 }
…
     pVCpu->hm.s.enmShadowMode = enmShadowMode;

-#ifdef VBOX_WITH_OLD_VTX_CODE
-    if (   pVM->hm.s.vmx.fEnabled
-        && HMIsEnabled(pVM))
-    {
-        if (   pVCpu->hm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
-            && enmGuestMode >= PGMMODE_PROTECTED)
-        {
-            PCPUMCTX pCtx;
-
-            pCtx = CPUMQueryGuestCtxPtr(pVCpu);
-
-            /* After a real mode switch to protected mode we must force
-               CPL to 0. Our real mode emulation had to set it to 3. */
-            pCtx->ss.Attr.n.u2Dpl = 0;
-        }
-    }
-
-    if (pVCpu->hm.s.vmx.enmCurrGuestMode != enmGuestMode)
-    {
-        /* Keep track of paging mode changes. */
-        pVCpu->hm.s.vmx.enmPrevGuestMode = pVCpu->hm.s.vmx.enmCurrGuestMode;
-        pVCpu->hm.s.vmx.enmCurrGuestMode = enmGuestMode;
-
-        /* Did we miss a change, because all code was executed in the recompiler? */
-        if (pVCpu->hm.s.vmx.enmLastSeenGuestMode == enmGuestMode)
-        {
-            Log(("HMR3PagingModeChanged missed %s->%s transition (prev %s)\n", PGMGetModeName(pVCpu->hm.s.vmx.enmPrevGuestMode),
-                 PGMGetModeName(pVCpu->hm.s.vmx.enmCurrGuestMode), PGMGetModeName(pVCpu->hm.s.vmx.enmLastSeenGuestMode)));
-            pVCpu->hm.s.vmx.enmLastSeenGuestMode = pVCpu->hm.s.vmx.enmPrevGuestMode;
-        }
-    }
-#else
-    /* If the guest left protected mode VMX execution, we'll have to be extra
-     * careful if/when the guest switches back to protected mode.
+    /*
+     * If the guest left protected mode VMX execution, we'll have to be
+     * extra careful if/when the guest switches back to protected mode.
      */
     if (enmGuestMode == PGMMODE_REAL)
…
         pVCpu->hm.s.vmx.fWasInRealMode = true;
     }
-#endif
-
-    /** @todo r=ramshankar: Why do we need to do this? */
+
+    /** @todo r=ramshankar: Disabling for now. If nothing breaks remove it
+     *        eventually. (Test platforms that use the cache ofc). */
+#if 0
 #ifdef VMX_USE_CACHED_VMCS_ACCESSES
     /* Reset the contents of the read cache. */
…
         pCache->Read.aFieldVal[j] = 0;
 #endif
+#endif
 }
…
     pVCpu->hm.s.fContextUseFlags = (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST);

-    pVCpu->hm.s.vmx.u32CR0Mask = 0;
-    pVCpu->hm.s.vmx.u32CR4Mask = 0;
-
-    pVCpu->hm.s.fActive = false;
-    pVCpu->hm.s.Event.fPending = false;
-
-#ifdef VBOX_WITH_OLD_VTX_CODE
-    /* Reset state information for real-mode emulation in VT-x. */
-    pVCpu->hm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;
-    pVCpu->hm.s.vmx.enmPrevGuestMode     = PGMMODE_REAL;
-    pVCpu->hm.s.vmx.enmCurrGuestMode     = PGMMODE_REAL;
-#else
+    pVCpu->hm.s.vmx.u32CR0Mask     = 0;
+    pVCpu->hm.s.vmx.u32CR4Mask     = 0;
+    pVCpu->hm.s.fActive            = false;
+    pVCpu->hm.s.Event.fPending     = false;
     pVCpu->hm.s.vmx.fWasInRealMode = true;
-#endif

     /* Reset the contents of the read cache. */
…
     /* Verify the requirements for executing code in protected
        mode. VT-x can't handle the CPU state right after a switch
-       from real to protected mode. (all sorts of RPL & DPL assumptions) */
-#if VBOX_WITH_OLD_VTX_CODE
-    if (   pVCpu->hm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
-        && enmGuestMode >= PGMMODE_PROTECTED)
-#else
+       from real to protected mode. (all sorts of RPL & DPL assumptions). */
     if (pVCpu->hm.s.vmx.fWasInRealMode)
-#endif
     {
-        //@todo: If guest is in V86 mode, these checks should be different!
-#if VBOX_WITH_OLD_VTX_CODE
-        if (   (pCtx->cs.Sel & X86_SEL_RPL)
-            || (pCtx->ds.Sel & X86_SEL_RPL)
-            || (pCtx->es.Sel & X86_SEL_RPL)
-            || (pCtx->fs.Sel & X86_SEL_RPL)
-            || (pCtx->gs.Sel & X86_SEL_RPL)
-            || (pCtx->ss.Sel & X86_SEL_RPL))
-        {
-            STAM_COUNTER_INC(&pVCpu->hm.s.StatVmxCheckBadSel);
-            return false;
-        }
-#else
+        /** @todo If guest is in V86 mode, these checks should be different! */
         if ((pCtx->cs.Sel & X86_SEL_RPL) != (pCtx->ss.Sel & X86_SEL_RPL))
         {
…
             return false;
         }
-#endif
     }
-    /* VT-x also chokes on invalid tr or ldtr selectors (minix) */
+    /* VT-x also chokes on invalid TR or LDTR selectors (minix). */
     if (pCtx->gdtr.cbGdt)
     {
…
         && !pVM->hm.s.vmx.fUnrestrictedGuest)
     {
-#ifdef VBOX_WITH_OLD_VTX_CODE
-        /** @todo This should (probably) be set on every excursion to the REM,
-         *        however it's too risky right now. So, only apply it when we go
-         *        back to REM for real mode execution. (The XP hack below doesn't
-         *        work reliably without this.)
-         *  Update: Implemented in EM.cpp, see #ifdef EM_NOTIFY_HM. */
-        for (uint32_t i = 0; i < pVM->cCpus; i++)
-            pVM->aCpus[i].hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
-#endif
-
-        if (   !pVM->hm.s.fNestedPaging        /* requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap */
-            || CPUMIsGuestInRealModeEx(pCtx))  /* requires a fake TSS for real mode - stored in the VMM device heap */
+        if (   !pVM->hm.s.fNestedPaging        /* Requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap */
+            || CPUMIsGuestInRealModeEx(pCtx))  /* Requires a fake TSS for real mode - stored in the VMM device heap */
             return false;
…
         if (pCtx->rsp >= pCtx->ss.u32Limit)
             return false;
-#if 0
-        if (   pCtx->cs.Sel >= pCtx->gdtr.cbGdt
-            || pCtx->ss.Sel >= pCtx->gdtr.cbGdt
-            || pCtx->ds.Sel >= pCtx->gdtr.cbGdt
-            || pCtx->es.Sel >= pCtx->gdtr.cbGdt
-            || pCtx->fs.Sel >= pCtx->gdtr.cbGdt
-            || pCtx->gs.Sel >= pCtx->gdtr.cbGdt)
-            return false;
-#endif
         }
     }
…
      * when the unrestricted guest execution feature is missing (VT-x only).
      */
-#ifdef VBOX_WITH_OLD_VTX_CODE
-    if (   pVM->hm.s.vmx.fEnabled
-        && !pVM->hm.s.vmx.fUnrestrictedGuest
-        && !CPUMIsGuestInPagedProtectedModeEx(pCtx)
-        && !PDMVmmDevHeapIsEnabled(pVM)
-        && (pVM->hm.s.fNestedPaging || CPUMIsGuestInRealModeEx(pCtx)))
-        return true;
-#else
     if (   pVM->hm.s.vmx.fEnabled
         && !pVM->hm.s.vmx.fUnrestrictedGuest
         && CPUMIsGuestInRealModeEx(pCtx)
         && !PDMVmmDevHeapIsEnabled(pVM))
+    {
         return true;
-#endif
+    }

     return false;
…
         AssertRCReturn(rc, rc);

-#ifdef VBOX_WITH_OLD_VTX_CODE
-        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.vmx.enmLastSeenGuestMode);
-        AssertRCReturn(rc, rc);
-        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.vmx.enmCurrGuestMode);
-        AssertRCReturn(rc, rc);
-        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.vmx.enmPrevGuestMode);
-        AssertRCReturn(rc, rc);
-#else
-        //@todo: We only need to save pVM->aCpus[i].hm.s.vmx.fWasInRealMode and
-        //       perhaps not even that (the initial value of 'true' is safe).
+        /** @todo We only need to save pVM->aCpus[i].hm.s.vmx.fWasInRealMode and
+         *        perhaps not even that (the initial value of @c true is safe. */
         uint32_t u32Dummy = PGMMODE_REAL;
         rc = SSMR3PutU32(pSSM, u32Dummy);
…
         rc = SSMR3PutU32(pSSM, u32Dummy);
         AssertRCReturn(rc, rc);
-#endif
     }
+
 #ifdef VBOX_HM_WITH_GUEST_PATCHING
     rc = SSMR3PutGCPtr(pSSM, pVM->hm.s.pGuestPatchMem);
…
         {
             uint32_t val;

-#ifdef VBOX_WITH_OLD_VTX_CODE
-            rc = SSMR3GetU32(pSSM, &val);
-            AssertRCReturn(rc, rc);
-            pVM->aCpus[i].hm.s.vmx.enmLastSeenGuestMode = (PGMMODE)val;
-
-            rc = SSMR3GetU32(pSSM, &val);
-            AssertRCReturn(rc, rc);
-            pVM->aCpus[i].hm.s.vmx.enmCurrGuestMode = (PGMMODE)val;
-
-            rc = SSMR3GetU32(pSSM, &val);
-            AssertRCReturn(rc, rc);
-            pVM->aCpus[i].hm.s.vmx.enmPrevGuestMode = (PGMMODE)val;
-#else
-            //@todo: See note above re saving enmLastSeenGuestMode
+            /** @todo See note in hmR3Save(). */
             rc = SSMR3GetU32(pSSM, &val);
             AssertRCReturn(rc, rc);
…
             rc = SSMR3GetU32(pSSM, &val);
             AssertRCReturn(rc, rc);
-#endif
         }
     }
trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac
r46099 → r47652

 ; Manual save and restore:
 ;  - General purpose registers except RIP, RSP
-; 
+;
 ; Trashed:
 ;  - CR2 (we don't care)
…
     LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
     LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
-%else
-%ifdef VBOX_WITH_OLD_VTX_CODE
-    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
-    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
-%endif
 %endif
…
     mov qword [rdi + CPUMCTX.r14], r14
     mov qword [rdi + CPUMCTX.r15], r15
-%ifndef VBOX_WITH_OLD_VTX_CODE
     mov rax, cr2
     mov qword [rdi + CPUMCTX.cr2], rax
-%endif

     pop rax         ; The guest edi we pushed above
…
     SAVEGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
     SAVEGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
-%else
-%ifdef VBOX_WITH_OLD_VTX_CODE
-    ; The KERNEL_GS_BASE MSR does not work reliably with auto load/store. See @bugref{6208}
-    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
-%endif
 %endif
…
     jnz .cached_read
 .no_cached_reads:
-
-%ifdef VBOX_WITH_OLD_VTX_CODE
-    ; Restore CR2 into VMCS-cache field (for EPT).
-    mov rax, cr2
-    mov [rdi + VMCSCACHE.cr2], rax
-%endif
 %ifdef VBOX_WITH_CRASHDUMP_MAGIC
     mov dword [rdi + VMCSCACHE.uPos], 8
…
 ; Manual save and restore:
 ;  - General purpose registers except RIP, RSP, RAX
-; 
+;
 ; Trashed:
 ;  - CR2 (we don't care)
trunk/src/VBox/VMM/include/HMInternal.h
r47619 → r47652

 #endif

-#ifdef VBOX_WITH_OLD_VTX_CODE
-# define VMX_USE_CACHED_VMCS_ACCESSES
-#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
 # define VMX_USE_CACHED_VMCS_ACCESSES
 #endif
…
 #define HM_CHANGED_GUEST_LDTR                   RT_BIT(5)
 #define HM_CHANGED_GUEST_TR                     RT_BIT(6)
-#define HM_CHANGED_GUEST_MSR                    RT_BIT(7)   /* Unused in new VT-x, AMD-V code. */
-#define HM_CHANGED_GUEST_SEGMENT_REGS           RT_BIT(8)
-#define HM_CHANGED_GUEST_DEBUG                  RT_BIT(9)
-#define HM_CHANGED_ALL_GUEST_BASE               (  HM_CHANGED_GUEST_CR0          \
-                                                 | HM_CHANGED_GUEST_CR3          \
-                                                 | HM_CHANGED_GUEST_CR4          \
-                                                 | HM_CHANGED_GUEST_GDTR         \
-                                                 | HM_CHANGED_GUEST_IDTR         \
-                                                 | HM_CHANGED_GUEST_LDTR         \
-                                                 | HM_CHANGED_GUEST_TR           \
-                                                 | HM_CHANGED_GUEST_MSR          \
-                                                 | HM_CHANGED_GUEST_SEGMENT_REGS \
-                                                 | HM_CHANGED_GUEST_DEBUG)
-#define HM_CHANGED_ALL_GUEST                    HM_CHANGED_ALL_GUEST_BASE
-
-/** New VT-x, AMD-V code uses extra flags for more fine-grained state
- *  tracking. */
-#if !defined(VBOX_WITH_OLD_VTX_CODE) || !defined(VBOX_WITH_OLD_AMDV_CODE)
-# define HM_CHANGED_GUEST_RIP                   RT_BIT(10)
-# define HM_CHANGED_GUEST_RSP                   RT_BIT(11)
-# define HM_CHANGED_GUEST_RFLAGS                RT_BIT(12)
-# define HM_CHANGED_GUEST_CR2                   RT_BIT(13)
-# define HM_CHANGED_GUEST_SYSENTER_CS_MSR       RT_BIT(14)
-# define HM_CHANGED_GUEST_SYSENTER_EIP_MSR      RT_BIT(15)
-# define HM_CHANGED_GUEST_SYSENTER_ESP_MSR      RT_BIT(16)
+#define HM_CHANGED_GUEST_SEGMENT_REGS           RT_BIT(7)
+#define HM_CHANGED_GUEST_DEBUG                  RT_BIT(8)
+# define HM_CHANGED_GUEST_RIP                   RT_BIT(9)
+# define HM_CHANGED_GUEST_RSP                   RT_BIT(10)
+# define HM_CHANGED_GUEST_RFLAGS                RT_BIT(11)
+# define HM_CHANGED_GUEST_CR2                   RT_BIT(12)
+# define HM_CHANGED_GUEST_SYSENTER_CS_MSR       RT_BIT(13)
+# define HM_CHANGED_GUEST_SYSENTER_EIP_MSR      RT_BIT(14)
+# define HM_CHANGED_GUEST_SYSENTER_ESP_MSR      RT_BIT(15)
 /* VT-x specific state. */
-# define HM_CHANGED_VMX_GUEST_AUTO_MSRS         RT_BIT(17)
-# define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE    RT_BIT(18)
-# define HM_CHANGED_VMX_GUEST_APIC_STATE        RT_BIT(19)
-# define HM_CHANGED_VMX_ENTRY_CTLS              RT_BIT(20)
-# define HM_CHANGED_VMX_EXIT_CTLS               RT_BIT(21)
+# define HM_CHANGED_VMX_GUEST_AUTO_MSRS         RT_BIT(16)
+# define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE    RT_BIT(17)
+# define HM_CHANGED_VMX_GUEST_APIC_STATE        RT_BIT(18)
+# define HM_CHANGED_VMX_ENTRY_CTLS              RT_BIT(19)
+# define HM_CHANGED_VMX_EXIT_CTLS               RT_BIT(20)
 /* AMD-V specific state. */
-# define HM_CHANGED_SVM_GUEST_EFER_MSR          RT_BIT(17)
-# define HM_CHANGED_SVM_GUEST_APIC_STATE        RT_BIT(18)
-# define HM_CHANGED_SVM_RESERVED1               RT_BIT(19)
-# define HM_CHANGED_SVM_RESERVED2               RT_BIT(20)
-# define HM_CHANGED_SVM_RESERVED3               RT_BIT(21)
-
-# undef  HM_CHANGED_ALL_GUEST
-# define HM_CHANGED_ALL_GUEST                   (  HM_CHANGED_ALL_GUEST_BASE     \
+# define HM_CHANGED_SVM_GUEST_EFER_MSR          RT_BIT(16)
+# define HM_CHANGED_SVM_GUEST_APIC_STATE        RT_BIT(17)
+# define HM_CHANGED_SVM_RESERVED1               RT_BIT(18)
+# define HM_CHANGED_SVM_RESERVED2               RT_BIT(19)
+# define HM_CHANGED_SVM_RESERVED3               RT_BIT(20)
+
+# define HM_CHANGED_ALL_GUEST                   (  HM_CHANGED_GUEST_CR0          \
+                                                 | HM_CHANGED_GUEST_CR3          \
+                                                 | HM_CHANGED_GUEST_CR4          \
+                                                 | HM_CHANGED_GUEST_GDTR         \
+                                                 | HM_CHANGED_GUEST_IDTR         \
+                                                 | HM_CHANGED_GUEST_LDTR         \
+                                                 | HM_CHANGED_GUEST_TR           \
+                                                 | HM_CHANGED_GUEST_SEGMENT_REGS \
+                                                 | HM_CHANGED_GUEST_DEBUG        \
                                                  | HM_CHANGED_GUEST_RIP          \
                                                  | HM_CHANGED_GUEST_RSP          \
…
                                                  | HM_CHANGED_VMX_ENTRY_CTLS     \
                                                  | HM_CHANGED_VMX_EXIT_CTLS)
-#endif
-
-#define HM_CHANGED_HOST_CONTEXT                 RT_BIT(22)
+
+#define HM_CHANGED_HOST_CONTEXT                 RT_BIT(21)
 /** @} */
…
 #endif

-#ifndef VBOX_WITH_OLD_VTX_CODE
+    /** Internal Id of which flush-handler to use for tagged-TLB entries. */
     unsigned                    uFlushTaggedTlb;
-#else
-    /** Ring 0 handlers for VT-x. */
-    DECLR0CALLBACKMEMBER(void, pfnFlushTaggedTlb, (PVM pVM, PVMCPU pVCpu));
-#endif

 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
…
     } LastError;

-#ifdef VBOX_WITH_OLD_VTX_CODE
-    /** The last seen guest paging mode (by VT-x). */
-    PGMMODE                     enmLastSeenGuestMode;
-    /** Current guest paging mode (as seen by HMR3PagingModeChanged). */
-    PGMMODE                     enmCurrGuestMode;
-    /** Previous guest paging mode (as seen by HMR3PagingModeChanged). */
-    PGMMODE                     enmPrevGuestMode;
-#else
     /** Which host-state bits to restore before being preempted. */
     uint32_t                    fRestoreHostFlags;
…
     /** Set if guest was executing in real mode (extra checks). */
     bool                        fWasInRealMode;
-#endif
 } vmx;
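The renumbering above works because fContextUseFlags is an ordinary dirty-state bitmask: code that modifies guest state ORs in the matching HM_CHANGED_* bit, and the world-switch path syncs and then clears only the bits that are set (the HMSVMR0.cpp and HMVMXR0.cpp hunks earlier simply stop masking off the now-deleted HM_CHANGED_GUEST_MSR bit). A simplified sketch of that pattern, with made-up names rather than the real VirtualBox structures:

    /* Illustrative only -- simplified dirty-flag pattern, not the actual VMM code. */
    #include <stdint.h>

    #define CHANGED_GUEST_CR0   (UINT32_C(1) << 0)
    #define CHANGED_GUEST_CR2   (UINT32_C(1) << 12)

    typedef struct VCPUSTATE
    {
        uint32_t fContextUseFlags;  /* which pieces of guest state are dirty */
        uint64_t cr2;               /* part of the cached guest context      */
    } VCPUSTATE;

    /* Whoever modifies guest state marks it dirty... */
    static void SetGuestCr2(VCPUSTATE *pVCpu, uint64_t uNewCr2)
    {
        pVCpu->cr2 = uNewCr2;
        pVCpu->fContextUseFlags |= CHANGED_GUEST_CR2;
    }

    /* ...and before re-entering the guest only the flagged state is reloaded. */
    static void LoadDirtyGuestState(VCPUSTATE *pVCpu)
    {
        if (pVCpu->fContextUseFlags & CHANGED_GUEST_CR2)
        {
            /* write pVCpu->cr2 into the hardware control structure here */
            pVCpu->fContextUseFlags &= ~CHANGED_GUEST_CR2;
        }
        /* bits the backend handles unconditionally are simply masked off,
           which is what the "clear any unused and reserved bits" hunks do */
    }

Because the bits are consumed only through the HM_CHANGED_* names, retiring the unused HM_CHANGED_GUEST_MSR bit and shifting every later bit down by one is safe as long as all users are rebuilt together, which is why the saved-state format in HM.cpp does not change.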
trunk/src/VBox/VMM/include/HMInternal.mac
r46038 → r47652

 ;

-%ifdef VBOX_WITH_OLD_VTX_CODE
- %define VMX_USE_CACHED_VMCS_ACCESSES
-%else
 %if HC_ARCH_BITS == 32
- %ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
-  %define VMX_USE_CACHED_VMCS_ACCESSES
- %endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
-%endif ; HC_ARCH_BITS == 32
-%endif ; VBOX_WITH_OLD_VTX_CODE
+ %ifndef VBOX_WITH_HYBRID_32BIT_KERNEL
+  %define VMX_USE_CACHED_VMCS_ACCESSES
+ %endif
+%endif

 %define VBOX_WITH_AUTO_MSR_LOAD_RESTORE