Changeset 81177 in vbox
- Timestamp: Oct 9, 2019, 10:41:01 AM
- File: 1 edited (trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp)
Legend:
- Unmodified: lines without a marker
- Added: lines prefixed with +
- Removed: lines prefixed with -
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r81170 → r81177 (unchanged runs elided with …)

 }
 
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
 /**
- * Exports the nested-guest hardware virtualization state into the nested-guest
+ * Exports the hardware virtualization state into the nested-guest
  * VMCB.
  *
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pVmcbNstGst Pointer to the nested-guest VM control block.
+ * @param   pVCpu   The cross context virtual CPU structure.
+ * @param   pVmcb   Pointer to the VM control block.
  *
  * @remarks No-long-jump zone!!!
  */
-static void hmR0SvmExportGuestHwvirtStateNested(PVMCPUCC pVCpu, PSVMVMCB pVmcbNstGst)
+static void hmR0SvmExportGuestHwvirtState(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
…
     if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_HWVIRT)
     {
+        if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable)
+        {
+            PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
+            PCVM      pVM  = pVCpu->CTX_SUFF(pVM);
+
+            HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);                              /* Nested VGIF is not supported yet. */
+            Assert(pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VGIF);  /* Physical hardware supports VGIF. */
+            Assert(HMIsSvmVGifActive(pVM));                                      /* Outer VM has enabled VGIF. */
+            NOREF(pVM);
+
+            pVmcb->ctrl.IntCtrl.n.u1VGif = CPUMGetGuestGif(pCtx);
+        }
+
         /*
          * Ensure the nested-guest pause-filter counters don't exceed the outer guest values esp.
…
          * and may continue execution in SVM R0 without a nested-guest #VMEXIT in between.
          */
-        PVMCC        pVM             = pVCpu->CTX_SUFF(pVM);
-        PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
+        PVMCC        pVM       = pVCpu->CTX_SUFF(pVM);
+        PSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl;
         uint16_t const uGuestPauseFilterCount     = pVM->hm.s.svm.cPauseFilter;
         uint16_t const uGuestPauseFilterThreshold = pVM->hm.s.svm.cPauseFilterThresholdTicks;
…
         {
             PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
-            pVmcbNstGstCtrl->u16PauseFilterCount     = RT_MIN(pCtx->hwvirt.svm.cPauseFilter, uGuestPauseFilterCount);
-            pVmcbNstGstCtrl->u16PauseFilterThreshold = RT_MIN(pCtx->hwvirt.svm.cPauseFilterThreshold, uGuestPauseFilterThreshold);
-            pVmcbNstGstCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+            pVmcbCtrl->u16PauseFilterCount     = RT_MIN(pCtx->hwvirt.svm.cPauseFilter, uGuestPauseFilterCount);
+            pVmcbCtrl->u16PauseFilterThreshold = RT_MIN(pCtx->hwvirt.svm.cPauseFilterThreshold, uGuestPauseFilterThreshold);
         }
         else
         {
-            pVmcbNstGstCtrl->u16PauseFilterCount     = uGuestPauseFilterCount;
-            pVmcbNstGstCtrl->u16PauseFilterThreshold = uGuestPauseFilterThreshold;
-        }
+            /** @todo r=ramshankar: We can turn these assignments into assertions. */
+            pVmcbCtrl->u16PauseFilterCount     = uGuestPauseFilterCount;
+            pVmcbCtrl->u16PauseFilterThreshold = uGuestPauseFilterThreshold;
+        }
+        pVmcbCtrl->u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
 
         pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_HWVIRT;
     }
 }
-#endif
+
 
 /**
…
 static int hmR0SvmExportGuestApicTpr(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
 {
+    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
+
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR)
     {
…
 
 /**
- * Sets up the exception interrupts required for guest (or nested-guest)
- * execution in the VMCB.
+ * Sets up the exception interrupts required for guest execution in the VMCB.
  *
  * @param   pVCpu       The cross context virtual CPU structure.
…
 static void hmR0SvmExportGuestXcptIntercepts(PVMCPUCC pVCpu, PSVMVMCB pVmcb)
 {
-    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
 
     /* If we modify intercepts from here, please check & adjust hmR0SvmMergeVmcbCtrlsNested() if required. */
-    if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_SVM_XCPT_INTERCEPTS)
+    if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_SVM_XCPT_INTERCEPTS)
     {
         /* Trap #UD for GIM provider (e.g. for hypercalls). */
…
 
         /* The remaining intercepts are handled elsewhere, e.g. in hmR0SvmExportGuestCR0(). */
-        pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_SVM_XCPT_INTERCEPTS;
+        ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_SVM_XCPT_INTERCEPTS);
     }
 }
…
 
 /**
- * Exports the guest state from the guest-CPU context into the VMCB.
- *
- * The CPU state will be loaded from these fields on every successful VM-entry.
- * Also sets up the appropriate VMRUN function to execute guest code based on
- * the guest CPU mode.
+ * Exports the guest or nested-guest state from the virtual-CPU context into the
+ * VMCB.
+ *
+ * Also sets up the appropriate VMRUN function to execute guest or nested-guest
+ * code based on the virtual-CPU mode.
  *
  * @returns VBox status code.
- * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   pSvmTransient   Pointer to the SVM-transient structure.
  *
  * @remarks No-long-jump zone!!!
  */
-static int hmR0SvmExportGuestState(PVMCPUCC pVCpu)
+static int hmR0SvmExportGuestState(PVMCPUCC pVCpu, PCSVMTRANSIENT pSvmTransient)
 {
     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x);
 
-    PSVMVMCB  pVmcb = pVCpu->hm.s.svm.pVmcb;
+    PSVMVMCB  pVmcb = hmR0SvmGetCurrentVmcb(pVCpu);
     PCCPUMCTX pCtx  = &pVCpu->cpum.GstCtx;
-
     Assert(pVmcb);
-    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
 
     pVmcb->guest.u64RIP    = pCtx->rip;
…
     pVmcb->guest.u64RFlags = pCtx->eflags.u32;
     pVmcb->guest.u64RAX    = pCtx->rax;
-#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
-    if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable)
-    {
-        Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VGIF);  /* Hardware supports it. */
-        Assert(HMIsSvmVGifActive(pVCpu->CTX_SUFF(pVM)));                                      /* VM has configured it. */
-        pVmcb->ctrl.IntCtrl.n.u1VGif = CPUMGetGuestGif(pCtx);
-    }
-#endif
 
+    bool const fIsNestedGuest = pSvmTransient->fIsNestedGuest;
     RTCCUINTREG const fEFlags = ASMIntDisableFlags();
 
     int rc = hmR0SvmExportGuestControlRegs(pVCpu, pVmcb);
     AssertRCReturnStmt(rc, ASMSetFlags(fEFlags), rc);
-
     hmR0SvmExportGuestSegmentRegs(pVCpu, pVmcb);
     hmR0SvmExportGuestMsrs(pVCpu, pVmcb);
-    hmR0SvmExportGuestXcptIntercepts(pVCpu, pVmcb);
+    hmR0SvmExportGuestHwvirtState(pVCpu, pVmcb);
 
     ASMSetFlags(fEFlags);
 
-    /* hmR0SvmExportGuestApicTpr() must be called -after- hmR0SvmExportGuestMsrs() as we
-       otherwise we would overwrite the LSTAR MSR that we use for TPR patching. */
-    hmR0SvmExportGuestApicTpr(pVCpu, pVmcb);
+    if (!fIsNestedGuest)
+    {
+        /* hmR0SvmExportGuestApicTpr() must be called -after- hmR0SvmExportGuestMsrs() as we
+           otherwise we would overwrite the LSTAR MSR that we use for TPR patching. */
+        hmR0SvmExportGuestApicTpr(pVCpu, pVmcb);
+        hmR0SvmExportGuestXcptIntercepts(pVCpu, pVmcb);
+    }
 
     rc = hmR0SvmSelectVMRunHandler(pVCpu);
…
 
     /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */
-    ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~(  HM_CHANGED_GUEST_RIP
-                                                  | HM_CHANGED_GUEST_RFLAGS
-                                                  | HM_CHANGED_GUEST_GPRS_MASK
-                                                  | HM_CHANGED_GUEST_X87
-                                                  | HM_CHANGED_GUEST_SSE_AVX
-                                                  | HM_CHANGED_GUEST_OTHER_XSAVE
-                                                  | HM_CHANGED_GUEST_XCRx
-                                                  | HM_CHANGED_GUEST_TSC_AUX
-                                                  | HM_CHANGED_GUEST_OTHER_MSRS
-                                                  | HM_CHANGED_GUEST_HWVIRT
-                                                  | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_SVM_XCPT_INTERCEPTS)));
+    uint64_t fUnusedMask = HM_CHANGED_GUEST_RIP
+                         | HM_CHANGED_GUEST_RFLAGS
+                         | HM_CHANGED_GUEST_GPRS_MASK
+                         | HM_CHANGED_GUEST_X87
+                         | HM_CHANGED_GUEST_SSE_AVX
+                         | HM_CHANGED_GUEST_OTHER_XSAVE
+                         | HM_CHANGED_GUEST_XCRx
+                         | HM_CHANGED_GUEST_TSC_AUX
+                         | HM_CHANGED_GUEST_OTHER_MSRS;
+    if (fIsNestedGuest)
+        fUnusedMask |= HM_CHANGED_SVM_XCPT_INTERCEPTS
+                    |  HM_CHANGED_GUEST_APIC_TPR;
+
+    ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~(  fUnusedMask
+                                                  | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_SVM_MASK)));
 
 #ifdef VBOX_STRICT
…
     PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl;
 
+    HMSVM_ASSERT_IN_NESTED_GUEST(&pVCpu->cpum.GstCtx);
+
     /*
      * First cache the nested-guest VMCB fields we may potentially modify.
…
         Assert(RT_BOOL(pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging) == pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
     }
-}
-
-
-/**
- * Exports the nested-guest state into the VMCB.
- *
- * We need to export the entire state as we could be continuing nested-guest
- * execution at any point (not just immediately after VMRUN) and thus the VMCB
- * can be out-of-sync with the nested-guest state if it was executed in IEM.
- *
- * @returns VBox status code.
- * @param   pVCpu   The cross context virtual CPU structure.
- * @param   pCtx    Pointer to the guest-CPU context.
- *
- * @remarks No-long-jump zone!!!
- */
-static int hmR0SvmExportGuestStateNested(PVMCPUCC pVCpu)
-{
-    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x);
-
-    PCCPUMCTX pCtx        = &pVCpu->cpum.GstCtx;
-    PSVMVMCB  pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb);
-    Assert(pVmcbNstGst);
-
-    hmR0SvmSetupVmcbNested(pVCpu);
-
-    pVmcbNstGst->guest.u64RIP    = pCtx->rip;
-    pVmcbNstGst->guest.u64RSP    = pCtx->rsp;
-    pVmcbNstGst->guest.u64RFlags = pCtx->eflags.u32;
-    pVmcbNstGst->guest.u64RAX    = pCtx->rax;
-
-    RTCCUINTREG const fEFlags = ASMIntDisableFlags();
-
-    int rc = hmR0SvmExportGuestControlRegs(pVCpu, pVmcbNstGst);
-    AssertRCReturnStmt(rc, ASMSetFlags(fEFlags), rc);
-
-    hmR0SvmExportGuestSegmentRegs(pVCpu, pVmcbNstGst);
-    hmR0SvmExportGuestMsrs(pVCpu, pVmcbNstGst);
-    hmR0SvmExportGuestHwvirtStateNested(pVCpu, pVmcbNstGst);
-
-    ASMSetFlags(fEFlags);
-
-    /* Nested VGIF not supported yet. */
-    Assert(!pVmcbNstGst->ctrl.IntCtrl.n.u1VGifEnable);
-
-    rc = hmR0SvmSelectVMRunHandler(pVCpu);
-    AssertRCReturn(rc, rc);
-
-    /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */
-    ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~(  HM_CHANGED_GUEST_RIP
-                                                  | HM_CHANGED_GUEST_RFLAGS
-                                                  | HM_CHANGED_GUEST_GPRS_MASK
-                                                  | HM_CHANGED_GUEST_APIC_TPR
-                                                  | HM_CHANGED_GUEST_X87
-                                                  | HM_CHANGED_GUEST_SSE_AVX
-                                                  | HM_CHANGED_GUEST_OTHER_XSAVE
-                                                  | HM_CHANGED_GUEST_XCRx
-                                                  | HM_CHANGED_GUEST_TSC_AUX
-                                                  | HM_CHANGED_GUEST_OTHER_MSRS
-                                                  | HM_CHANGED_SVM_XCPT_INTERCEPTS
-                                                  | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_SVM_MASK)));
-
-#ifdef VBOX_STRICT
-    /*
-     * All of the guest-CPU state and SVM keeper bits should be exported here by now, except
-     * for the host-context and/or shared host-guest context bits.
-     */
-    uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged);
-    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
-    AssertMsg(!(fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)),
-              ("fCtxChanged=%#RX64\n", fCtxChanged));
-
-    /*
-     * If we need to log state that isn't always imported, we'll need to import them here.
-     * See hmR0SvmPostRunGuest() for which part of the state is imported uncondtionally.
-     */
-    hmR0SvmLogState(pVCpu, pVmcbNstGst, "hmR0SvmExportGuestStateNested", 0 /* fFlags */, 0 /* uVerbose */);
-#endif
-
-    STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExportGuestState, x);
-    return rc;
 }
 #endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
…
 
     /*
+     * Set up the nested-guest VMCB for execution using hardware-assisted SVM.
+     */
+    if (pSvmTransient->fIsNestedGuest)
+        hmR0SvmSetupVmcbNested(pVCpu);
+
+    /*
      * Export the guest state bits that are not shared with the host in any way as we can
      * longjmp or get preempted in the midst of exporting some of the state.
      */
-    if (!pSvmTransient->fIsNestedGuest)
-        rc = hmR0SvmExportGuestState(pVCpu);
-    else
-        rc = hmR0SvmExportGuestStateNested(pVCpu);
+    rc = hmR0SvmExportGuestState(pVCpu, pSvmTransient);
     AssertRCReturn(rc, rc);
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull);