Changeset 72809 in vbox
- Timestamp: Jul 3, 2018 7:39:38 AM
- svn:sync-xref-src-repo-rev: 123352
- File: 1 edited
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r72808)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r72809)
@@ -2234 +2234 @@
             /* Neither SINGLE nor ALL-context flush types for VPID is supported by the CPU. Ignore VPID capability. */
             if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
-                LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n"));
+                LogRelFunc(("Only INDIV_ADDR supported. Ignoring VPID.\n"));
             if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
-                LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
+                LogRelFunc(("Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
             pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED;
             pVM->hm.s.vmx.fVpid = false;
@@ -2270 +2270 @@
  * @returns VBox status code.
  * @param   pVCpu   The cross context virtual CPU structure.
+ *
+ * @remarks We don't really care about optimizing vmwrites here as it's done only
+ *          once per VM and hence we don't care about VMCS-field cache comparisons.
  */
 static int hmR0VmxSetupPinCtls(PVMCPU pVCpu)
 {
-    AssertPtr(pVCpu);
-
     PVM pVM = pVCpu->CTX_SUFF(pVM);
-    uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0;      /* Bits set here must always be set. */
-    uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1;         /* Bits cleared here must always be cleared. */
+    uint32_t       fVal = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0;    /* Bits set here must always be set. */
+    uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1;       /* Bits cleared here must always be cleared. */
 
     fVal |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT             /* External interrupts cause a VM-exit. */
@@ -2304 +2305 @@
     if ((fVal & fZap) != fVal)
     {
-        LogRel(("hmR0VmxSetupPinCtls: Invalid pin-based VM-execution controls combo! Cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
-                pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0, fVal, fZap));
+        LogRelFunc(("Invalid pin-based VM-execution controls combo! Cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
+                    pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0, fVal, fZap));
         pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
         return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     }
 
+    /* Commit it to the VMCS and update our cache. */
     int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, fVal);
     AssertRCReturn(rc, rc);
-
     pVCpu->hm.s.vmx.u32PinCtls = fVal;
-    return rc;
-}
-
-
-/**
- * Sets up processor-based VM-execution controls in the VMCS.
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Sets up secondary processor-based VM-execution controls in the VMCS.
  *
  * @returns VBox status code.
  * @param   pVCpu   The cross context virtual CPU structure.
+ *
+ * @remarks We don't really care about optimizing vmwrites here as it's done only
+ *          once per VM and hence we don't care about VMCS-field cache comparisons.
+ */
+static int hmR0VmxSetupProcCtls2(PVMCPU pVCpu)
+{
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    uint32_t       fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0;    /* Bits set here must be set in the VMCS. */
+    uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1;       /* Bits cleared here must be cleared in the VMCS. */
+
+    /* WBINVD causes a VM-exit. */
+    if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
+        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT;
+
+    /* Enable EPT (aka nested-paging). */
+    if (pVM->hm.s.fNestedPaging)
+        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_EPT;
+
+    /*
+     * Enable the INVPCID instruction if supported by the hardware and we expose
+     * it to the guest. Without this, guest executing INVPCID would cause a #UD.
+     */
+    if (   (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
+        && pVM->cpum.ro.GuestFeatures.fInvpcid)
+        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
+
+    /* Enable VPID. */
+    if (pVM->hm.s.vmx.fVpid)
+        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VPID;
+
+    /* Enable Unrestricted guest execution. */
+    if (pVM->hm.s.vmx.fUnrestrictedGuest)
+        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST;
+
+#if 0
+    if (pVM->hm.s.fVirtApicRegs)
+    {
+        /* Enable APIC-register virtualization. */
+        Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT);
+        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT;
+
+        /* Enable virtual-interrupt delivery. */
+        Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY);
+        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY;
+    }
+#endif
+
+    /* Enable Virtual-APIC page accesses if supported by the CPU. This is where the TPR shadow resides. */
+    /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
+     *        done dynamically. */
+    if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
+    {
+        Assert(pVM->hm.s.vmx.HCPhysApicAccess);
+        Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff));    /* Bits 11:0 MBZ. */
+        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC;           /* Virtualize APIC accesses. */
+        int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
+        AssertRCReturn(rc, rc);
+    }
+
+    /* Enable RDTSCP. */
+    if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
+        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;
+
+    /* Enable Pause-Loop exiting. */
+    if (   pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT
+        && pVM->hm.s.vmx.cPleGapTicks
+        && pVM->hm.s.vmx.cPleWindowTicks)
+    {
+        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT;
+
+        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks);
+        rc    |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks);
+        AssertRCReturn(rc, rc);
+    }
+
+    if ((fVal & fZap) != fVal)
+    {
+        LogRelFunc(("Invalid secondary processor-based VM-execution controls combo! cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
+                    pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0, fVal, fZap));
+        pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
+        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
+    }
+
+    /* Commit it to the VMCS and update our cache. */
+    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, fVal);
+    AssertRCReturn(rc, rc);
+    pVCpu->hm.s.vmx.u32ProcCtls2 = fVal;
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Sets up processor-based VM-execution controls in the VMCS.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu   The cross context virtual CPU structure.
+ *
+ * @remarks We don't really care about optimizing vmwrites here as it's done only
+ *          once per VM and hence we don't care about VMCS-field cache comparisons.
  */
 static int hmR0VmxSetupProcCtls(PVMCPU pVCpu)
 {
-    AssertPtr(pVCpu);
-
-    int rc = VERR_INTERNAL_ERROR_5;
     PVM pVM = pVCpu->CTX_SUFF(pVM);
-    uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0;     /* Bits set here must be set in the VMCS. */
-    uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;        /* Bits cleared here must be cleared in the VMCS. */
+    uint32_t       fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0;    /* Bits set here must be set in the VMCS. */
+    uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;       /* Bits cleared here must be cleared in the VMCS. */
 
     fVal |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT                /* HLT causes a VM-exit. */
@@ -2345 +2444 @@
         || (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
     {
-        LogRel(("hmR0VmxSetupProcCtls: Unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
+        LogRelFunc(("Unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
         pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
         return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
@@ -2365 +2464 @@
         Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
         Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff));    /* Bits 11:0 MBZ. */
-        rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
-        rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
+        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
+        rc    |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
         AssertRCReturn(rc, rc);
@@ -2394 +2493 @@
         Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
         Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff));    /* Bits 11:0 MBZ. */
-        rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
+        int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
         AssertRCReturn(rc, rc);
@@ -2406 +2505 @@
         hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
         hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
-
 #if HC_ARCH_BITS == 64
         /*
@@ -2435 +2533 @@
     if ((fVal & fZap) != fVal)
     {
-        LogRel(("hmR0VmxSetupProcCtls: Invalid processor-based VM-execution controls combo! cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
-                pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0, fVal, fZap));
+        LogRelFunc(("Invalid processor-based VM-execution controls combo! cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
+                    pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0, fVal, fZap));
         pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
         return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     }
 
-    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, fVal);
+    /* Commit it to the VMCS and update our cache. */
+    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, fVal);
     AssertRCReturn(rc, rc);
-
     pVCpu->hm.s.vmx.u32ProcCtls = fVal;
 
-    /*
-     * Secondary processor-based VM-execution controls.
-     */
-    if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
-    {
-        fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0;     /* Bits set here must be set in the VMCS. */
-        fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1;        /* Bits cleared here must be cleared in the VMCS. */
-
-        if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
-            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT;         /* WBINVD causes a VM-exit. */
-
-        if (pVM->hm.s.fNestedPaging)
-            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_EPT;                 /* Enable EPT. */
-
-        /*
-         * Enable the INVPCID instruction if supported by the hardware and we expose
-         * it to the guest. Without this, guest executing INVPCID would cause a #UD.
-         */
-        if (   (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
-            && pVM->cpum.ro.GuestFeatures.fInvpcid)
-        {
-            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
-        }
-
-        if (pVM->hm.s.vmx.fVpid)
-            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VPID;                /* Enable VPID. */
-
-        if (pVM->hm.s.vmx.fUnrestrictedGuest)
-            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST;  /* Enable Unrestricted Execution. */
-
-#if 0
-        if (pVM->hm.s.fVirtApicRegs)
-        {
-            Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT);
-            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT;       /* Enable APIC-register virtualization. */
-
-            Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY);
-            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY;  /* Enable virtual-interrupt delivery. */
-        }
-#endif
-
-        /* Enable Virtual-APIC page accesses if supported by the CPU. This is essentially where the TPR shadow resides. */
-        /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
-         *        done dynamically. */
-        if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
-        {
-            Assert(pVM->hm.s.vmx.HCPhysApicAccess);
-            Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff));    /* Bits 11:0 MBZ. */
-            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC;           /* Virtualize APIC accesses. */
-            rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
-            AssertRCReturn(rc, rc);
-        }
-
-        if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
-            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;              /* Enable RDTSCP support. */
-
-        if (   pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT
-            && pVM->hm.s.vmx.cPleGapTicks
-            && pVM->hm.s.vmx.cPleWindowTicks)
-        {
-            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT;     /* Enable pause-loop exiting. */
-
-            rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks);
-            rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks);
-            AssertRCReturn(rc, rc);
-        }
-
-        if ((fVal & fZap) != fVal)
-        {
-            LogRel(("hmR0VmxSetupProcCtls: Invalid secondary processor-based VM-execution controls combo! "
-                    "cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0, fVal, fZap));
-            pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
-            return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
-        }
-
-        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, fVal);
-        AssertRCReturn(rc, rc);
-
-        pVCpu->hm.s.vmx.u32ProcCtls2 = fVal;
-    }
-    else if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest))
-    {
-        LogRel(("hmR0VmxSetupProcCtls: Unrestricted Guest set as true when secondary processor-based VM-execution controls not "
-                "available\n"));
+    /* Set up secondary processor-based VM-execution controls if the CPU supports it. */
+    if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
+        return hmR0VmxSetupProcCtls2(pVCpu);
+
+    /* Sanity check, should not really happen. */
+    if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest))
+    {
+        LogRelFunc(("Unrestricted Guest enabled when secondary processor-based VM-execution controls not available\n"));
         pVCpu->hm.s.u32HMError = VMX_UFC_INVALID_UX_COMBO;
         return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     }
 
+    /* Old CPUs without secondary processor-based VM-execution controls would end up here. */
     return VINF_SUCCESS;
 }
@@ -2655 +2678 @@
     if (RT_FAILURE(rc))
     {
-        LogRel(("VMXR0InitVM: hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
+        LogRelFunc(("hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
         return rc;
     }
@@ -2705 +2728 @@
             || !pVM->hm.s.vmx.pRealModeTSS))
     {
-        LogRel(("VMXR0SetupVM: Invalid real-on-v86 state.\n"));
+        LogRelFunc(("Invalid real-on-v86 state.\n"));
        return VERR_INTERNAL_ERROR;
     }
@@ -2717 +2740 @@
     if (RT_FAILURE(rc))
     {
-        LogRel(("VMXR0SetupVM: hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
+        LogRelFunc(("hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
         return rc;
     }
@@ -3195 +3218 @@
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_CTLS)
     {
-        PVM pVM       = pVCpu->CTX_SUFF(pVM);
-        uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0;    /* Bits set here must be set in the VMCS. */
-        uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1;       /* Bits cleared here must be cleared in the VMCS. */
+        PVM pVM = pVCpu->CTX_SUFF(pVM);
+        uint32_t       fVal = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0;    /* Bits set here must be set in the VMCS. */
+        uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1;       /* Bits cleared here must be cleared in the VMCS. */
 
         /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supports the 1-setting of this bit. */
@@ -3236 +3259 @@
         }
 
-        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, fVal);
-        AssertRCReturn(rc, rc);
-
-        pVCpu->hm.s.vmx.u32EntryCtls = fVal;
+        /* Commit it to the VMCS and update our cache. */
+        if (pVCpu->hm.s.vmx.u32EntryCtls != fVal)
+        {
+            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, fVal);
+            AssertRCReturn(rc, rc);
+            pVCpu->hm.s.vmx.u32EntryCtls = fVal;
+        }
+
         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_ENTRY_CTLS);
     }
@@ -3261 +3288 @@
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_EXIT_CTLS)
     {
-        PVM pVM       = pVCpu->CTX_SUFF(pVM);
-        uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0;     /* Bits set here must be set in the VMCS. */
-        uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1;        /* Bits cleared here must be cleared in the VMCS. */
+        PVM pVM = pVCpu->CTX_SUFF(pVM);
+        uint32_t       fVal = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0;     /* Bits set here must be set in the VMCS. */
+        uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1;        /* Bits cleared here must be cleared in the VMCS. */
 
         /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
@@ -3306 +3333 @@
          *        VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR. */
 
+        /* Enable saving of the VMX preemption timer value on VM-exit. */
         if (    pVM->hm.s.vmx.fUsePreemptTimer
             && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER))
@@ -3312 +3340 @@
         if ((fVal & fZap) != fVal)
         {
-            LogRel(("hmR0VmxSetupProcCtls: Invalid VM-exit controls combo! cpu=%RX64 fVal=%RX64 fZap=%RX64\n",
-                    pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0, fVal, fZap));
+            LogRelFunc(("Invalid VM-exit controls combo! cpu=%RX64 fVal=%RX64 fZap=%RX64\n",
+                        pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0, fVal, fZap));
             pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
             return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
         }
 
-        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, fVal);
-        AssertRCReturn(rc, rc);
-
-        pVCpu->hm.s.vmx.u32ExitCtls = fVal;
+        /* Commit it to the VMCS and update our cache. */
+        if (pVCpu->hm.s.vmx.u32ExitCtls != fVal)
+        {
+            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, fVal);
+            AssertRCReturn(rc, rc);
+            pVCpu->hm.s.vmx.u32ExitCtls = fVal;
+        }
+
         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_EXIT_CTLS);
     }
@@ -3765 +3797 @@
         AssertRCReturn(rc, rc);
 
+        /* Update our caches. */
         pVCpu->hm.s.vmx.u32CR0Mask  = uCR0Mask;
         pVCpu->hm.s.vmx.u32ProcCtls = uProcCtls;
@@ -3972 +4005 @@
 
         /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
-        uint64_t fSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
-        uint64_t fZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
+        uint64_t const fSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
+        uint64_t const fZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
         uGuestCR4 |= fSetCR4;
         uGuestCR4 &= fZapCR4;
 
-        /* Setup CR4 mask. CR4 flags owned by the host, if the guest attempts to change them, that would cause a VM-exit. */
+        /* Setup CR4 mask. CR4 flags owned by the host, if the guest attempts to change them,
+           that would cause a VM-exit. */
         uint32_t u32CR4Mask = X86_CR4_VME
                             | X86_CR4_PAE
@@ -3988 +4022 @@
             u32CR4Mask |= X86_CR4_PCIDE;
 
-        /* Write VT-x's view of the guest CR4, the CR4 modify mask and the read-only CR4 shadow into the VMCS. */
+        /* Write VT-x's view of the guest CR4, the CR4 modify mask and the read-only CR4 shadow
+           into the VMCS and update our cache. */
         rc  = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, uGuestCR4);
         rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, uShadowCR4);
@@ -4145 +4180 @@
 
     /*
-     * Update the processor-based VM-execution controls for MOV-DRx intercepts and the monitor-trap flag.
+     * Update the processor-based VM-execution controls with the MOV-DRx intercepts and the
+     * monitor-trap flag and update our cache.
      */
     if (uProcCtls != pVCpu->hm.s.vmx.u32ProcCtls)
@@ -5166 +5202 @@
  * @param   paParam   Array of 32-bit parameters.
  */
-VMMR0DECL(int) VMXR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp,
-                                         uint32_t cParams, uint32_t *paParam)
+VMMR0DECL(int) VMXR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp, uint32_t cParams, uint32_t *paParam)
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);
@@ -5556 +5591 @@
 static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu)
 {
-    int  rc;
-    bool fOffsettedTsc;
-    bool fParavirtTsc;
-    PVM  pVM = pVCpu->CTX_SUFF(pVM);
+    bool     fOffsettedTsc;
+    bool     fParavirtTsc;
+    PVM      pVM = pVCpu->CTX_SUFF(pVM);
+    uint64_t uTscOffset;
     if (pVM->hm.s.vmx.fUsePreemptTimer)
     {
-        uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset,
-                                                                     &fOffsettedTsc, &fParavirtTsc);
+        uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &uTscOffset, &fOffsettedTsc, &fParavirtTsc);
 
         /* Make sure the returned values have sane upper and lower boundaries. */
@@ -5572 +5606 @@
 
         uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
-        rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount);   AssertRC(rc);
+        int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount);
+        AssertRC(rc);
     }
     else
-        fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fParavirtTsc);
+        fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
 
     /** @todo later optimize this to be done elsewhere and not before every
@@ -5584 +5619 @@
            information before every VM-entry, hence disable it for performance sake. */
 #if 0
-        rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
+        int rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
         AssertRC(rc);
 #endif
@@ -5590 +5625 @@
     }
 
-    if (fOffsettedTsc && RT_LIKELY(!pVCpu->hm.s.fDebugWantRdTscExit))
-    {
-        /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
-        rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset);    AssertRC(rc);
-
-        pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
-        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);           AssertRC(rc);
+    if (   fOffsettedTsc
+        && RT_LIKELY(!pVCpu->hm.s.fDebugWantRdTscExit))
+    {
+        if (pVCpu->hm.s.vmx.u64TSCOffset != uTscOffset)
+        {
+            int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset);
+            AssertRC(rc);
+            pVCpu->hm.s.vmx.u64TSCOffset = uTscOffset;
+        }
+
+        if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT)
+        {
+            pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
+            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
+            AssertRC(rc);
+        }
         STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
     }
@@ -5602 +5646 @@
     {
         /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
-        pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
-        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);           AssertRC(rc);
+        if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
+        {
+            pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
+            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
+            AssertRC(rc);
+        }
         STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
     }
@@ -6434 +6482 @@
     if (VMMRZCallRing3IsEnabled(pVCpu))
     {
-        VMMR0LogFlushDisable(pVCpu);
-
         if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
+        {
+            Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
             PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
+        }
 
         if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
@@ -6444 +6493 @@
         Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
         Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
-
-        VMMR0LogFlushEnable(pVCpu);
 
 
@@ -7782 +7829 @@
     if (!(uHostCR4 & X86_CR4_VMXE))
     {
-        LogRel(("VMXR0Enter: X86_CR4_VMXE bit in CR4 is not set!\n"));
+        LogRelFunc(("X86_CR4_VMXE bit in CR4 is not set!\n"));
         return VERR_VMX_X86_CR4_VMXE_CLEARED;
     }
@@ -10287 +10334 @@
      * CR0.
      */
-    uint32_t fSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
-    uint32_t fZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
+    uint32_t       fSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
+    uint32_t const fZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
     /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG).
        See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
@@ -10309 +10356 @@
      * CR4.
      */
-    uint64_t fSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
-    uint64_t fZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
+    uint64_t const fSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
+    uint64_t const fZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
 
     uint32_t uGuestCR4;
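Illustrative sketch (not part of the changeset): several hunks above replace unconditional VMWRITEs with a compare-against-cache step, only touching the VMCS when the recomputed control value differs from the value last written (see the u32EntryCtls, u32ExitCtls and RDTSC-exiting/TSC-offset hunks). The minimal, self-contained C sketch below shows that idea in isolation; the names VmcsField32, vmcsWriteCached32 and the stubbed vmx_vmwrite32() are hypothetical stand-ins, not VirtualBox APIs (the real code uses VMXWriteVmcs32() and the per-VCPU pVCpu->hm.s.vmx fields).

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the real VMWRITE wrapper (VMXWriteVmcs32 in VirtualBox). */
    static int vmx_vmwrite32(uint32_t uFieldEnc, uint32_t uValue)
    {
        /* A real implementation would execute the VMWRITE instruction here. */
        printf("VMWRITE field %#x <- %#x\n", uFieldEnc, uValue);
        return 0;
    }

    /* Hypothetical shadow of one 32-bit VMCS control field. */
    typedef struct VmcsField32
    {
        uint32_t uFieldEnc;   /* VMCS field encoding. */
        uint32_t uCache;      /* Last value committed to the VMCS. */
        bool     fCached;     /* Set once uCache is known to match the VMCS. */
    } VmcsField32;

    /* Write the field only when the newly computed value differs from the cached one,
       mirroring the "if (pVCpu->hm.s.vmx.u32EntryCtls != fVal)" pattern in the changeset. */
    static int vmcsWriteCached32(VmcsField32 *pField, uint32_t uValue)
    {
        if (pField->fCached && pField->uCache == uValue)
            return 0;                          /* Unchanged: skip the VMWRITE entirely. */

        int rc = vmx_vmwrite32(pField->uFieldEnc, uValue);
        if (rc == 0)
        {
            pField->uCache  = uValue;          /* Update the cache only after a successful write. */
            pField->fCached = true;
        }
        return rc;
    }

    int main(void)
    {
        VmcsField32 EntryCtls = { 0x4012 /* example field encoding */, 0, false };
        vmcsWriteCached32(&EntryCtls, 0x11ff);   /* First write goes through. */
        vmcsWriteCached32(&EntryCtls, 0x11ff);   /* Identical value: no VMWRITE issued. */
        return 0;
    }

The same reasoning applies to hmR0VmxUpdateTscOffsettingAndPreemptTimer() in the diff, which now checks the cached u32ProcCtls and u64TSCOffset values before rewriting the corresponding fields on the VM-entry hot path.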