VirtualBox

Changeset 72809 in vbox


Timestamp: Jul 3, 2018 7:39:38 AM
Author: vboxsync
svn:sync-xref-src-repo-rev: 123352
Message: VMM/HMVMXR0: Cleanup, assertions, more consts esp. in long functions to prevent accidental modifications.
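The "more consts" part is the substance of the cleanup: in functions that build VM-execution control words over a couple hundred lines, values that must stay fixed after initialization (the fZap allowed-1 masks, the CR0/CR4 fixed-bit masks) are now declared const, so a later accidental assignment fails at compile time instead of silently corrupting the control-word checks. A minimal sketch of the pattern, with a hypothetical CAPS structure standing in for the real VMX capability MSR fields:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical capability words standing in for the VMX MSR pair. */
    typedef struct CAPS { uint32_t disallowed0, allowed1; } CAPS;

    static int setupCtls(CAPS const *pCaps)
    {
        uint32_t       fVal = pCaps->disallowed0; /* Accumulates feature bits; stays writable. */
        uint32_t const fZap = pCaps->allowed1;    /* Fixed mask: 'fZap = 0;' below would no longer compile. */

        fVal |= 0x8;                              /* Enable some feature bit. */
        if ((fVal & fZap) != fVal)                /* Any bit set that the CPU does not allow? */
            return -1;                            /* Unsupported feature combination. */
        printf("controls: %#x\n", (unsigned)fVal);
        return 0;                                 /* Mirrors the explicit VINF_SUCCESS returns in the diff. */
    }

    int main(void)
    {
        CAPS const caps = { 0x1, 0xf };
        return setupCtls(&caps);
    }

The same hardening shows up throughout the diff below as uint32_t const fZap, uint64_t const fSetCR4/fZapCR4, and so on.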

File: 1 edited

  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r72808)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r72809)
@@ -2234,7 +2234,7 @@
                 /* Neither SINGLE nor ALL-context flush types for VPID is supported by the CPU. Ignore VPID capability. */
                 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
-                    LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n"));
+                    LogRelFunc(("Only INDIV_ADDR supported. Ignoring VPID.\n"));
                 if (pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
-                    LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
+                    LogRelFunc(("Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
                 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED;
                 pVM->hm.s.vmx.fVpid = false;
@@ -2270,12 +2270,13 @@
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
+ *
+ * @remarks We don't really care about optimizing vmwrites here as it's done only
+ *          once per VM and hence we don't care about VMCS-field cache comparisons.
  */
 static int hmR0VmxSetupPinCtls(PVMCPU pVCpu)
 {
-    AssertPtr(pVCpu);
-
     PVM pVM = pVCpu->CTX_SUFF(pVM);
-    uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0;         /* Bits set here must always be set. */
-    uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1;            /* Bits cleared here must always be cleared. */
+    uint32_t       fVal = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0;   /* Bits set here must always be set. */
+    uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1;      /* Bits cleared here must always be cleared. */
 
     fVal |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT              /* External interrupts cause a VM-exit. */
@@ -2304,32 +2305,130 @@
     if ((fVal & fZap) != fVal)
     {
-        LogRel(("hmR0VmxSetupPinCtls: Invalid pin-based VM-execution controls combo! Cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
-                pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0, fVal, fZap));
+        LogRelFunc(("Invalid pin-based VM-execution controls combo! Cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
+                    pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0, fVal, fZap));
         pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
         return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     }
 
+    /* Commit it to the VMCS and update our cache. */
     int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, fVal);
     AssertRCReturn(rc, rc);
-
     pVCpu->hm.s.vmx.u32PinCtls = fVal;
-    return rc;
-}
-
-
-/**
- * Sets up processor-based VM-execution controls in the VMCS.
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Sets up secondary processor-based VM-execution controls in the VMCS.
  *
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
+ *
+ * @remarks We don't really care about optimizing vmwrites here as it's done only
+ *          once per VM and hence we don't care about VMCS-field cache comparisons.
+ */
+static int hmR0VmxSetupProcCtls2(PVMCPU pVCpu)
+{
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
+    uint32_t       fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
+    uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1;    /* Bits cleared here must be cleared in the VMCS. */
+
+    /* WBINVD causes a VM-exit. */
+    if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
+        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT;
+
+    /* Enable EPT (aka nested-paging). */
+    if (pVM->hm.s.fNestedPaging)
+        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_EPT;
+
+    /*
+     * Enable the INVPCID instruction if supported by the hardware and we expose
+     * it to the guest. Without this, guest executing INVPCID would cause a #UD.
+     */
+    if (   (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
+        && pVM->cpum.ro.GuestFeatures.fInvpcid)
+        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
+
+    /* Enable VPID. */
+    if (pVM->hm.s.vmx.fVpid)
+        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VPID;
+
+    /* Enable Unrestricted guest execution. */
+    if (pVM->hm.s.vmx.fUnrestrictedGuest)
+        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST;
+
+#if 0
+    if (pVM->hm.s.fVirtApicRegs)
+    {
+        /* Enable APIC-register virtualization. */
+        Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT);
+        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT;
+
+        /* Enable virtual-interrupt delivery. */
+        Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY);
+        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY;
+    }
+#endif
+
+    /* Enable Virtual-APIC page accesses if supported by the CPU. This is where the TPR shadow resides. */
+    /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
+     *        done dynamically. */
+    if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
+    {
+        Assert(pVM->hm.s.vmx.HCPhysApicAccess);
+        Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff));    /* Bits 11:0 MBZ. */
+        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC;           /* Virtualize APIC accesses. */
+        int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
+        AssertRCReturn(rc, rc);
+    }
+
+    /* Enable RDTSCP. */
+    if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
+        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;
+
+    /* Enable Pause-Loop exiting. */
+    if (   pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT
+        && pVM->hm.s.vmx.cPleGapTicks
+        && pVM->hm.s.vmx.cPleWindowTicks)
+    {
+        fVal |= VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT;
+
+        int rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks);
+        rc     |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks);
+        AssertRCReturn(rc, rc);
+    }
+
+    if ((fVal & fZap) != fVal)
+    {
+        LogRelFunc(("Invalid secondary processor-based VM-execution controls combo! cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
+                    pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0, fVal, fZap));
+        pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
+        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
+    }
+
+    /* Commit it to the VMCS and update our cache. */
+    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, fVal);
+    AssertRCReturn(rc, rc);
+    pVCpu->hm.s.vmx.u32ProcCtls2 = fVal;
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Sets up processor-based VM-execution controls in the VMCS.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The cross context virtual CPU structure.
+ *
+ * @remarks We don't really care about optimizing vmwrites here as it's done only
+ *          once per VM and hence we don't care about VMCS-field cache comparisons.
  */
 static int hmR0VmxSetupProcCtls(PVMCPU pVCpu)
 {
-    AssertPtr(pVCpu);
-
-    int rc = VERR_INTERNAL_ERROR_5;
     PVM pVM = pVCpu->CTX_SUFF(pVM);
-    uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0; /* Bits set here must be set in the VMCS. */
-    uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;    /* Bits cleared here must be cleared in the VMCS. */
+    uint32_t       fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0; /* Bits set here must be set in the VMCS. */
+    uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;     /* Bits cleared here must be cleared in the VMCS. */
 
     fVal |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT                      /* HLT causes a VM-exit. */
@@ -2345,5 +2444,5 @@
         ||  (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
     {
-        LogRel(("hmR0VmxSetupProcCtls: Unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
+        LogRelFunc(("Unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
         pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
         return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
@@ -2365,6 +2464,6 @@
         Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
         Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff));        /* Bits 11:0 MBZ. */
-        rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
-        rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
+        int rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
+        rc     |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
         AssertRCReturn(rc, rc);
 
@@ -2394,5 +2493,5 @@
         Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
         Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff));       /* Bits 11:0 MBZ. */
-        rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
+        int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
         AssertRCReturn(rc, rc);
 
@@ -2406,5 +2505,4 @@
         hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE,        VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
         hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE,        VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
-
 #if HC_ARCH_BITS == 64
         /*
@@ -2435,103 +2533,28 @@
     if ((fVal & fZap) != fVal)
     {
-        LogRel(("hmR0VmxSetupProcCtls: Invalid processor-based VM-execution controls combo! cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
-                pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0, fVal, fZap));
+        LogRelFunc(("Invalid processor-based VM-execution controls combo! cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n",
+                    pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0, fVal, fZap));
         pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
         return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     }
 
-    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, fVal);
+    /* Commit it to the VMCS and update our cache. */
+    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, fVal);
     AssertRCReturn(rc, rc);
-
     pVCpu->hm.s.vmx.u32ProcCtls = fVal;
 
-    /*
-     * Secondary processor-based VM-execution controls.
-     */
-    if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
-    {
-        fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0;     /* Bits set here must be set in the VMCS. */
-        fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1;        /* Bits cleared here must be cleared in the VMCS. */
-
-        if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
-            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT;         /* WBINVD causes a VM-exit. */
-
-        if (pVM->hm.s.fNestedPaging)
-            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_EPT;                 /* Enable EPT. */
-
-        /*
-         * Enable the INVPCID instruction if supported by the hardware and we expose
-         * it to the guest. Without this, guest executing INVPCID would cause a #UD.
-         */
-        if (   (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
-            && pVM->cpum.ro.GuestFeatures.fInvpcid)
-        {
-            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
-        }
-
-        if (pVM->hm.s.vmx.fVpid)
-            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VPID;                /* Enable VPID. */
-
-        if (pVM->hm.s.vmx.fUnrestrictedGuest)
-            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST;  /* Enable Unrestricted Execution. */
-
-#if 0
-        if (pVM->hm.s.fVirtApicRegs)
-        {
-            Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT);
-            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT;       /* Enable APIC-register virtualization. */
-
-            Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY);
-            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY;  /* Enable virtual-interrupt delivery. */
-        }
-#endif
-
-        /* Enable Virtual-APIC page accesses if supported by the CPU. This is essentially where the TPR shadow resides. */
-        /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
-         *        done dynamically. */
-        if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
-        {
-            Assert(pVM->hm.s.vmx.HCPhysApicAccess);
-            Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff));    /* Bits 11:0 MBZ. */
-            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC;           /* Virtualize APIC accesses. */
-            rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
-            AssertRCReturn(rc, rc);
-        }
-
-        if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
-            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;              /* Enable RDTSCP support. */
-
-        if (   pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT
-            && pVM->hm.s.vmx.cPleGapTicks
-            && pVM->hm.s.vmx.cPleWindowTicks)
-        {
-            fVal |= VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT;     /* Enable pause-loop exiting. */
-
-            rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks);
-            rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_WINDOW, pVM->hm.s.vmx.cPleWindowTicks);
-            AssertRCReturn(rc, rc);
-        }
-
-        if ((fVal & fZap) != fVal)
-        {
-            LogRel(("hmR0VmxSetupProcCtls: Invalid secondary processor-based VM-execution controls combo! "
-                    "cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0, fVal, fZap));
-            pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2;
-            return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
-        }
-
-        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, fVal);
-        AssertRCReturn(rc, rc);
-
-        pVCpu->hm.s.vmx.u32ProcCtls2 = fVal;
-    }
-    else if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest))
-    {
-        LogRel(("hmR0VmxSetupProcCtls: Unrestricted Guest set as true when secondary processor-based VM-execution controls not "
-                "available\n"));
+    /* Set up secondary processor-based VM-execution controls if the CPU supports it. */
+    if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
+        return hmR0VmxSetupProcCtls2(pVCpu);
+
+    /* Sanity check, should not really happen. */
+    if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest))
+    {
+        LogRelFunc(("Unrestricted Guest enabled when secondary processor-based VM-execution controls not available\n"));
         pVCpu->hm.s.u32HMError = VMX_UFC_INVALID_UX_COMBO;
         return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     }
 
+    /* Old CPUs without secondary processor-based VM-execution controls would end up here. */
     return VINF_SUCCESS;
 }
@@ -2655,5 +2678,5 @@
     if (RT_FAILURE(rc))
     {
-        LogRel(("VMXR0InitVM: hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
+        LogRelFunc(("hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
         return rc;
     }
@@ -2705,5 +2728,5 @@
              || !pVM->hm.s.vmx.pRealModeTSS))
     {
-        LogRel(("VMXR0SetupVM: Invalid real-on-v86 state.\n"));
+        LogRelFunc(("Invalid real-on-v86 state.\n"));
         return VERR_INTERNAL_ERROR;
     }
@@ -2717,5 +2740,5 @@
     if (RT_FAILURE(rc))
     {
-        LogRel(("VMXR0SetupVM: hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
+        LogRelFunc(("hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
         return rc;
     }
@@ -3195,7 +3218,7 @@
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_CTLS)
     {
-        PVM pVM       = pVCpu->CTX_SUFF(pVM);
-        uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0;      /* Bits set here must be set in the VMCS. */
-        uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1;          /* Bits cleared here must be cleared in the VMCS. */
+        PVM pVM = pVCpu->CTX_SUFF(pVM);
+        uint32_t       fVal = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0; /* Bits set here must be set in the VMCS. */
+        uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1;    /* Bits cleared here must be cleared in the VMCS. */
 
         /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supports the 1-setting of this bit. */
@@ -3236,8 +3259,12 @@
         }
 
-        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, fVal);
-        AssertRCReturn(rc, rc);
-
-        pVCpu->hm.s.vmx.u32EntryCtls = fVal;
+        /* Commit it to the VMCS and update our cache. */
+        if (pVCpu->hm.s.vmx.u32EntryCtls != fVal)
+        {
+            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, fVal);
+            AssertRCReturn(rc, rc);
+            pVCpu->hm.s.vmx.u32EntryCtls = fVal;
+        }
+
         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_ENTRY_CTLS);
     }
@@ -3261,7 +3288,7 @@
     if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_EXIT_CTLS)
     {
-        PVM pVM       = pVCpu->CTX_SUFF(pVM);
-        uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0;        /* Bits set here must be set in the VMCS. */
-        uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1;           /* Bits cleared here must be cleared in the VMCS. */
+        PVM pVM = pVCpu->CTX_SUFF(pVM);
+        uint32_t       fVal = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0;  /* Bits set here must be set in the VMCS. */
+        uint32_t const fZap = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1;     /* Bits cleared here must be cleared in the VMCS. */
 
         /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
@@ -3306,4 +3333,5 @@
          *        VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR. */
 
+        /* Enable saving of the VMX preemption timer value on VM-exit. */
         if (    pVM->hm.s.vmx.fUsePreemptTimer
             && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER))
@@ -3312,14 +3340,18 @@
         if ((fVal & fZap) != fVal)
         {
-            LogRel(("hmR0VmxSetupProcCtls: Invalid VM-exit controls combo! cpu=%RX64 fVal=%RX64 fZap=%RX64\n",
-                    pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0, fVal, fZap));
+            LogRelFunc(("Invalid VM-exit controls combo! cpu=%RX64 fVal=%RX64 fZap=%RX64\n",
+                        pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0, fVal, fZap));
             pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
             return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
         }
 
-        int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, fVal);
-        AssertRCReturn(rc, rc);
-
-        pVCpu->hm.s.vmx.u32ExitCtls = fVal;
+        /* Commit it to the VMCS and update our cache. */
+        if (pVCpu->hm.s.vmx.u32ExitCtls != fVal)
+        {
+            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, fVal);
+            AssertRCReturn(rc, rc);
+            pVCpu->hm.s.vmx.u32ExitCtls = fVal;
+        }
+
         ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_EXIT_CTLS);
     }
@@ -3765,4 +3797,5 @@
         AssertRCReturn(rc, rc);
 
+        /* Update our caches. */
        pVCpu->hm.s.vmx.u32CR0Mask  = uCR0Mask;
        pVCpu->hm.s.vmx.u32ProcCtls = uProcCtls;
@@ -3972,10 +4005,11 @@
 
         /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
-        uint64_t fSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
-        uint64_t fZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
+        uint64_t const fSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
+        uint64_t const fZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
         uGuestCR4 |= fSetCR4;
         uGuestCR4 &= fZapCR4;
 
-        /* Setup CR4 mask. CR4 flags owned by the host, if the guest attempts to change them, that would cause a VM-exit. */
+        /* Setup CR4 mask. CR4 flags owned by the host, if the guest attempts to change them,
+           that would cause a VM-exit. */
         uint32_t u32CR4Mask = X86_CR4_VME
                             | X86_CR4_PAE
@@ -3988,5 +4022,6 @@
             u32CR4Mask |= X86_CR4_PCIDE;
 
-        /* Write VT-x's view of the guest CR4, the CR4 modify mask and the read-only CR4 shadow into the VMCS. */
+        /* Write VT-x's view of the guest CR4, the CR4 modify mask and the read-only CR4 shadow
+           into the VMCS and update our cache. */
         rc  = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, uGuestCR4);
         rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, uShadowCR4);
@@ -4145,5 +4180,6 @@
 
     /*
-     * Update the processor-based VM-execution controls for MOV-DRx intercepts and the monitor-trap flag.
+     * Update the processor-based VM-execution controls with the MOV-DRx intercepts and the
+     * monitor-trap flag and update our cache.
      */
     if (uProcCtls != pVCpu->hm.s.vmx.u32ProcCtls)
@@ -5166,6 +5202,5 @@
  * @param   paParam     Array of 32-bit parameters.
  */
-VMMR0DECL(int) VMXR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp,
-                                         uint32_t cParams, uint32_t *paParam)
+VMMR0DECL(int) VMXR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp, uint32_t cParams, uint32_t *paParam)
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);
@@ -5556,12 +5591,11 @@
 static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu)
 {
-    int  rc;
-    bool fOffsettedTsc;
-    bool fParavirtTsc;
-    PVM  pVM = pVCpu->CTX_SUFF(pVM);
+    bool     fOffsettedTsc;
+    bool     fParavirtTsc;
+    PVM      pVM = pVCpu->CTX_SUFF(pVM);
+    uint64_t uTscOffset;
     if (pVM->hm.s.vmx.fUsePreemptTimer)
     {
-        uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset,
-                                                                     &fOffsettedTsc, &fParavirtTsc);
+        uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVM, pVCpu, &uTscOffset, &fOffsettedTsc, &fParavirtTsc);
 
         /* Make sure the returned values have sane upper and lower boundaries. */
@@ -5572,8 +5606,9 @@
 
         uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
-        rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount);        AssertRC(rc);
+        int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount);
+        AssertRC(rc);
     }
     else
-        fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset, &fParavirtTsc);
+        fOffsettedTsc = TMCpuTickCanUseRealTSC(pVM, pVCpu, &uTscOffset, &fParavirtTsc);
 
     /** @todo later optimize this to be done elsewhere and not before every
@@ -5584,5 +5619,5 @@
            information before every VM-entry, hence disable it for performance sake. */
 #if 0
-        rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
+        int rc = GIMR0UpdateParavirtTsc(pVM, 0 /* u64Offset */);
         AssertRC(rc);
 #endif
@@ -5590,11 +5625,20 @@
     }
 
-    if (fOffsettedTsc && RT_LIKELY(!pVCpu->hm.s.fDebugWantRdTscExit))
-    {
-        /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
-        rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset);     AssertRC(rc);
-
-        pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
-        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);            AssertRC(rc);
+    if (   fOffsettedTsc
+        && RT_LIKELY(!pVCpu->hm.s.fDebugWantRdTscExit))
+    {
+        if (pVCpu->hm.s.vmx.u64TSCOffset != uTscOffset)
+        {
+            int rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset);
+            AssertRC(rc);
+            pVCpu->hm.s.vmx.u64TSCOffset = uTscOffset;
+        }
+
+        if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT)
+        {
+            pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
+            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
+            AssertRC(rc);
+        }
         STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
     }
@@ -5602,6 +5646,10 @@
     {
         /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
-        pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
-        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);            AssertRC(rc);
+        if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
+        {
+            pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
+            int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
+            AssertRC(rc);
+        }
         STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
     }
@@ -6434,8 +6482,9 @@
     if (VMMRZCallRing3IsEnabled(pVCpu))
     {
-        VMMR0LogFlushDisable(pVCpu);
-
         if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
+        {
+            Assert(!(ASMAtomicUoReadU64(&pCtx->fExtrn) & CPUMCTX_EXTRN_CR3));
             PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
+        }
 
         if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
@@ -6444,6 +6493,4 @@
         Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
         Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
-
-        VMMR0LogFlushEnable(pVCpu);
     }
 
@@ -7782,5 +7829,5 @@
     if (!(uHostCR4 & X86_CR4_VMXE))
     {
-        LogRel(("VMXR0Enter: X86_CR4_VMXE bit in CR4 is not set!\n"));
+        LogRelFunc(("X86_CR4_VMXE bit in CR4 is not set!\n"));
         return VERR_VMX_X86_CR4_VMXE_CLEARED;
     }
@@ -10287,6 +10334,6 @@
          * CR0.
          */
-        uint32_t fSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
-        uint32_t fZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
+        uint32_t       fSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
+        uint32_t const fZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);
         /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG).
            See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
@@ -10309,6 +10356,6 @@
          * CR4.
          */
-        uint64_t fSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
-        uint64_t fZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
+        uint64_t const fSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
+        uint64_t const fZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);
 
         uint32_t uGuestCR4;
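One run-time change in this diff deserves a callout beyond the LogRelFunc and const cleanups: the VM-entry/VM-exit control exports and the TSC-offset/RDTSC-intercept updates now compare the computed value against a cached copy of the VMCS field and only issue the vmwrite when it actually changed, which matters on paths executed before every VM-entry. A minimal sketch of that write-if-changed pattern, with an illustrative field encoding and a printf standing in for the real VMXWriteVmcs32:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t g_u32EntryCtlsCache; /* Hypothetical cached copy of the VMCS field. */

    static void vmwrite32(uint32_t idField, uint32_t uVal) /* Stand-in for VMXWriteVmcs32. */
    {
        printf("vmwrite field %#x <- %#x\n", (unsigned)idField, (unsigned)uVal);
    }

    /* Touch the VMCS only when the computed value differs from the cache,
       mirroring the u32EntryCtls/u32ExitCtls handling in this changeset. */
    static void commitEntryCtls(uint32_t fVal)
    {
        if (g_u32EntryCtlsCache != fVal)
        {
            vmwrite32(0x4012 /* illustrative field encoding */, fVal);
            g_u32EntryCtlsCache = fVal;
        }
    }

    int main(void)
    {
        commitEntryCtls(0x11ff); /* First call: cache miss, vmwrite issued. */
        commitEntryCtls(0x11ff); /* Unchanged: vmwrite skipped. */
        return 0;
    }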