VirtualBox

Changeset 79167 in vbox for trunk/src/VBox/VMM


Ignore:
Timestamp:
Jun 17, 2019 5:32:49 AM (6 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
131336
Message:

VMM/HMVMXR0: Nested VMX: bugref:9180 Assert parameters in module level functions when possible and move a couple of static function parameter asserts to the caller. Nits, comments, naming consistency.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r79149 r79167  
    1356 1356
    1357 1357    /* If we're for some reason not in VMX root mode, then don't leave it. */
    1358     RTCCUINTREG const uHostCR4 = ASMGetCR4();
     1358    RTCCUINTREG const uHostCr4 = ASMGetCR4();
    1359 1359
    1360 1360    int rc;
    1361     if (uHostCR4 & X86_CR4_VMXE)
     1361    if (uHostCr4 & X86_CR4_VMXE)
    1362 1362    {
    1363 1363        /* Exit VMX root mode and clear the VMX bit in CR4. */
     
    3621 3621                              PCSUPHWVIRTMSRS pHwvirtMsrs)
    3622 3622 {
    3623     Assert(pHostCpu);
    3624     Assert(pHwvirtMsrs);
     3623    AssertPtr(pHostCpu);
     3624    AssertPtr(pHwvirtMsrs);
    3625 3625    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    3626 3626
     
    3680 3680 VMMR0DECL(int) VMXR0InitVM(PVM pVM)
    3681 3681 {
     3682    AssertPtr(pVM);
    3682 3683    LogFlowFunc(("pVM=%p\n", pVM));
    3683 3684
     
    3701 3702 VMMR0DECL(int) VMXR0TermVM(PVM pVM)
    3702 3703 {
     3704    AssertPtr(pVM);
    3703 3705    LogFlowFunc(("pVM=%p\n", pVM));
    3704 3706
     
    3724 3726 VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
    3725 3727 {
    3726     AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
     3728    AssertPtr(pVM);
    3727 3729    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    3728 3730
     
    3733 3735     * VMX root mode or not without causing a #GP.
    3734 3736     */
    3735     RTCCUINTREG const uHostCR4 = ASMGetCR4();
    3736     if (RT_LIKELY(uHostCR4 & X86_CR4_VMXE))
     3737    RTCCUINTREG const uHostCr4 = ASMGetCR4();
     3738    if (RT_LIKELY(uHostCr4 & X86_CR4_VMXE))
    3737 3739    { /* likely */ }
    3738 3740    else
     
    3891 3893 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp, uint32_t cParams, uint32_t *paParam)
    3892 3894 {
     3895    AssertPtr(pVCpu);
    3893 3896    PVM pVM = pVCpu->CTX_SUFF(pVM);
    3894 3897    AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
     
    5094 5097        HMVMX_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR3);
    5095 5098
    5096         RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
     5099        RTGCPHYS GCPhysGuestCr3 = NIL_RTGCPHYS;
    5097 5100        if (pVM->hm.s.fNestedPaging)
    5098 5101        {
     
    5141 5144                 * the guest when it's not using paging.
    5142 5145                 */
    5143                 GCPhysGuestCR3 = pCtx->cr3;
     5146                GCPhysGuestCr3 = pCtx->cr3;
    5144 5147            }
    5145 5148            else
     
    5166 5169                    AssertMsgFailedReturn(("%Rrc\n",  rc), rc);
    5167 5170
    5168                 GCPhysGuestCR3 = GCPhys;
     5171                GCPhysGuestCr3 = GCPhys;
    5169 5172            }
    5170 5173
    5171             Log4Func(("u32GuestCr3=%#RGp (GstN)\n", GCPhysGuestCR3));
    5172             rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
     5174            Log4Func(("u32GuestCr3=%#RGp (GstN)\n", GCPhysGuestCr3));
     5175            rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCr3);
    5173 5176            AssertRCReturn(rc, rc);
    5174 5177        }
     
    5176 5179        {
    5177 5180            /* Non-nested paging case, just use the hypervisor's CR3. */
    5178             RTHCPHYS const HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
    5179 
    5180             Log4Func(("u32GuestCr3=%#RHv (HstN)\n", HCPhysGuestCR3));
    5181             rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
     5181            RTHCPHYS const HCPhysGuestCr3 = PGMGetHyperCR3(pVCpu);
     5182
     5183            Log4Func(("u32GuestCr3=%#RHv (HstN)\n", HCPhysGuestCr3));
     5184            rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCr3);
    5182 5185            AssertRCReturn(rc, rc);
    5183 5186        }
     
    6590 6593 VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
    6591 6594 {
     6595    AssertPtr(pVCpu);
    6592 6596    int rc;
    6593 6597    switch (idxField)
     
    7971 7975 VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat)
    7972 7976 {
     7977    AssertPtr(pVCpu);
    7973 7978    PCVMXVMCSINFO pVmcsInfo = hmGetVmxActiveVmcsInfo(pVCpu);
    7974 7979    return hmR0VmxImportGuestState(pVCpu, pVmcsInfo, fWhat);
     
    8465 8470 static int hmR0VmxExitToRing3(PVMCPU pVCpu, VBOXSTRICTRC rcExit)
    8466 8471 {
    8467     Assert(pVCpu);
    8468 8472    HMVMX_ASSERT_PREEMPT_SAFE(pVCpu);
    84698473
     
    9146 9150 #ifdef VBOX_STRICT
    9147 9151    /* At least verify VMX is enabled, since we can't check if we're in VMX root mode without #GP'ing. */
    9148     RTCCUINTREG uHostCR4 = ASMGetCR4();
    9149     if (!(uHostCR4 & X86_CR4_VMXE))
     9152    RTCCUINTREG uHostCr4 = ASMGetCR4();
     9153    if (!(uHostCr4 & X86_CR4_VMXE))
    9150 9154    {
    9151 9155        LogRelFunc(("X86_CR4_VMXE bit in CR4 is not set!\n"));
     
    9192 9196 VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
    9193 9197 {
    9194     NOREF(fGlobalInit);
     9198    AssertPtr(pVCpu);
     9199    RT_NOREF1(fGlobalInit);
    9195 9200
    9196 9201    switch (enmEvent)
     
    12603 12608 VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVMCPU pVCpu)
    12604 12609 {
     12610    AssertPtr(pVCpu);
    12605 12611    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    12606 12612    Assert(VMMRZCallRing3IsEnabled(pVCpu));
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle · Support · Privacy / Do Not Sell My Info · Terms of Use · Trademark Policy · Automated Access Etiquette