VirtualBox

Ignore:
Timestamp:
Jul 8, 2018 6:22:48 AM (6 years ago)
Author:
vboxsync
Message:

VMM/HM: Cleanup, nits.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r72965 r72966  
    116 116
    117 117 /** Assert that the required state bits are fetched. */
    118 #define HMSVM_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz)          AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
    119                                                                       ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
    120                                                                       (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
     118#define HMSVM_CPUMCTX_ASSERT(a_pVCpu, a_fExtrnMbz)      AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
     119                                                                  ("fExtrn=%#RX64 fExtrnMbz=%#RX64\n", \
     120                                                                  (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz)))
     121
     122/** Assert that preemption is disabled or covered by thread-context hooks. */
     123#define HMSVM_ASSERT_PREEMPT_SAFE(a_pVCpu)              Assert(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
     124                                                               || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     125
     126/** Assert that we haven't migrated CPUs when thread-context hooks are not
     127 *  used. */
     128#define HMSVM_ASSERT_CPU_SAFE(a_pVCpu)                  AssertMsg(   VMMR0ThreadCtxHookIsEnabled((a_pVCpu)) \
     129                                                                  || (a_pVCpu)->hm.s.idEnteredCpu == RTMpCpuId(), \
     130                                                                  ("Illegal migration! Entered on CPU %u Current %u\n", \
     131                                                                   (a_pVCpu)->hm.s.idEnteredCpu, RTMpCpuId()));
     132
     133/** Assert that we're not executing a nested-guest. */
     134#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
     135# define HMSVM_ASSERT_NOT_IN_NESTED_GUEST(a_pCtx)       Assert(!CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx)))
     136#else
     137# define HMSVM_ASSERT_NOT_IN_NESTED_GUEST(a_pCtx)       do { NOREF((a_pCtx)); } while (0)
     138#endif
     139
     140/** Assert that we're executing a nested-guest. */
     141#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
     142# define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx)           Assert(CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx)))
     143#else
     144# define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx)           do { NOREF((a_pCtx)); } while (0)
     145#endif
    121 146
    122 147 /** Macro for checking and returning from the using function for
     
    167192            (a_rc) = VINF_EM_DBG_STEPPED; \
    168193    } while (0)
    169 
    170 /** Assert that preemption is disabled or covered by thread-context hooks. */
    171 #define HMSVM_ASSERT_PREEMPT_SAFE()           Assert(   VMMR0ThreadCtxHookIsEnabled(pVCpu) \
    172                                                      || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    173 
    174 /** Assert that we haven't migrated CPUs when thread-context hooks are not
    175  *  used. */
    176 #define HMSVM_ASSERT_CPU_SAFE()               AssertMsg(   VMMR0ThreadCtxHookIsEnabled(pVCpu) \
    177                                                         || pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), \
    178                                                         ("Illegal migration! Entered on CPU %u Current %u\n", \
    179                                                         pVCpu->hm.s.idEnteredCpu, RTMpCpuId()));
    180 
    181 /** Assert that we're not executing a nested-guest. */
    182 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    183 # define HMSVM_ASSERT_NOT_IN_NESTED_GUEST(a_pCtx)       Assert(!CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx)))
    184 #else
    185 # define HMSVM_ASSERT_NOT_IN_NESTED_GUEST(a_pCtx)       do { NOREF((a_pCtx)); } while (0)
    186 #endif
    187 
    188 /** Assert that we're executing a nested-guest. */
    189 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    190 # define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx)           Assert(CPUMIsGuestInSvmNestedHwVirtMode((a_pCtx)))
    191 #else
    192 # define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx)           do { NOREF((a_pCtx)); } while (0)
    193 #endif
    194 194
    195 195 /** Validate segment descriptor granularity bit. */
     
    816 816    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
    817 817    {
    818         return    (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN)
    819                && pVM->cpum.ro.GuestFeatures.fSvmVmcbClean;
     818        return (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VMCB_CLEAN)
     819            && pVM->cpum.ro.GuestFeatures.fSvmVmcbClean;
    820 820    }
    821 821 #else
     
    839 839    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
    840 840    {
    841         return    (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS)
    842                &&  pVM->cpum.ro.GuestFeatures.fSvmDecodeAssists;
     841        return (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_DECODE_ASSISTS)
     842            &&  pVM->cpum.ro.GuestFeatures.fSvmDecodeAssists;
    843 843    }
    844 844 #else
     
    862 862    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
    863 863    {
    864         return    (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
    865                &&  pVM->cpum.ro.GuestFeatures.fSvmNextRipSave;
     864        return (pVM->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_NRIP_SAVE)
     865            &&  pVM->cpum.ro.GuestFeatures.fSvmNextRipSave;
    866 866    }
    867 867 #else
     
    3232 3232    Assert(pvUser);
    3233 3233    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    3234     HMSVM_ASSERT_PREEMPT_SAFE();
     3234    HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
    3235 3235
    3236 3236    VMMRZCallRing3Disable(pVCpu);
     
    3263 3263    Assert(pVCpu);
    3264 3264    Assert(pCtx);
    3265     HMSVM_ASSERT_PREEMPT_SAFE();
     3265    HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
    3266 3266
    3267 3267    /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
     
    4084 4084 static void hmR0SvmReportWorldSwitchError(PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx)
    4085 4085 {
    4086     HMSVM_ASSERT_PREEMPT_SAFE();
     4086    HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
    4087 4087    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
    4088 4088    HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL);
     
    4328 4328 static int hmR0SvmPreRunGuestNested(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
    4329 4329 {
    4330     HMSVM_ASSERT_PREEMPT_SAFE();
     4330    HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
    4331 4331    HMSVM_ASSERT_IN_NESTED_GUEST(pCtx);
    4332 4332
     
    4447 4447 static int hmR0SvmPreRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
    4448 4448 {
    4449     HMSVM_ASSERT_PREEMPT_SAFE();
     4449    HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
    4450 4450    HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx);
    4451 4451
     
    4877 4877    {
    4878 4878        Assert(!HMR0SuspendPending());
    4879         HMSVM_ASSERT_CPU_SAFE();
     4879        HMSVM_ASSERT_CPU_SAFE(pVCpu);
    4880 4880
    4881 4881        /* Preparatory work for running nested-guest code, this may force us to return to
     
    5061 5061    {
    5062 5062        Assert(!HMR0SuspendPending());
    5063         HMSVM_ASSERT_CPU_SAFE();
     5063        HMSVM_ASSERT_CPU_SAFE(pVCpu);
    5064 5064
    5065 5065        /* Preparatory work for running nested-guest code, this may force us to return to
     
    5138 5138 {
    5139 5139    Assert(VMMRZCallRing3IsEnabled(pVCpu));
    5140     HMSVM_ASSERT_PREEMPT_SAFE();
     5140    HMSVM_ASSERT_PREEMPT_SAFE(pVCpu);
    5141 5141    VMMRZCallRing3SetNotification(pVCpu, hmR0SvmCallRing3Callback, pCtx);
    5142 5142
     
    5843 5843        AssertPtr(pSvmTransient); \
    5844 5844        Assert(ASMIntAreEnabled()); \
    5845         HMSVM_ASSERT_PREEMPT_SAFE(); \
     5845        HMSVM_ASSERT_PREEMPT_SAFE(pVCpu); \
    5846 5846        HMSVM_ASSERT_PREEMPT_CPUID_VAR(); \
    5847 5847        Log4Func(("vcpu[%u] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-\n", (uint32_t)pVCpu->idCpu)); \
    5848         HMSVM_ASSERT_PREEMPT_SAFE(); \
     5848        HMSVM_ASSERT_PREEMPT_SAFE(pVCpu); \
    5849 5849        if (VMMR0IsLogFlushDisabled(pVCpu)) \
    5850 5850            HMSVM_ASSERT_PREEMPT_CPUID(); \
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette