VirtualBox

Changeset 87563 in vbox for trunk/src/VBox/VMM


Ignore:
Timestamp:
Feb 3, 2021 1:23:13 PM (4 years ago)
Author:
vboxsync
Message:

VMM/HM: Moved the 'ForRing3' and 'ForLog' bits to the back of the HM structure. Configuration bits too, except for VMX/SVM specific ones as those fit in alignment space (can be moved later if needed). bugref:9217

Location:
trunk/src/VBox/VMM
Files:
9 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/HMAll.cpp

    r87547 r87563  
    683683
    684684        if (   pVM->hm.s.vmx.fSupported
    685             && (  CTX_EXPR(pVM->hm.s.vmx.MsrsForRing3.ProcCtls.n.allowed1, g_HmMsrs.u.vmx.ProcCtls.n.allowed1, RT_NOTHING)
     685            && (  CTX_EXPR(pVM->hm.s.ForR3.vmx.Msrs.ProcCtls.n.allowed1, g_HmMsrs.u.vmx.ProcCtls.n.allowed1, RT_NOTHING)
    686686                & VMX_PROC_CTLS_USE_MSR_BITMAPS))
    687687            return true;
  • trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp

    r87538 r87563  
    186186    bool const fVGif    = RT_BOOL(g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_VGIF);
    187187#else
    188     bool const fVGif    = RT_BOOL(pVM->hm.s.svm.fFeaturesForRing3 & X86_CPUID_SVM_FEATURE_EDX_VGIF);
     188    bool const fVGif    = RT_BOOL(pVM->hm.s.ForR3.svm.fFeatures & X86_CPUID_SVM_FEATURE_EDX_VGIF);
    189189#endif
    190190    return fVGif && pVM->hm.s.svm.fVGif;
  • trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp

    r87547 r87563  
    768768    {
    769769        /* If bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */
    770         uint32_t uCr0Mask = (uint32_t)CTX_EXPR(pVM->hm.s.vmx.MsrsForRing3.u64Cr0Fixed0, g_HmMsrs.u.vmx.u64Cr0Fixed0, RT_NOTHING);
     770        uint32_t uCr0Mask = (uint32_t)CTX_EXPR(pVM->hm.s.ForR3.vmx.Msrs.u64Cr0Fixed0, g_HmMsrs.u.vmx.u64Cr0Fixed0, RT_NOTHING);
    771771
    772772        /* We ignore the NE bit here on purpose; see HMR0.cpp for details. */
     
    787787
    788788        /* If bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */
    789         uCr0Mask = (uint32_t)~CTX_EXPR(pVM->hm.s.vmx.MsrsForRing3.u64Cr0Fixed1, g_HmMsrs.u.vmx.u64Cr0Fixed1, RT_NOTHING);
     789        uCr0Mask = (uint32_t)~CTX_EXPR(pVM->hm.s.ForR3.vmx.Msrs.u64Cr0Fixed1, g_HmMsrs.u.vmx.u64Cr0Fixed1, RT_NOTHING);
    790790        if ((pCtx->cr0 & uCr0Mask) != 0)
    791791            return false;
    792792
    793793        /* If bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */
    794         uCr0Mask  = (uint32_t)CTX_EXPR(pVM->hm.s.vmx.MsrsForRing3.u64Cr4Fixed0, g_HmMsrs.u.vmx.u64Cr4Fixed0, RT_NOTHING);
     794        uCr0Mask  = (uint32_t)CTX_EXPR(pVM->hm.s.ForR3.vmx.Msrs.u64Cr4Fixed0, g_HmMsrs.u.vmx.u64Cr4Fixed0, RT_NOTHING);
    795795        uCr0Mask &= ~X86_CR4_VMXE;
    796796        if ((pCtx->cr4 & uCr0Mask) != uCr0Mask)
     
    798798
    799799        /* If bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */
    800         uCr0Mask = (uint32_t)~CTX_EXPR(pVM->hm.s.vmx.MsrsForRing3.u64Cr4Fixed1, g_HmMsrs.u.vmx.u64Cr4Fixed1, RT_NOTHING);
     800        uCr0Mask = (uint32_t)~CTX_EXPR(pVM->hm.s.ForR3.vmx.Msrs.u64Cr4Fixed1, g_HmMsrs.u.vmx.u64Cr4Fixed1, RT_NOTHING);
    801801        if ((pCtx->cr4 & uCr0Mask) != 0)
    802802            return false;
  • trunk/src/VBox/VMM/VMMR0/HMR0.cpp

    r87559 r87563  
    11751175        pVM->hm.s.vmx.fUsePreemptTimerCfg           = pVM->hmr0.s.vmx.fUsePreemptTimer;
    11761176        pVM->hm.s.vmx.cPreemptTimerShift            = g_cHmVmxPreemptTimerShift;
    1177         pVM->hm.s.vmx.u64HostCr4ForRing3            = g_uHmVmxHostCr4;
    1178         pVM->hm.s.vmx.u64HostMsrEferForRing3        = g_uHmVmxHostMsrEfer;
    1179         pVM->hm.s.vmx.u64HostSmmMonitorCtlForRing3  = g_uHmVmxHostSmmMonitorCtl;
    1180         HMGetVmxMsrsFromHwvirtMsrs(&g_HmMsrs, &pVM->hm.s.vmx.MsrsForRing3);
     1177        pVM->hm.s.ForR3.vmx.u64HostCr4              = g_uHmVmxHostCr4;
     1178        pVM->hm.s.ForR3.vmx.u64HostMsrEfer          = g_uHmVmxHostMsrEfer;
     1179        pVM->hm.s.ForR3.vmx.u64HostSmmMonitorCtl    = g_uHmVmxHostSmmMonitorCtl;
     1180        HMGetVmxMsrsFromHwvirtMsrs(&g_HmMsrs, &pVM->hm.s.ForR3.vmx.Msrs);
    11811181        /* If you need to tweak host MSRs for testing VMX R0 code, do it here. */
    11821182
    11831183        /* Enable VPID if supported and configured. */
    11841184        if (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VPID)
    1185             pVM->hm.s.vmx.fVpidForRing3
    1186                 = pVM->hmr0.s.vmx.fVpid = pVM->hm.s.vmx.fAllowVpid; /* Can be overridden by CFGM in HMR3Init(). */
     1185            pVM->hm.s.ForR3.vmx.fVpid = pVM->hmr0.s.vmx.fVpid = pVM->hm.s.vmx.fAllowVpid; /* Can be overridden by CFGM in HMR3Init(). */
    11871186
    11881187        /* Use VMCS shadowing if supported. */
    11891188        pVM->hmr0.s.vmx.fUseVmcsShadowing = pVM->cpum.ro.GuestFeatures.fVmx
    11901189                                         && (g_HmMsrs.u.vmx.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_VMCS_SHADOWING);
    1191         pVM->hm.s.vmx.fUseVmcsShadowingForRing3 = pVM->hmr0.s.vmx.fUseVmcsShadowing;
     1190        pVM->hm.s.ForR3.vmx.fUseVmcsShadowing = pVM->hmr0.s.vmx.fUseVmcsShadowing;
    11921191
    11931192        /* Use the VMCS controls for swapping the EFER MSR if supported. */
    1194         pVM->hm.s.vmx.fSupportsVmcsEferForRing3 = g_fHmVmxSupportsVmcsEfer;
     1193        pVM->hm.s.ForR3.vmx.fSupportsVmcsEfer = g_fHmVmxSupportsVmcsEfer;
    11951194
    11961195#if 0
     
    12101209    else if (pVM->hm.s.svm.fSupported)
    12111210    {
    1212         pVM->hm.s.svm.u32Rev            = g_uHmSvmRev;
    1213         pVM->hm.s.svm.fFeaturesForRing3 = g_fHmSvmFeatures;
    1214         pVM->hm.s.svm.u64MsrHwcr        = g_HmMsrs.u.svm.u64MsrHwcr;
     1211        pVM->hm.s.ForR3.svm.u32Rev      = g_uHmSvmRev;
     1212        pVM->hm.s.ForR3.svm.fFeatures  = g_fHmSvmFeatures;
     1213        pVM->hm.s.ForR3.svm.u64MsrHwcr  = g_HmMsrs.u.svm.u64MsrHwcr;
    12151214        /* If you need to tweak host MSRs for testing SVM R0 code, do it here. */
    12161215    }
    1217     pVM->hm.s.rcInit              = g_rcHmInit;
    1218     pVM->hm.s.uMaxAsidForLog      = g_uHmMaxAsid;
     1216    pVM->hm.s.ForR3.rcInit      = g_rcHmInit;
     1217    pVM->hm.s.ForR3.uMaxAsid    = g_uHmMaxAsid;
    12191218
    12201219    /*
     
    12751274        pVCpu->hmr0.s.fWorldSwitcher = fWorldSwitcher;
    12761275    }
    1277     pVM->hm.s.fWorldSwitcherForLog = fWorldSwitcher;
     1276    pVM->hm.s.ForR3.fWorldSwitcher = fWorldSwitcher;
    12781277
    12791278
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r87561 r87563  
    994994    AssertReturn(pVM->hm.s.svm.fSupported, VERR_INCOMPATIBLE_CONFIG);
    995995    bool const fNestedPaging = pVM->hm.s.fNestedPagingCfg;
    996     AssertReturn(   !fNestedPaging
    997                  || (pVM->hm.s.svm.fFeaturesForRing3 & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
    998                  VERR_INCOMPATIBLE_CONFIG);
     996    AssertReturn(!fNestedPaging || (g_fHmSvmFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING), VERR_INCOMPATIBLE_CONFIG);
    999997    pVM->hmr0.s.fNestedPaging = fNestedPaging;
    1000998    pVM->hmr0.s.fAllow64BitGuests = pVM->hm.s.fAllow64BitGuestsCfg;
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r87562 r87563  
    14531453        VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
    14541454    }
    1455     pVCpu->CTX_SUFF(pVM)->hm.s.rcInit = rc;
     1455    pVCpu->CTX_SUFF(pVM)->hm.s.ForR3.rcInit = rc;
    14561456}
    14571457
     
    17381738
    17391739        if (pVM)
    1740             pVM->hm.s.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
     1740            pVM->hm.s.ForR3.vmx.HCPhysVmxEnableError = HCPhysCpuPage;
    17411741    }
    17421742
     
    33513351                    LogRelFunc(("Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
    33523352                pVM->hmr0.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NOT_SUPPORTED;
    3353                 pVM->hm.s.vmx.fVpidForRing3 = pVM->hmr0.s.vmx.fVpid = false;
     3353                pVM->hmr0.s.vmx.fVpid          = false;
    33543354            }
    33553355        }
     
    33593359            Log4Func(("VPID supported without INVEPT support. Ignoring VPID.\n"));
    33603360            pVM->hmr0.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NOT_SUPPORTED;
    3361             pVM->hm.s.vmx.fVpidForRing3 = pVM->hmr0.s.vmx.fVpid = false;
     3361            pVM->hmr0.s.vmx.fVpid          = false;
    33623362        }
    33633363    }
     
    33793379     * Copy out the result to ring-3.
    33803380     */
    3381     pVM->hm.s.vmx.fVpidForRing3           = pVM->hmr0.s.vmx.fVpid;
    3382     pVM->hm.s.vmx.enmTlbFlushTypeForRing3 = pVM->hmr0.s.vmx.enmTlbFlushType;
    3383     pVM->hm.s.vmx.enmTlbFlushEptForRing3  = pVM->hmr0.s.vmx.enmTlbFlushEpt;
    3384     pVM->hm.s.vmx.enmTlbFlushVpidForRing3 = pVM->hmr0.s.vmx.enmTlbFlushVpid;
     3381    pVM->hm.s.ForR3.vmx.fVpid           = pVM->hmr0.s.vmx.fVpid;
     3382    pVM->hm.s.ForR3.vmx.enmTlbFlushType = pVM->hmr0.s.vmx.enmTlbFlushType;
     3383    pVM->hm.s.ForR3.vmx.enmTlbFlushEpt  = pVM->hmr0.s.vmx.enmTlbFlushEpt;
     3384    pVM->hm.s.ForR3.vmx.enmTlbFlushVpid = pVM->hmr0.s.vmx.enmTlbFlushVpid;
    33853385    return VINF_SUCCESS;
    33863386}
     
    34823482    pVM->hmr0.s.vmx.idLbrTosMsr = idLbrTosMsr;
    34833483
    3484     pVM->hm.s.vmx.idLbrFromIpMsrFirstForRing3 = pVM->hmr0.s.vmx.idLbrFromIpMsrFirst = idLbrFromIpMsrFirst;
    3485     pVM->hm.s.vmx.idLbrFromIpMsrLastForRing3  = pVM->hmr0.s.vmx.idLbrFromIpMsrLast  = idLbrFromIpMsrLast;
    3486 
    3487     pVM->hm.s.vmx.idLbrToIpMsrFirstForRing3   = pVM->hmr0.s.vmx.idLbrToIpMsrFirst   = idLbrToIpMsrFirst;
    3488     pVM->hm.s.vmx.idLbrToIpMsrLastForRing3    = pVM->hmr0.s.vmx.idLbrToIpMsrLast    = idLbrToIpMsrLast;
     3484    pVM->hm.s.ForR3.vmx.idLbrFromIpMsrFirst = pVM->hmr0.s.vmx.idLbrFromIpMsrFirst = idLbrFromIpMsrFirst;
     3485    pVM->hm.s.ForR3.vmx.idLbrFromIpMsrLast  = pVM->hmr0.s.vmx.idLbrFromIpMsrLast  = idLbrFromIpMsrLast;
     3486
     3487    pVM->hm.s.ForR3.vmx.idLbrToIpMsrFirst   = pVM->hmr0.s.vmx.idLbrToIpMsrFirst   = idLbrToIpMsrFirst;
     3488    pVM->hm.s.ForR3.vmx.idLbrToIpMsrLast    = pVM->hmr0.s.vmx.idLbrToIpMsrLast    = idLbrToIpMsrLast;
    34893489    return VINF_SUCCESS;
    34903490}
     
    45374537
    45384538    /* Initialize these always, see hmR3InitFinalizeR0().*/
    4539     pVM->hm.s.vmx.enmTlbFlushEptForRing3  = pVM->hmr0.s.vmx.enmTlbFlushEpt  = VMXTLBFLUSHEPT_NONE;
    4540     pVM->hm.s.vmx.enmTlbFlushVpidForRing3 = pVM->hmr0.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NONE;
     4539    pVM->hm.s.ForR3.vmx.enmTlbFlushEpt  = pVM->hmr0.s.vmx.enmTlbFlushEpt  = VMXTLBFLUSHEPT_NONE;
     4540    pVM->hm.s.ForR3.vmx.enmTlbFlushVpid = pVM->hmr0.s.vmx.enmTlbFlushVpid = VMXTLBFLUSHVPID_NONE;
    45414541
    45424542    /* Setup the tagged-TLB flush handlers. */
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r87561 r87563  
    10571057    if (   !pVM->hm.s.vmx.fSupported
    10581058        && !pVM->hm.s.svm.fSupported
    1059         &&  pVM->hm.s.rcInit == VERR_SVM_IN_USE /* implies functional AMD-V */
     1059        &&  pVM->hm.s.ForR3.rcInit == VERR_SVM_IN_USE /* implies functional AMD-V */
    10601060        &&  RTEnvExist("VBOX_HWVIRTEX_IGNORE_SVM_IN_USE"))
    10611061    {
     
    10631063        pVM->hm.s.svm.fSupported        = true;
    10641064        pVM->hm.s.svm.fIgnoreInUseError = true;
    1065         pVM->hm.s.rcInit = VINF_SUCCESS;
     1065        pVM->hm.s.ForR3.rcInit = VINF_SUCCESS;
    10661066    }
    10671067
     
    10721072        && !pVM->hm.s.svm.fSupported)
    10731073    {
    1074         LogRel(("HM: Failed to initialize VT-x / AMD-V: %Rrc\n", pVM->hm.s.rcInit));
    1075         LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.vmx.MsrsForRing3.u64FeatCtrl));
    1076         switch (pVM->hm.s.rcInit)
     1074        LogRel(("HM: Failed to initialize VT-x / AMD-V: %Rrc\n", pVM->hm.s.ForR3.rcInit));
     1075        LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.ForR3.vmx.Msrs.u64FeatCtrl));
     1076        switch (pVM->hm.s.ForR3.rcInit)
    10771077        {
    10781078            case VERR_VMX_IN_VMX_ROOT_MODE:
     
    10981098                return VM_SET_ERROR(pVM, VERR_SVM_DISABLED, "AMD-V is disabled in the BIOS");
    10991099        }
    1100         return VMSetError(pVM, pVM->hm.s.rcInit, RT_SRC_POS, "HM ring-0 init failed: %Rrc", pVM->hm.s.rcInit);
     1100        return VMSetError(pVM, pVM->hm.s.ForR3.rcInit, RT_SRC_POS, "HM ring-0 init failed: %Rrc", pVM->hm.s.ForR3.rcInit);
    11011101    }
    11021102
     
    11231123
    11241124    LogRel(("HM: fWorldSwitcher=%#x (fIbpbOnVmExit=%RTbool fIbpbOnVmEntry=%RTbool fL1dFlushOnVmEntry=%RTbool); fL1dFlushOnSched=%RTbool fMdsClearOnVmEntry=%RTbool\n",
    1125             pVM->hm.s.fWorldSwitcherForLog, pVM->hm.s.fIbpbOnVmExit, pVM->hm.s.fIbpbOnVmEntry, pVM->hm.s.fL1dFlushOnVmEntry,
     1125            pVM->hm.s.ForR3.fWorldSwitcher, pVM->hm.s.fIbpbOnVmExit, pVM->hm.s.fIbpbOnVmEntry, pVM->hm.s.fL1dFlushOnVmEntry,
    11261126            pVM->hm.s.fL1dFlushOnSched, pVM->hm.s.fMdsClearOnVmEntry));
    11271127
     
    15021502
    15031503    LogFunc(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported));
    1504     AssertLogRelReturn(pVM->hm.s.vmx.MsrsForRing3.u64FeatCtrl != 0, VERR_HM_IPE_4);
     1504    AssertLogRelReturn(pVM->hm.s.ForR3.vmx.Msrs.u64FeatCtrl != 0, VERR_HM_IPE_4);
    15051505
    15061506    LogRel(("HM: Using VT-x implementation 3.0\n"));
    15071507    LogRel(("HM: Max resume loops                  = %u\n",     pVM->hm.s.cMaxResumeLoopsCfg));
    1508     LogRel(("HM: Host CR4                          = %#RX64\n", pVM->hm.s.vmx.u64HostCr4ForRing3));
    1509     LogRel(("HM: Host EFER                         = %#RX64\n", pVM->hm.s.vmx.u64HostMsrEferForRing3));
    1510     LogRel(("HM: MSR_IA32_SMM_MONITOR_CTL          = %#RX64\n", pVM->hm.s.vmx.u64HostSmmMonitorCtlForRing3));
    1511 
    1512     hmR3VmxReportFeatCtlMsr(pVM->hm.s.vmx.MsrsForRing3.u64FeatCtrl);
    1513     hmR3VmxReportBasicMsr(pVM->hm.s.vmx.MsrsForRing3.u64Basic);
    1514 
    1515     hmR3VmxReportPinBasedCtlsMsr(&pVM->hm.s.vmx.MsrsForRing3.PinCtls);
    1516     hmR3VmxReportProcBasedCtlsMsr(&pVM->hm.s.vmx.MsrsForRing3.ProcCtls);
    1517     if (pVM->hm.s.vmx.MsrsForRing3.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
    1518         hmR3VmxReportProcBasedCtls2Msr(&pVM->hm.s.vmx.MsrsForRing3.ProcCtls2);
    1519 
    1520     hmR3VmxReportEntryCtlsMsr(&pVM->hm.s.vmx.MsrsForRing3.EntryCtls);
    1521     hmR3VmxReportExitCtlsMsr(&pVM->hm.s.vmx.MsrsForRing3.ExitCtls);
    1522 
    1523     if (RT_BF_GET(pVM->hm.s.vmx.MsrsForRing3.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
     1508    LogRel(("HM: Host CR4                          = %#RX64\n", pVM->hm.s.ForR3.vmx.u64HostCr4));
     1509    LogRel(("HM: Host EFER                         = %#RX64\n", pVM->hm.s.ForR3.vmx.u64HostMsrEfer));
     1510    LogRel(("HM: MSR_IA32_SMM_MONITOR_CTL          = %#RX64\n", pVM->hm.s.ForR3.vmx.u64HostSmmMonitorCtl));
     1511
     1512    hmR3VmxReportFeatCtlMsr(pVM->hm.s.ForR3.vmx.Msrs.u64FeatCtrl);
     1513    hmR3VmxReportBasicMsr(pVM->hm.s.ForR3.vmx.Msrs.u64Basic);
     1514
     1515    hmR3VmxReportPinBasedCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.PinCtls);
     1516    hmR3VmxReportProcBasedCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.ProcCtls);
     1517    if (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
     1518        hmR3VmxReportProcBasedCtls2Msr(&pVM->hm.s.ForR3.vmx.Msrs.ProcCtls2);
     1519
     1520    hmR3VmxReportEntryCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.EntryCtls);
     1521    hmR3VmxReportExitCtlsMsr(&pVM->hm.s.ForR3.vmx.Msrs.ExitCtls);
     1522
     1523    if (RT_BF_GET(pVM->hm.s.ForR3.vmx.Msrs.u64Basic, VMX_BF_BASIC_TRUE_CTLS))
    15241524    {
    15251525        /* We don't extensively dump the true capability MSRs as we don't use them, see @bugref{9180#c5}. */
    1526         LogRel(("HM: MSR_IA32_VMX_TRUE_PINBASED_CTLS   = %#RX64\n", pVM->hm.s.vmx.MsrsForRing3.TruePinCtls));
    1527         LogRel(("HM: MSR_IA32_VMX_TRUE_PROCBASED_CTLS  = %#RX64\n", pVM->hm.s.vmx.MsrsForRing3.TrueProcCtls));
    1528         LogRel(("HM: MSR_IA32_VMX_TRUE_ENTRY_CTLS      = %#RX64\n", pVM->hm.s.vmx.MsrsForRing3.TrueEntryCtls));
    1529         LogRel(("HM: MSR_IA32_VMX_TRUE_EXIT_CTLS       = %#RX64\n", pVM->hm.s.vmx.MsrsForRing3.TrueExitCtls));
    1530     }
    1531 
    1532     hmR3VmxReportMiscMsr(pVM, pVM->hm.s.vmx.MsrsForRing3.u64Misc);
    1533     hmR3VmxReportVmcsEnumMsr(pVM->hm.s.vmx.MsrsForRing3.u64VmcsEnum);
    1534     if (pVM->hm.s.vmx.MsrsForRing3.u64EptVpidCaps)
    1535         hmR3VmxReportEptVpidCapsMsr(pVM->hm.s.vmx.MsrsForRing3.u64EptVpidCaps);
    1536     if (pVM->hm.s.vmx.MsrsForRing3.u64VmFunc)
    1537         hmR3VmxReportVmFuncMsr(pVM->hm.s.vmx.MsrsForRing3.u64VmFunc);
    1538     hmR3VmxReportCrFixedMsrs(&pVM->hm.s.vmx.MsrsForRing3);
     1526        LogRel(("HM: MSR_IA32_VMX_TRUE_PINBASED_CTLS   = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TruePinCtls));
     1527        LogRel(("HM: MSR_IA32_VMX_TRUE_PROCBASED_CTLS  = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TrueProcCtls));
     1528        LogRel(("HM: MSR_IA32_VMX_TRUE_ENTRY_CTLS      = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TrueEntryCtls));
     1529        LogRel(("HM: MSR_IA32_VMX_TRUE_EXIT_CTLS       = %#RX64\n", pVM->hm.s.ForR3.vmx.Msrs.TrueExitCtls));
     1530    }
     1531
     1532    hmR3VmxReportMiscMsr(pVM, pVM->hm.s.ForR3.vmx.Msrs.u64Misc);
     1533    hmR3VmxReportVmcsEnumMsr(pVM->hm.s.ForR3.vmx.Msrs.u64VmcsEnum);
     1534    if (pVM->hm.s.ForR3.vmx.Msrs.u64EptVpidCaps)
     1535        hmR3VmxReportEptVpidCapsMsr(pVM->hm.s.ForR3.vmx.Msrs.u64EptVpidCaps);
     1536    if (pVM->hm.s.ForR3.vmx.Msrs.u64VmFunc)
     1537        hmR3VmxReportVmFuncMsr(pVM->hm.s.ForR3.vmx.Msrs.u64VmFunc);
     1538    hmR3VmxReportCrFixedMsrs(&pVM->hm.s.ForR3.vmx.Msrs);
    15391539
    15401540#ifdef TODO_9217_VMCSINFO
     
    15641564     */
    15651565    AssertLogRelReturn(   !pVM->hm.s.fNestedPagingCfg
    1566                        || (pVM->hm.s.vmx.MsrsForRing3.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_EPT),
     1566                       || (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_EPT),
    15671567                       VERR_HM_IPE_1);
    15681568    AssertLogRelReturn(   !pVM->hm.s.vmx.fUnrestrictedGuestCfg
    1569                        || (   (pVM->hm.s.vmx.MsrsForRing3.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
     1569                       || (   (pVM->hm.s.ForR3.vmx.Msrs.ProcCtls2.n.allowed1 & VMX_PROC_CTLS2_UNRESTRICTED_GUEST)
    15701570                           && pVM->hm.s.fNestedPagingCfg),
    15711571                       VERR_HM_IPE_1);
     
    15761576     * in Nehalems and secondary VM exec. controls should be supported in all of them, but nonetheless it's Intel...
    15771577     */
    1578     if (   !(pVM->hm.s.vmx.MsrsForRing3.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
     1578    if (   !(pVM->hm.s.ForR3.vmx.Msrs.ProcCtls.n.allowed1 & VMX_PROC_CTLS_USE_SECONDARY_CTLS)
    15791579        && CPUMR3GetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP))
    15801580    {
     
    16621662    }
    16631663
    1664     LogRel(("HM: Supports VMCS EFER fields         = %RTbool\n", pVM->hm.s.vmx.fSupportsVmcsEferForRing3));
     1664    LogRel(("HM: Supports VMCS EFER fields         = %RTbool\n", pVM->hm.s.ForR3.vmx.fSupportsVmcsEfer));
    16651665    LogRel(("HM: Enabled VMX\n"));
    16661666    pVM->hm.s.vmx.fEnabled = true;
     
    16901690    {
    16911691        LogRel(("HM: Enabled nested paging\n"));
    1692         if (pVM->hm.s.vmx.enmTlbFlushEptForRing3 == VMXTLBFLUSHEPT_SINGLE_CONTEXT)
     1692        if (pVM->hm.s.ForR3.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_SINGLE_CONTEXT)
    16931693            LogRel(("HM:   EPT flush type                  = Single context\n"));
    1694         else if (pVM->hm.s.vmx.enmTlbFlushEptForRing3 == VMXTLBFLUSHEPT_ALL_CONTEXTS)
     1694        else if (pVM->hm.s.ForR3.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_ALL_CONTEXTS)
    16951695            LogRel(("HM:   EPT flush type                  = All contexts\n"));
    1696         else if (pVM->hm.s.vmx.enmTlbFlushEptForRing3 == VMXTLBFLUSHEPT_NOT_SUPPORTED)
     1696        else if (pVM->hm.s.ForR3.vmx.enmTlbFlushEpt == VMXTLBFLUSHEPT_NOT_SUPPORTED)
    16971697            LogRel(("HM:   EPT flush type                  = Not supported\n"));
    16981698        else
    1699             LogRel(("HM:   EPT flush type                  = %#x\n", pVM->hm.s.vmx.enmTlbFlushEptForRing3));
     1699            LogRel(("HM:   EPT flush type                  = %#x\n", pVM->hm.s.ForR3.vmx.enmTlbFlushEpt));
    17001700
    17011701        if (pVM->hm.s.vmx.fUnrestrictedGuestCfg)
     
    17121712        Assert(!pVM->hm.s.vmx.fUnrestrictedGuestCfg);
    17131713
    1714     if (pVM->hm.s.vmx.fVpidForRing3)
     1714    if (pVM->hm.s.ForR3.vmx.fVpid)
    17151715    {
    17161716        LogRel(("HM: Enabled VPID\n"));
    1717         if (pVM->hm.s.vmx.enmTlbFlushVpidForRing3 == VMXTLBFLUSHVPID_INDIV_ADDR)
     1717        if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_INDIV_ADDR)
    17181718            LogRel(("HM:   VPID flush type                 = Individual addresses\n"));
    1719         else if (pVM->hm.s.vmx.enmTlbFlushVpidForRing3 == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
     1719        else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT)
    17201720            LogRel(("HM:   VPID flush type                 = Single context\n"));
    1721         else if (pVM->hm.s.vmx.enmTlbFlushVpidForRing3 == VMXTLBFLUSHVPID_ALL_CONTEXTS)
     1721        else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_ALL_CONTEXTS)
    17221722            LogRel(("HM:   VPID flush type                 = All contexts\n"));
    1723         else if (pVM->hm.s.vmx.enmTlbFlushVpidForRing3 == VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
     1723        else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
    17241724            LogRel(("HM:   VPID flush type                 = Single context retain globals\n"));
    17251725        else
    1726             LogRel(("HM:   VPID flush type                 = %#x\n", pVM->hm.s.vmx.enmTlbFlushVpidForRing3));
    1727     }
    1728     else if (pVM->hm.s.vmx.enmTlbFlushVpidForRing3 == VMXTLBFLUSHVPID_NOT_SUPPORTED)
     1726            LogRel(("HM:   VPID flush type                 = %#x\n", pVM->hm.s.ForR3.vmx.enmTlbFlushVpid));
     1727    }
     1728    else if (pVM->hm.s.ForR3.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_NOT_SUPPORTED)
    17291729        LogRel(("HM: Ignoring VPID capabilities of CPU\n"));
    17301730
     
    17401740        LogRel(("HM: Enabled posted-interrupt processing support\n"));
    17411741
    1742     if (pVM->hm.s.vmx.fUseVmcsShadowingForRing3)
    1743     {
    1744         bool const fFullVmcsShadow = RT_BOOL(pVM->hm.s.vmx.MsrsForRing3.u64Misc & VMX_MISC_VMWRITE_ALL);
     1742    if (pVM->hm.s.ForR3.vmx.fUseVmcsShadowing)
     1743    {
     1744        bool const fFullVmcsShadow = RT_BOOL(pVM->hm.s.ForR3.vmx.Msrs.u64Misc & VMX_MISC_VMWRITE_ALL);
    17451745        LogRel(("HM: Enabled %s VMCS shadowing\n", fFullVmcsShadow ? "full" : "partial"));
    17461746    }
     
    17681768        LogRel(("HM: AMD Cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));
    17691769    LogRel(("HM: Max resume loops                  = %u\n",     pVM->hm.s.cMaxResumeLoopsCfg));
    1770     LogRel(("HM: AMD HWCR MSR                      = %#RX64\n", pVM->hm.s.svm.u64MsrHwcr));
    1771     LogRel(("HM: AMD-V revision                    = %#x\n",    pVM->hm.s.svm.u32Rev));
    1772     LogRel(("HM: AMD-V max ASID                    = %RU32\n",  pVM->hm.s.uMaxAsidForLog));
    1773     LogRel(("HM: AMD-V features                    = %#x\n",    pVM->hm.s.svm.fFeaturesForRing3));
     1770    LogRel(("HM: AMD HWCR MSR                      = %#RX64\n", pVM->hm.s.ForR3.svm.u64MsrHwcr));
     1771    LogRel(("HM: AMD-V revision                    = %#x\n",    pVM->hm.s.ForR3.svm.u32Rev));
     1772    LogRel(("HM: AMD-V max ASID                    = %RU32\n",  pVM->hm.s.ForR3.uMaxAsid));
     1773    LogRel(("HM: AMD-V features                    = %#x\n",    pVM->hm.s.ForR3.svm.fFeatures));
    17741774
    17751775    /*
     
    17961796    };
    17971797
    1798     uint32_t fSvmFeatures = pVM->hm.s.svm.fFeaturesForRing3;
     1798    uint32_t fSvmFeatures = pVM->hm.s.ForR3.svm.fFeatures;
    17991799    for (unsigned i = 0; i < RT_ELEMENTS(s_aSvmFeatures); i++)
    18001800        if (fSvmFeatures & s_aSvmFeatures[i].fFlag)
     
    18121812     */
    18131813    AssertLogRelReturn(   !pVM->hm.s.fNestedPagingCfg
    1814                        || (pVM->hm.s.svm.fFeaturesForRing3 & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
     1814                       || (pVM->hm.s.ForR3.svm.fFeatures & X86_CPUID_SVM_FEATURE_EDX_NESTED_PAGING),
    18151815                       VERR_HM_IPE_1);
    18161816
     
    28292829    PVM pVM = pUVM->pVM;
    28302830    VM_ASSERT_VALID_EXT_RETURN(pVM, false);
    2831     return pVM->hm.s.vmx.fVpidForRing3;
     2831    return pVM->hm.s.ForR3.vmx.fVpid;
    28322832}
    28332833
     
    30653065    if (iStatusCode == VERR_VMX_UNABLE_TO_START_VM)
    30663066    {
    3067         LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed-1  %#RX32\n", pVM->hm.s.vmx.MsrsForRing3.EntryCtls.n.allowed1));
    3068         LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed-0  %#RX32\n", pVM->hm.s.vmx.MsrsForRing3.EntryCtls.n.allowed0));
     3067        LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed-1  %#RX32\n", pVM->hm.s.ForR3.vmx.Msrs.EntryCtls.n.allowed1));
     3068        LogRel(("HM: VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed-0  %#RX32\n", pVM->hm.s.ForR3.vmx.Msrs.EntryCtls.n.allowed0));
    30693069    }
    30703070    else if (iStatusCode == VERR_VMX_INVALID_VMXON_PTR)
    3071         LogRel(("HM: HCPhysVmxEnableError         = %#RHp\n", pVM->hm.s.vmx.HCPhysVmxEnableError));
     3071        LogRel(("HM: HCPhysVmxEnableError         = %#RHp\n", pVM->hm.s.ForR3.vmx.HCPhysVmxEnableError));
    30723072}
    30733073
     
    33363336        {
    33373337            PCVMXVMCSINFOSHARED pVmcsInfoShared = hmGetVmxActiveVmcsInfoShared(pVCpu);
    3338             uint32_t const      cLbrStack       = pVM->hm.s.vmx.idLbrFromIpMsrLastForRing3 - pVM->hm.s.vmx.idLbrFromIpMsrFirstForRing3 + 1;
     3338            uint32_t const      cLbrStack       = pVM->hm.s.ForR3.vmx.idLbrFromIpMsrLast - pVM->hm.s.ForR3.vmx.idLbrFromIpMsrFirst + 1;
    33393339
    33403340            /** @todo r=ramshankar: The index technically varies depending on the CPU, but
     
    33593359            for (;;)
    33603360            {
    3361                 if (pVM->hm.s.vmx.idLbrToIpMsrFirstForRing3)
     3361                if (pVM->hm.s.ForR3.vmx.idLbrToIpMsrFirst)
    33623362                    pHlp->pfnPrintf(pHlp, "  Branch (%2u): From IP=%#016RX64 - To IP=%#016RX64\n", idxCurrent,
    33633363                                    pVmcsInfoShared->au64LbrFromIpMsr[idxCurrent], pVmcsInfoShared->au64LbrToIpMsr[idxCurrent]);
  • trunk/src/VBox/VMM/include/HMInternal.h

    r87562 r87563  
    440440    bool                        afAlignment1[5];
    441441
    442     /** @todo r=bird: for better cache locality for SVM, it would be good to split
    443      *        out the non-essential data (i.e config and for-ring3 bits). */
    444442    struct
    445443    {
     
    450448        /** The shift mask employed by the VMX-Preemption timer (set by ring-0). */
    451449        uint8_t                     cPreemptTimerShift;
    452         bool                        afAlignment1[5];
    453 
    454         /** Pause-loop exiting (PLE) gap in ticks. */
    455         uint32_t                    cPleGapTicks;
    456         /** Pause-loop exiting (PLE) window in ticks. */
    457         uint32_t                    cPleWindowTicks;
    458 
    459         /** Virtual address of the TSS page used for real mode emulation. */
    460         R3PTRTYPE(PVBOXTSS)         pRealModeTSS;
    461         /** Virtual address of the identity page table used for real mode and protected
    462          *  mode without paging emulation in EPT mode. */
    463         R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;
     450        bool                        fAlignment1;
    464451
    465452        /** @name Configuration (gets copied if problematic)
     
    477464        /** @} */
    478465
    479         /** @name For ring-3 consumption
    480          * @{ */
    481         /** Set if VPID is supported (ring-3 copy). */
    482         bool                        fVpidForRing3;
    483         /** Whether the CPU supports VMCS fields for swapping EFER (set by ring-0 VMX
    484          *  init, for logging). */
    485         bool                        fSupportsVmcsEferForRing3;
    486         /** Whether to use VMCS shadowing. */
    487         bool                        fUseVmcsShadowingForRing3;
    488         bool                        fAlignment2;
    489 
    490         /** Host CR4 value (set by ring-0 VMX init, for logging). */
    491         uint64_t                    u64HostCr4ForRing3;
    492         /** Host SMM monitor control (set by ring-0 VMX init, for logging). */
    493         uint64_t                    u64HostSmmMonitorCtlForRing3;
    494         /** Host EFER value (set by ring-0 VMX init, for logging and guest NX). */
    495         uint64_t                    u64HostMsrEferForRing3;
    496 
    497         /** The first valid host LBR branch-from-IP stack range. */
    498         uint32_t                    idLbrFromIpMsrFirstForRing3;
    499         /** The last valid host LBR branch-from-IP stack range. */
    500         uint32_t                    idLbrFromIpMsrLastForRing3;
    501 
    502         /** The first valid host LBR branch-to-IP stack range. */
    503         uint32_t                    idLbrToIpMsrFirstForRing3;
    504         /** The last valid host LBR branch-to-IP stack range. */
    505         uint32_t                    idLbrToIpMsrLastForRing3;
    506 
    507         /** Host-physical address for a failing VMXON instruction (for diagnostics, ring-3). */
    508         RTHCPHYS                    HCPhysVmxEnableError;
    509         /** VMX MSR values (only for ring-3 consumption). */
    510         VMXMSRS                     MsrsForRing3;
    511 
    512         /** Tagged-TLB flush type (only for ring-3 consumption). */
    513         VMXTLBFLUSHTYPE             enmTlbFlushTypeForRing3;
    514         /** Flush type to use for INVEPT (only for ring-3 consumption). */
    515         VMXTLBFLUSHEPT              enmTlbFlushEptForRing3;
    516         /** Flush type to use for INVVPID (only for ring-3 consumption). */
    517         VMXTLBFLUSHVPID             enmTlbFlushVpidForRing3;
    518         /** @} */
     466        /** Pause-loop exiting (PLE) gap in ticks. */
     467        uint32_t                    cPleGapTicks;
     468        /** Pause-loop exiting (PLE) window in ticks. */
     469        uint32_t                    cPleWindowTicks;
     470
     471        /** Virtual address of the TSS page used for real mode emulation. */
     472        R3PTRTYPE(PVBOXTSS)         pRealModeTSS;
     473        /** Virtual address of the identity page table used for real mode and protected
     474         *  mode without paging emulation in EPT mode. */
     475        R3PTRTYPE(PX86PD)           pNonPagingModeEPTPageTable;
    519476    } vmx;
    520477
     
    541498        uint16_t                    cPauseFilterThresholdTicks;
    542499        uint32_t                    u32Alignment2;
    543 
    544         /** @name For ring-3 consumption
    545          * @{ */
    546         /** SVM revision. */
    547         uint32_t                    u32Rev;
    548         /** SVM feature bits from cpuid 0x8000000a, ring-3 copy. */
    549         uint32_t                    fFeaturesForRing3;
    550         /** HWCR MSR (for diagnostics). */
    551         uint64_t                    u64MsrHwcr;
    552         /** @} */
    553500    } svm;
    554501
     
    566513    /** Size of the guest patch memory block. */
    567514    uint32_t                    cbGuestPatchMem;
    568 
    569     /** Last recorded error code during HM ring-0 init. */
    570     int32_t                     rcInit;
    571 
    572     /** Maximum ASID allowed.
    573      * This is mainly for the release log.  */
    574     uint32_t                    uMaxAsidForLog;
    575     /** World switcher flags (HM_WSF_XXX) for the release log. */
    576     uint32_t                    fWorldSwitcherForLog;
     515    uint32_t                    u32Alignment2;
     516
     517    /** For ring-3 use only. */
     518    struct
     519    {
     520        /** Last recorded error code during HM ring-0 init. */
     521        int32_t                     rcInit;
     522        uint32_t                    u32Alignment3;
     523
     524        /** Maximum ASID allowed.
     525         * This is mainly for the release log.  */
     526        uint32_t                    uMaxAsid;
     527        /** World switcher flags (HM_WSF_XXX) for the release log. */
     528        uint32_t                    fWorldSwitcher;
     529
     530        struct
     531        {
     532            /** Set if VPID is supported (ring-3 copy). */
     533            bool                        fVpid;
     534            /** Whether the CPU supports VMCS fields for swapping EFER (set by ring-0 VMX
     535             *  init, for logging). */
     536            bool                        fSupportsVmcsEfer;
     537            /** Whether to use VMCS shadowing. */
     538            bool                        fUseVmcsShadowing;
     539            bool                        fAlignment2;
     540
     541            /** Host CR4 value (set by ring-0 VMX init, for logging). */
     542            uint64_t                    u64HostCr4;
     543            /** Host SMM monitor control (set by ring-0 VMX init, for logging). */
     544            uint64_t                    u64HostSmmMonitorCtl;
     545            /** Host EFER value (set by ring-0 VMX init, for logging and guest NX). */
     546            uint64_t                    u64HostMsrEfer;
     547
     548            /** The first valid host LBR branch-from-IP stack range. */
     549            uint32_t                    idLbrFromIpMsrFirst;
     550            /** The last valid host LBR branch-from-IP stack range. */
     551            uint32_t                    idLbrFromIpMsrLast;
     552
     553            /** The first valid host LBR branch-to-IP stack range. */
     554            uint32_t                    idLbrToIpMsrFirst;
     555            /** The last valid host LBR branch-to-IP stack range. */
     556            uint32_t                    idLbrToIpMsrLast;
     557
     558            /** Host-physical address for a failing VMXON instruction (for diagnostics, ring-3). */
     559            RTHCPHYS                    HCPhysVmxEnableError;
     560            /** VMX MSR values (only for ring-3 consumption). */
     561            VMXMSRS                     Msrs;
     562
     563            /** Tagged-TLB flush type (only for ring-3 consumption). */
     564            VMXTLBFLUSHTYPE             enmTlbFlushType;
     565            /** Flush type to use for INVEPT (only for ring-3 consumption). */
     566            VMXTLBFLUSHEPT              enmTlbFlushEpt;
     567            /** Flush type to use for INVVPID (only for ring-3 consumption). */
     568            VMXTLBFLUSHVPID             enmTlbFlushVpid;
     569        } vmx;
     570
     571        struct
     572        {
     573            /** SVM revision. */
     574            uint32_t                    u32Rev;
     575            /** SVM feature bits from cpuid 0x8000000a, ring-3 copy. */
     576            uint32_t                    fFeatures;
     577            /** HWCR MSR (for diagnostics). */
     578            uint64_t                    u64MsrHwcr;
     579        } svm;
     580    } ForR3;
    577581
    578582    /** @name Configuration not used (much) after VM setup
     
    637641AssertCompileMemberAlignment(HM, vmx,                 8);
    638642AssertCompileMemberAlignment(HM, svm,                 8);
     643AssertCompileMemberAlignment(HM, StatTprPatchSuccess, 8);
     644AssertCompile(RTASSERT_OFFSET_OF(HM, PatchTree) <= 64); /* First cache line has the essentials for both VT-x and SVM operation. */
    639645
    640646
  • trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp

    r87543 r87563  
    368368
    369369    /* hm - 32-bit gcc won't align uint64_t naturally, so check. */
    370     CHECK_MEMBER_ALIGNMENT(HM, uMaxAsidForLog, 8);
    371370    CHECK_MEMBER_ALIGNMENT(HM, vmx, 8);
    372     CHECK_MEMBER_ALIGNMENT(HM, vmx.MsrsForRing3, 8);
    373371    CHECK_MEMBER_ALIGNMENT(HM, svm, 8);
     372    CHECK_MEMBER_ALIGNMENT(HM, ForR3.uMaxAsid, 8);
     373    CHECK_MEMBER_ALIGNMENT(HM, ForR3.vmx, 8);
    374374    CHECK_MEMBER_ALIGNMENT(HM, PatchTree, 8);
    375375    CHECK_MEMBER_ALIGNMENT(HM, aPatches, 8);
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette