VirtualBox

Changeset 49520 in vbox for trunk/src/VBox


Timestamp:
Nov 18, 2013 9:30:29 AM
Author:
vboxsync
Message:

VMM: MSR auto-load/store handling rewrite for VT-x.
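
For orientation, an editorial sketch (not part of the changeset): VT-x keeps two per-VCPU arrays of 16-byte MSR entries, one for guest values (pvGuestMsr) and one for host values (pvHostMsr), paired by index, and the VMCS fields VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT and VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT carry the entry count (see Intel spec. 24.7.2 "VM-Exit Controls for MSRs" and the corresponding VM-entry controls). The minimal C model below illustrates the find-or-append pairing that the new hmR0VmxAddAutoLoadStoreMsr() maintains; unlike the real code it takes the host value as a parameter instead of reading it with RDMSR, and addAutoMsr, paGuest and paHost are names invented for the sketch.

    /* Editorial sketch, not VirtualBox code. Entry layout uses the same field
       names as the VMXAUTOMSR entries handled in this diff. */
    #include <stdint.h>

    typedef struct VMXAUTOMSR
    {
        uint32_t u32Msr;        /* MSR index, e.g. MSR_K8_LSTAR */
        uint32_t u32Reserved;   /* must be zero */
        uint64_t u64Value;      /* value the CPU loads or stores */
    } VMXAUTOMSR;

    /* Find-or-append an MSR while keeping the guest and host arrays paired by
       index, the invariant hmR0VmxAddAutoLoadStoreMsr() maintains for
       pvGuestMsr/pvHostMsr. Returns the slot index used. */
    static uint32_t addAutoMsr(VMXAUTOMSR *paGuest, VMXAUTOMSR *paHost,
                               uint32_t *pcMsrs, uint32_t uMsr,
                               uint64_t uGuestValue, uint64_t uHostValue)
    {
        uint32_t i;
        for (i = 0; i < *pcMsrs; i++)
            if (paGuest[i].u32Msr == uMsr)
                break;                  /* already present: update in place */
        if (i == *pcMsrs)
            (*pcMsrs)++;                /* new slot; the real code also rewrites the
                                           three VMCS count fields at this point */
        paGuest[i].u32Msr      = uMsr;
        paGuest[i].u32Reserved = 0;
        paGuest[i].u64Value    = uGuestValue;
        paHost[i].u32Msr       = uMsr;  /* host entry stays paired with the guest entry */
        paHost[i].u32Reserved  = 0;
        paHost[i].u64Value     = uHostValue;
        return i;
    }

The VBOX_STRICT helper added further down (hmR0VmxCheckAutoLoadStoreMsrs) asserts exactly this pairing and that all three VMCS count fields equal cMsrs.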

Location:
trunk/src/VBox/VMM
Files:
3 edited

Legend:

Lines carrying only an r49478 line number were removed in this changeset; lines carrying only an r49520 line number were added; lines carrying both numbers are unchanged context.
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r49478 r49520  
    6464/** Use the function table. */
    6565#define HMVMX_USE_FUNCTION_TABLE
    66 
    67 /**
    68  * The maximum number of MSRs we are willing to swap during a world-switch.
    69  * Intel claims 512/check capability MSR, we don't want to do anywhere close
    70  * to that. See Intel spec. 24.7.2 "VM-Exit Controls for MSRs"
    71  *
    72  * Bump this count as and when required, there's no backward compatibility
    73  * requirement.
    74  */
    75 #define HMVMX_MAX_SWAP_MSR_COUNT                  5
    7666
    7767/** Determine which tagged-TLB flush handler to use. */
     
    9585#define HMVMX_UPDATED_GUEST_SEGMENT_REGS          RT_BIT(10)
    9686#define HMVMX_UPDATED_GUEST_DEBUG                 RT_BIT(11)
    97 #define HMVMX_UPDATED_GUEST_FS_BASE_MSR           RT_BIT(12)
    98 #define HMVMX_UPDATED_GUEST_GS_BASE_MSR           RT_BIT(13)
    99 #define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR       RT_BIT(14)
    100 #define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR      RT_BIT(15)
    101 #define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR      RT_BIT(16)
    102 #define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS  RT_BIT(17)
    103 #define HMVMX_UPDATED_GUEST_ACTIVITY_STATE        RT_BIT(18)
    104 #define HMVMX_UPDATED_GUEST_APIC_STATE            RT_BIT(19)
     87#define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR       RT_BIT(12)
     88#define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR      RT_BIT(13)
     89#define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR      RT_BIT(14)
     90#define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS  RT_BIT(15)
     91#define HMVMX_UPDATED_GUEST_ACTIVITY_STATE        RT_BIT(16)
     92#define HMVMX_UPDATED_GUEST_APIC_STATE            RT_BIT(17)
    10593#define HMVMX_UPDATED_GUEST_ALL                   (  HMVMX_UPDATED_GUEST_RIP                   \
    10694                                                   | HMVMX_UPDATED_GUEST_RSP                   \
     
    115103                                                   | HMVMX_UPDATED_GUEST_SEGMENT_REGS          \
    116104                                                   | HMVMX_UPDATED_GUEST_DEBUG                 \
    117                                                    | HMVMX_UPDATED_GUEST_FS_BASE_MSR           \
    118                                                    | HMVMX_UPDATED_GUEST_GS_BASE_MSR           \
    119105                                                   | HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR       \
    120106                                                   | HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR      \
     
    309295    VMXMSREXIT_PASSTHRU_READ
    310296} VMXMSREXITREAD;
     297/** Pointer to MSR-bitmap read permissions. */
     298typedef VMXMSREXITREAD* PVMXMSREXITREAD;
    311299
    312300/**
     
    320308    VMXMSREXIT_PASSTHRU_WRITE
    321309} VMXMSREXITWRITE;
     310/** Pointer to MSR-bitmap write permissions. */
     311typedef VMXMSREXITWRITE* PVMXMSREXITWRITE;
     312
    322313
    323314/**
     
    345336static void               hmR0VmxFlushEpt(PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush);
    346337static void               hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr);
    347 static void               hmR0VmxClearEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx);
    348338static int                hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,
    349339                                                 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntState);
     
    413403static int          hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
    414404static int          hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
     405#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    415406static int          hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
     407#endif
    416408static uint32_t     hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
    417409
     
    589581 *
    590582 * @returns VBox status code.
    591  * @param   pVCpu           Pointer to the VMCPU.
    592583 * @param   pVmxTransient   Pointer to the VMX transient structure.
    593584 *
    594585 * @remarks No-long-jump zone!!!
    595586 */
    596 DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     587DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
    597588{
    598589    int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
     
    607598 *
    608599 * @returns VBox status code.
    609  * @param   pVCpu           Pointer to the VMCPU.
    610600 * @param   pVmxTransient   Pointer to the VMX transient structure.
    611601 */
    612 DECLINLINE(int) hmR0VmxReadExitIntInfoVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     602DECLINLINE(int) hmR0VmxReadExitIntInfoVmcs(PVMXTRANSIENT pVmxTransient)
    613603{
    614604    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))
     
    627617 *
    628618 * @returns VBox status code.
    629  * @param   pVCpu           Pointer to the VMCPU.
    630619 * @param   pVmxTransient   Pointer to the VMX transient structure.
    631620 */
    632 DECLINLINE(int) hmR0VmxReadExitIntErrorCodeVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     621DECLINLINE(int) hmR0VmxReadExitIntErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
    633622{
    634623    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))
     
    650639 * @param   pVmxTransient   Pointer to the VMX transient structure.
    651640 */
    652 DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     641DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMXTRANSIENT pVmxTransient)
    653642{
    654643    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN))
     
    667656 *
    668657 * @returns VBox status code.
    669  * @param   pVCpu           The cross context per CPU structure.
    670658 * @param   pVmxTransient   Pointer to the VMX transient structure.
    671659 */
    672 DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     660DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMXTRANSIENT pVmxTransient)
    673661{
    674662    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO))
     
    686674 *
    687675 * @returns VBox status code.
    688  * @param   pVCpu           Pointer to the VMCPU.
    689676 * @param   pVmxTransient   Pointer to the VMX transient structure.
    690677 */
    691 DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
     678DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMXTRANSIENT pVmxTransient)
    692679{
    693680    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION))
     
    11631150
    11641151
     1152#ifdef VBOX_STRICT
     1153/**
     1154 * Gets the permission bits for the specified MSR in the MSR bitmap.
     1155 *
     1156 * @returns VBox status code.
     1157 * @retval VINF_SUCCESS        if the specified MSR is found.
     1158 * @retval VERR_NOT_FOUND      if the specified MSR is not found.
     1159 * @retval VERR_NOT_SUPPORTED  if VT-x doesn't allow the MSR.
     1160 *
     1161 * @param   pVCpu           Pointer to the VMCPU.
     1162 * @param   uMsr            The MSR.
     1163 * @param   penmRead        Where to store the read permissions.
     1164 * @param   penmWrite       Where to store the write permissions.
     1165 */
     1166static int hmR0VmxGetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, PVMXMSREXITREAD penmRead, PVMXMSREXITWRITE penmWrite)
     1167{
     1168    AssertPtrReturn(penmRead,  VERR_INVALID_PARAMETER);
     1169    AssertPtrReturn(penmWrite, VERR_INVALID_PARAMETER);
     1170    int32_t iBit;
     1171    uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
     1172
     1173    /* See hmR0VmxSetMsrPermission() for the layout. */
     1174    if (uMsr <= 0x00001FFF)
     1175        iBit = uMsr;
     1176    else if (   uMsr >= 0xC0000000
     1177             && uMsr <= 0xC0001FFF)
     1178    {
     1179        iBit = (uMsr - 0xC0000000);
     1180        pbMsrBitmap += 0x400;
     1181    }
     1182    else
     1183    {
     1184        AssertMsgFailed(("hmR0VmxGetMsrPermission: Invalid MSR %#RX32\n", uMsr));
     1185        return VERR_NOT_SUPPORTED;
     1186    }
     1187
     1188    Assert(iBit <= 0x1fff);
     1189    if (ASMBitTest(pbMsrBitmap, iBit))
     1190        *penmRead = VMXMSREXIT_INTERCEPT_READ;
     1191    else
     1192        *penmRead = VMXMSREXIT_PASSTHRU_READ;
     1193
     1194    if (ASMBitTest(pbMsrBitmap + 0x800, iBit))
     1195        *penmWrite = VMXMSREXIT_INTERCEPT_WRITE;
     1196    else
     1197        *penmWrite = VMXMSREXIT_PASSTHRU_WRITE;
     1198    return VINF_SUCCESS;
     1199}
     1200#endif /* VBOX_STRICT */
     1201
     1202
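
The lookup above follows the standard 4 KB MSR-bitmap layout from the Intel SDM: a read bitmap for MSRs 0x00000000..0x00001FFF at offset 0x000, a read bitmap for 0xC0000000..0xC0001FFF at 0x400, and the matching write bitmaps at 0x800 and 0xC00, with a set bit meaning the access causes a VM-exit. An editorial sketch of that offset arithmetic, assuming this layout (which matches the offsets hmR0VmxGetMsrPermission() uses); msrBitmapBitOffset is an invented name, not a VirtualBox function:

    #include <stdint.h>
    #include <stdbool.h>

    /* Editorial sketch: compute the byte offset and bit index of an MSR's
       permission bit inside the 4 KB VT-x MSR bitmap. */
    static bool msrBitmapBitOffset(uint32_t uMsr, bool fWrite,
                                   uint32_t *poffByte, uint32_t *piBit)
    {
        uint32_t offBase;
        if (uMsr <= 0x00001FFF)
            offBase = 0x000;                    /* low MSR range */
        else if (uMsr >= 0xC0000000 && uMsr <= 0xC0001FFF)
        {
            offBase = 0x400;                    /* high (AMD64) MSR range */
            uMsr   -= 0xC0000000;
        }
        else
            return false;                       /* not covered by the bitmap: always exits */
        if (fWrite)
            offBase += 0x800;                   /* write bitmaps follow the read bitmaps */
        *poffByte = offBase + uMsr / 8;
        *piBit    = uMsr % 8;
        return true;
    }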
    11651203#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    11661204/**
     
    11741212DECLINLINE(int) hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, uint32_t cMsrs)
    11751213{
    1176     /* Update the VCPU's copy of the guest MSR count. */
    1177     pVCpu->hm.s.vmx.cGuestMsrs = cMsrs;
     1214    /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
     1215    uint32_t const cMaxSupportedMsrs = MSR_IA32_VMX_MISC_MAX_MSR(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc);
     1216    if (RT_UNLIKELY(cMsrs > cMaxSupportedMsrs))
     1217    {
     1218        LogRel(("CPU auto-load/store MSR count in VMCS exceeded cMsrs=%u Supported=%u.\n", cMsrs, cMaxSupportedMsrs));
     1219        pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
     1220        return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
     1221    }
    11781222
    11791223    /* Update number of guest MSRs to load/store across the world-switch. */
     
    11831227    /* Update number of host MSRs to load after the world-switch. Identical to guest-MSR count as it's always paired. */
    11841228    rc     = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT,  cMsrs);  AssertRCReturn(rc, rc);
     1229
     1230    /* Update the VCPU's copy of the MSR count. */
     1231    pVCpu->hm.s.vmx.cMsrs = cMsrs;
     1232
    11851233    return VINF_SUCCESS;
    11861234}
     
    11881236
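
Instead of the old hard-coded HMVMX_MAX_SWAP_MSR_COUNT, the count is now validated against the CPU's own limit via MSR_IA32_VMX_MISC_MAX_MSR(). A hedged sketch of what that limit decodes to, assuming the macro follows the Intel SDM definition (bits 27:25 of IA32_VMX_MISC give N, and the recommended maximum list size is 512 * (N + 1)); vmxMiscMaxMsrCount is an illustrative name only:

    #include <stdint.h>

    /* Editorial sketch: recommended maximum number of MSR-area entries derived
       from IA32_VMX_MISC, assumed to be what MSR_IA32_VMX_MISC_MAX_MSR() computes. */
    static uint32_t vmxMiscMaxMsrCount(uint64_t u64VmxMisc)
    {
        return 512 * (uint32_t)(((u64VmxMisc >> 25) & 0x7) + 1);
    }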
    11891237/**
    1190  * Adds a guest/host MSR pair to be swapped during the world-switch as
    1191  * part of the auto-load/store MSR area in the VMCS.
     1238 * Adds a new (or updates the value of an existing) guest/host MSR
     1239 * pair to be swapped during the world-switch as part of the
     1240 * auto-load/store MSR area in the VMCS.
     1241 *
     1242 * @returns VBox status code.
     1243 * @param   pVCpu           Pointer to the VMCPU.
     1244 * @param   uMsr            The MSR.
     1245 * @param   uGuestMsr       Value of the guest MSR.
     1246 * @param   fUpdateHostMsr  Whether to update the value of the host MSR if
     1247 *                          necessary.
     1248 */
     1249static int hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr, uint64_t uGuestMsrValue, bool fUpdateHostMsr)
     1250{
     1251    PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
     1252    uint32_t    cMsrs     = pVCpu->hm.s.vmx.cMsrs;
     1253    uint32_t    i;
     1254    for (i = 0; i < cMsrs; i++)
     1255    {
     1256        if (pGuestMsr->u32Msr == uMsr)
     1257            break;
     1258        pGuestMsr++;
     1259    }
     1260
     1261    bool fAdded = false;
     1262    if (i == cMsrs)
     1263    {
     1264        ++cMsrs;
     1265        int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
     1266        AssertRCReturn(rc, rc);
     1267
     1268        /* Now that we're swapping MSRs during the world-switch, allow the guest to read/write them without causing VM-exits. */
     1269        if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
     1270            hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
     1271
     1272        fAdded = true;
     1273    }
     1274
     1275    /* Update the MSR values in the auto-load/store MSR area. */
     1276    pGuestMsr->u32Msr    = uMsr;
     1277    pGuestMsr->u64Value  = uGuestMsrValue;
     1278
     1279    /* Create/update the MSR slot in the host MSR area. */
     1280    PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
     1281    pHostMsr += i;
     1282    pHostMsr->u32Msr     = uMsr;
     1283
     1284    /*
      1285     * Update the host MSR only when requested by the caller AND when we're
     1286     * adding it to the auto-load/store area. Otherwise, it would have been
     1287     * updated by hmR0VmxSaveHostMsrs(). We do this for performance reasons.
     1288     */
     1289    if (   fAdded
     1290        && fUpdateHostMsr)
     1291    {
     1292        pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
     1293    }
     1294
     1295    return VINF_SUCCESS;
     1296}
     1297
     1298
     1299/**
      1300 * Removes a guest/host MSR pair to be swapped during the world-switch from the
     1301 * auto-load/store MSR area in the VMCS.
     1302 *
     1303 * Does not fail if the MSR in @a uMsr is not found in the auto-load/store MSR
     1304 * area.
    11921305 *
    11931306 * @returns VBox status code.
    11941307 * @param   pVCpu       Pointer to the VMCPU.
    11951308 * @param   uMsr        The MSR.
    1196  * @param   uGuestMsr   Value of the guest MSR.
    1197  * @param   uHostMsr    Value of the host MSR.
    1198  */
    1199 static int hmR0VmxAddAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr, uint64_t uGuestMsrValue, uint64_t uHostMsrValue)
    1200 {
    1201     AssertMsg(HMVMX_MAX_SWAP_MSR_COUNT < MSR_IA32_VMX_MISC_MAX_MSR(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc),
    1202               ("MSR swap count exceeded. Cpu reports %#RX32, our limit %#RX32\n",
    1203                MSR_IA32_VMX_MISC_MAX_MSR(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc), HMVMX_MAX_SWAP_MSR_COUNT));
    1204 
    1205     PVMXAUTOMSR pGuestMsr  = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    1206     uint32_t    cGuestMsrs = pVCpu->hm.s.vmx.cGuestMsrs;
    1207     uint32_t    i;
    1208     for (i = 0; i < cGuestMsrs; i++)
    1209     {
    1210         if (pGuestMsr->u32Msr == uMsr)
    1211             break;
    1212         pGuestMsr++;
    1213     }
    1214 
    1215     AssertReturn(i < HMVMX_MAX_SWAP_MSR_COUNT, VERR_HM_MSR_SWAP_COUNT_EXCEEDED);
    1216     if (i == cGuestMsrs)
    1217     {
    1218         ++cGuestMsrs;
    1219         /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
    1220         if (RT_UNLIKELY(cGuestMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.u64Misc)))
    1221         {
    1222             LogRel(("CPU autoload/store MSR count in VMCS exceeded cGuestMsrs=%u.\n", cGuestMsrs));
    1223             pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
    1224             return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    1225         }
    1226 
    1227         int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cGuestMsrs);
    1228         AssertRCReturn(rc, rc);
    1229     }
    1230 
    1231     /* Update the MSR values in the auto-load/store MSR area. */
    1232     pGuestMsr->u32Msr    = uMsr;
    1233     pGuestMsr->u64Value  = uGuestMsrValue;
    1234 
    1235     PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
    1236     pHostMsr += i;
    1237     pHostMsr->u32Msr     = uMsr;
    1238     pHostMsr->u64Value   = uHostMsrValue;
    1239 
    1240     /* Now that we're swapping MSRs during the world-switch, allow the guest to read/write them without causing VM-exits. */
    1241     Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS);
    1242     hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    1243     return VINF_SUCCESS;
    1244 }
    1245 
    1246 
    1247 /**
    1248  * Removes a guest/shost MSR pair to be swapped during the world-switch from the
    1249  * auto-load/store MSR area in the VMCS.
    1250  *
    1251  * Does not fail if the MSR in @a uMsr is not found in the auto-load/store MSR
    1252  * area.
    1253  *
    1254  * @returns VBox status code.
    1255  * @param   pVCpu       Pointer to the VMCPU.
    1256  * @param   uMsr        The MSR.
    12571309 */
    12581310static int hmR0VmxRemoveAutoLoadStoreMsr(PVMCPU pVCpu, uint32_t uMsr)
    12591311{
    1260     PVMXAUTOMSR pGuestMsr  = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    1261     uint32_t    cGuestMsrs = pVCpu->hm.s.vmx.cGuestMsrs;
    1262     uint32_t    i;
    1263     for (i = 0; i < cGuestMsrs; i++)
     1312    PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
     1313    uint32_t    cMsrs     = pVCpu->hm.s.vmx.cMsrs;
     1314    for (uint32_t i = 0; i < cMsrs; i++)
    12641315    {
    12651316        /* Find the MSR. */
     
    12671318        {
    12681319            /* If it's the last MSR, simply reduce the count. */
    1269             if (i == cGuestMsrs - 1)
     1320            if (i == cMsrs - 1)
    12701321            {
    1271                 --cGuestMsrs;
     1322                --cMsrs;
    12721323                break;
    12731324            }
     
    12751326            /* Remove it by swapping the last MSR in place of it, and reducing the count. */
    12761327            PVMXAUTOMSR pLastGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    1277             pLastGuestMsr            += cGuestMsrs;
     1328            pLastGuestMsr            += cMsrs;
    12781329            pGuestMsr->u32Msr         = pLastGuestMsr->u32Msr;
    12791330            pGuestMsr->u64Value       = pLastGuestMsr->u64Value;
     
    12811332            PVMXAUTOMSR pHostMsr     = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
    12821333            PVMXAUTOMSR pLastHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
    1283             pLastHostMsr            += cGuestMsrs;
     1334            pLastHostMsr            += cMsrs;
    12841335            pHostMsr->u32Msr         = pLastHostMsr->u32Msr;
    12851336            pHostMsr->u64Value       = pLastHostMsr->u64Value;
    1286             --cGuestMsrs;
     1337            --cMsrs;
    12871338            break;
    12881339        }
     
    12911342
    12921343    /* Update the VMCS if the count changed (meaning the MSR was found). */
    1293     if (cGuestMsrs != pVCpu->hm.s.vmx.cGuestMsrs)
    1294     {
    1295         int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cGuestMsrs);
     1344    if (cMsrs != pVCpu->hm.s.vmx.cMsrs)
     1345    {
     1346        int rc = hmR0VmxSetAutoLoadStoreMsrCount(pVCpu, cMsrs);
    12961347        AssertRCReturn(rc, rc);
    1297     }
    1298 
    1299     /* We're no longer swapping MSRs during the world-switch, intercept guest read/writes to them. */
    1300     hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
     1348
     1349        /* We're no longer swapping MSRs during the world-switch, intercept guest read/writes to them. */
     1350        if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
     1351            hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE);
     1352    }
     1353
    13011354    return VINF_SUCCESS;
    13021355}
     
    13041357
    13051358/**
    1306  * Updates the value of a host MSR in the auto-load/store area in the VMCS.
    1307  *
    1308  * @returns VBox status code.
    1309  * @param   pVCpu               Pointer to the VMCPU.
    1310  * @param   uMsr                The MSR.
    1311  */
    1312 static int hmR0VmxUpdateAutoLoadStoreHostMsr(PVMCPU pVCpu, uint32_t uMsr)
    1313 {
    1314     PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
    1315     uint32_t    cMsrs    = pVCpu->hm.s.vmx.cGuestMsrs;
    1316 
    1317     for (uint32_t i = 0; i < cMsrs; i++)
    1318     {
    1319         if (pHostMsr->u32Msr == uMsr)
    1320         {
    1321             pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
    1322             return VINF_SUCCESS;
    1323         }
    1324     }
    1325 
    1326     return VERR_NOT_FOUND;
    1327 }
    1328 
     1359 * Checks if the specified guest MSR is part of the auto-load/store area in
     1360 * the VMCS.
     1361 *
     1362 * @returns true if found, false otherwise.
     1363 * @param   pVCpu       Pointer to the VMCPU.
     1364 * @param   uMsr        The MSR to find.
     1365 */
     1366static bool hmR0VmxIsAutoLoadStoreGuestMsr(PVMCPU pVCpu, uint32_t uMsr)
     1367{
     1368    PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
     1369    uint32_t    cMsrs     = pVCpu->hm.s.vmx.cMsrs;
     1370
     1371    for (uint32_t i = 0; i < cMsrs; i++, pGuestMsr++)
     1372    {
     1373        if (pGuestMsr->u32Msr == uMsr)
     1374            return true;
     1375    }
     1376    return false;
     1377}
     1378
     1379
     1380/**
     1381 * Updates the value of all host MSRs in the auto-load/store area in the VMCS.
     1382 *
     1383 * @param   pVCpu           Pointer to the VMCPU.
     1384 */
     1385static void hmR0VmxUpdateAutoLoadStoreHostMsrs(PVMCPU pVCpu)
     1386{
     1387    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1388    PVMXAUTOMSR pHostMsr  = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
     1389    PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
     1390    uint32_t    cMsrs    = pVCpu->hm.s.vmx.cMsrs;
     1391
     1392    for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
     1393    {
     1394        AssertReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr);
     1395        pHostMsr->u64Value = ASMRdMsr(pHostMsr->u32Msr);
     1396    }
     1397
     1398    pVCpu->hm.s.vmx.fUpdatedHostMsrs = true;
     1399}
     1400
     1401
     1402#ifdef VBOX_STRICT
     1403/**
     1404 * Verifies whether the guest/host MSR pairs in the auto-load/store area in the
     1405 * VMCS are correct.
     1406 *
     1407 * @param   pVCpu           Pointer to the VMCPU.
     1408 */
     1409static void hmR0VmxCheckAutoLoadStoreMsrs(PVMCPU pVCpu)
     1410{
     1411    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1412
      1413    /* Verify MSR counts in the VMCS are what we think they should be.  */
     1414    uint32_t cMsrs;
     1415    int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &cMsrs);  AssertRC(rc);
     1416    Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
     1417
     1418    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &cMsrs);      AssertRC(rc);
     1419    Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
     1420
     1421    rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &cMsrs);       AssertRC(rc);
     1422    Assert(cMsrs == pVCpu->hm.s.vmx.cMsrs);
     1423
     1424    PVMXAUTOMSR pHostMsr  = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
     1425    PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
     1426    for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++)
     1427    {
     1428        /* Verify that the MSRs are paired properly and that the host MSR has the correct value. */
     1429        AssertMsgReturnVoid(pHostMsr->u32Msr == pGuestMsr->u32Msr, ("HostMsr=%#RX32 GuestMsr=%#RX32\n", pHostMsr->u32Msr,
     1430                                                                    pGuestMsr->u32Msr));
     1431
     1432        uint64_t u64Msr = ASMRdMsr(pHostMsr->u32Msr);
     1433        AssertMsgReturnVoid(pHostMsr->u64Value == u64Msr, ("u32Msr=%#RX32 VMCS Value=%#RX64 ASMRdMsr=%#RX64\n", pHostMsr->u32Msr,
     1434                                                           pHostMsr->u64Value, u64Msr));
     1435
     1436        /* Verify that the permissions are as expected in the MSR bitmap. */
     1437        if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
     1438        {
     1439            VMXMSREXITREAD  enmRead;
     1440            VMXMSREXITWRITE enmWrite;
     1441            rc = hmR0VmxGetMsrPermission(pVCpu, pGuestMsr->u32Msr, &enmRead, &enmWrite);
     1442            AssertMsgReturnVoid(rc == VINF_SUCCESS, ("hmR0VmxGetMsrPermission! failed. rc=%Rrc\n", rc));
     1443            AssertMsgReturnVoid(enmRead == VMXMSREXIT_PASSTHRU_READ, ("u32Msr=%#RX32 No passthru read permission!\n",
     1444                                                                      pGuestMsr->u32Msr));
     1445            AssertMsgReturnVoid(enmWrite == VMXMSREXIT_PASSTHRU_WRITE, ("u32Msr=%#RX32 No passthru write permission!\n",
     1446                                                                        pGuestMsr->u32Msr));
     1447        }
     1448    }
     1449}
     1450# endif /* VBOX_STRICT */
    13291451#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
    13301452
     
    13811503static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr)
    13821504{
     1505    NOREF(pVM);
    13831506    AssertPtr(pVM);
    13841507    Assert(pVM->hm.s.vmx.fVpid);
     
    14641587VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
    14651588{
     1589    NOREF(pVM); NOREF(GCPhys);
    14661590    LogFlowFunc(("%RGp\n", GCPhys));
    14671591
     
    20462170        /*
    20472171         * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
    2048          * automatically (either as part of the MSR-load/store areas or dedicated fields in the VMCS).
     2172         * automatically as dedicated fields in the VMCS.
    20492173         */
    20502174        hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS,  VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    20512175        hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    20522176        hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    2053         hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR,          VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    2054         hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR,           VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    2055         hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK,        VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    2056         hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    20572177        hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE,        VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    20582178        hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE,        VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
     
    21202240
    21212241        if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    2122         {
    21232242            val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;                     /* Enable RDTSCP support. */
    2124             if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
    2125                 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
    2126         }
    21272243
    21282244        if ((val & zap) != val)
     
    21602276static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu)
    21612277{
     2278    NOREF(pVM);
    21622279    AssertPtr(pVM);
    21632280    AssertPtr(pVCpu);
     
    24292546DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu)
    24302547{
     2548    NOREF(pVM); NOREF(pVCpu);
     2549
    24312550    RTCCUINTREG uReg = ASMGetCR0();
    24322551    int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
     
    24892608DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu)
    24902609{
     2610    NOREF(pVM);
    24912611    int rc = VERR_INTERNAL_ERROR_5;
    24922612
     
    27142834DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
    27152835{
     2836    NOREF(pVM);
     2837
    27162838    AssertPtr(pVCpu);
    27172839    AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
     
    27192841    int rc = VINF_SUCCESS;
    27202842#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     2843#if 0
    27212844    PVMXAUTOMSR  pHostMsr       = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
    27222845    uint32_t     cHostMsrs      = 0;
     
    27622885        if (CPUMIsGuestInLongMode(pVCpu))
    27632886        {
    2764             /* Must match the EFER value in our 64 bits switcher. */
     2887            /* Must match the EFER value in our 64-bit switcher. */
    27652888            pHostMsr->u64Value = u64HostEfer | MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE;
    27662889        }
     
    28122935
    28132936    rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
     2937#endif
     2938
     2939    if (pVCpu->hm.s.vmx.cMsrs > 0)
     2940        hmR0VmxUpdateAutoLoadStoreHostMsrs(pVCpu);
    28142941#endif  /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
    28152942
     
    29233050DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    29243051{
     3052    NOREF(pMixedCtx);
     3053
    29253054    int rc = VINF_SUCCESS;
    29263055    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_EXIT_CTLS))
     
    29923121DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    29933122{
     3123    NOREF(pMixedCtx);
     3124
    29943125    int rc = VINF_SUCCESS;
    29953126    if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE))
     
    30873218static int hmR0VmxLoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)
    30883219{
     3220    NOREF(pVCpu);
    30893221    AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState));   /* Bits 31:4 MBZ. */
    30903222    Assert((uIntrState & 0x3) != 0x3);                              /* Block-by-STI and MOV SS cannot be simultaneously set. */
     
    35383670                /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
    35393671                u32GuestCR4 |= X86_CR4_PSE;
    3540                 /* Our identity mapping is a 32 bits page directory. */
     3672                /* Our identity mapping is a 32-bit page directory. */
    35413673                u32GuestCR4 &= ~X86_CR4_PAE;
    35423674            }
     
    39534085 * @param   idxAccess   Index of the access rights of the segment in the VMCS.
    39544086 * @param   pSelReg     Pointer to the segment selector.
    3955  * @param   pCtx        Pointer to the guest-CPU context.
    39564087 *
    39574088 * @remarks No-long-jump zone!!!
    39584089 */
    39594090static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
    3960                                        uint32_t idxAccess, PCPUMSELREG pSelReg, PCPUMCTX pCtx)
     4091                                       uint32_t idxAccess, PCPUMSELREG pSelReg)
    39614092{
    39624093    int rc = VMXWriteVmcs32(idxSel,    pSelReg->Sel);       /* 16-bit guest selector field. */
     
    40494180#endif
    40504181        rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
    4051                                      VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs, pMixedCtx);
     4182                                     VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs);
    40524183        AssertRCReturn(rc, rc);
    40534184        rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
    4054                                      VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss, pMixedCtx);
     4185                                     VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss);
    40554186        AssertRCReturn(rc, rc);
    40564187        rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
    4057                                      VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds, pMixedCtx);
     4188                                     VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds);
    40584189        AssertRCReturn(rc, rc);
    40594190        rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
    4060                                      VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es, pMixedCtx);
     4191                                     VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es);
    40614192        AssertRCReturn(rc, rc);
    40624193        rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
    4063                                      VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs, pMixedCtx);
     4194                                     VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs);
    40644195        AssertRCReturn(rc, rc);
    40654196        rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
    4066                                      VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs, pMixedCtx);
     4197                                     VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs);
    40674198        AssertRCReturn(rc, rc);
    40684199
     
    42174348 * areas. These MSRs will automatically be loaded to the host CPU on every
    42184349 * successful VM entry and stored from the host CPU on every successful VM exit.
     4350 *
     4351 * This also creates/updates MSR slots for the host MSRs. The actual host
     4352 * MSR values are -not- updated here for performance reasons. See
     4353 * hmR0VmxSaveHostMsrs().
     4354 *
    42194355 * Also loads the sysenter MSRs into the guest-state area in the VMCS.
    42204356 *
     
    42334369
    42344370    /*
    4235      * MSRs covered by Auto-load/store: EFER, LSTAR, STAR, SF_MASK, TSC_AUX (RDTSCP).
      4371     * Shared MSRs for which we use the auto-load/store MSR area in the VMCS.
    42364372     */
    42374373    int rc = VINF_SUCCESS;
     
    42394375    {
    42404376#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    4241         PVM         pVM        = pVCpu->CTX_SUFF(pVM);
    4242         PVMXAUTOMSR pGuestMsr  = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    4243         uint32_t    cGuestMsrs = 0;
    4244 
    42454377        /* See Intel spec. 4.1.4 "Enumeration of Paging Features by CPUID". */
    4246         /** @todo r=ramshankar: Optimize this further to do lazy restoration and only
    4247          *        when the guest really is in 64-bit mode. */
     4378        PVM pVM = pVCpu->CTX_SUFF(pVM);
    42484379        bool fSupportsLongMode = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
    42494380        if (fSupportsLongMode)
    42504381        {
    4251             pGuestMsr->u32Msr      = MSR_K8_LSTAR;
    4252             pGuestMsr->u32Reserved = 0;
    4253             pGuestMsr->u64Value    = pMixedCtx->msrLSTAR;           /* 64 bits mode syscall rip */
    4254             pGuestMsr++; cGuestMsrs++;
    4255             pGuestMsr->u32Msr      = MSR_K6_STAR;
    4256             pGuestMsr->u32Reserved = 0;
    4257             pGuestMsr->u64Value    = pMixedCtx->msrSTAR;            /* legacy syscall eip, cs & ss */
    4258             pGuestMsr++; cGuestMsrs++;
    4259             pGuestMsr->u32Msr      = MSR_K8_SF_MASK;
    4260             pGuestMsr->u32Reserved = 0;
    4261             pGuestMsr->u64Value    = pMixedCtx->msrSFMASK;          /* syscall flag mask */
    4262             pGuestMsr++; cGuestMsrs++;
    4263             pGuestMsr->u32Msr      = MSR_K8_KERNEL_GS_BASE;
    4264             pGuestMsr->u32Reserved = 0;
    4265             pGuestMsr->u64Value    = pMixedCtx->msrKERNELGSBASE;    /* swapgs exchange value */
    4266             pGuestMsr++; cGuestMsrs++;
    4267         }
    4268 
    4269         /*
    4270          * RDTSCP requires the TSC_AUX MSR. Host and guest share the physical MSR. So we have to
    4271          * load the guest's copy always (since the MSR bitmap allows passthru unconditionally).
    4272          */
    4273         if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
    4274         {
    4275             pGuestMsr->u32Msr      = MSR_K8_TSC_AUX;
    4276             pGuestMsr->u32Reserved = 0;
    4277             rc = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pGuestMsr->u64Value);
    4278             AssertRCReturn(rc, rc);
    4279             pGuestMsr++; cGuestMsrs++;
    4280         }
    4281 
    4282         /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */
    4283         if (cGuestMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.Msrs.u64Misc))
    4284         {
    4285             LogRel(("CPU autoload/store MSR count in VMCS exceeded cGuestMsrs=%u.\n", cGuestMsrs));
    4286             pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
    4287             return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
    4288         }
    4289 
    4290         /* Update the VCPU's copy of the guest MSR count. */
    4291         pVCpu->hm.s.vmx.cGuestMsrs = cGuestMsrs;
    4292         rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cGuestMsrs);          AssertRCReturn(rc, rc);
    4293         rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cGuestMsrs);          AssertRCReturn(rc, rc);
     4382            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR,          pMixedCtx->msrLSTAR,        false /* fUpdateHostMsr */);
     4383            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR,           pMixedCtx->msrSTAR,         false /* fUpdateHostMsr */);
     4384            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_SF_MASK,        pMixedCtx->msrSFMASK,       false /* fUpdateHostMsr */);
     4385            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_KERNEL_GS_BASE, pMixedCtx->msrKERNELGSBASE, false /* fUpdateHostMsr */);
     4386        }
     4387
     4388# ifdef DEBUG
     4389        PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
     4390        for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
     4391            Log4(("MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value));
     4392# endif
    42944393#endif  /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
    42954394
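
As the doc comment for this function notes, only the guest values are written here and the paired host slots are merely created; the host values are filled in later, in one pass, when hmR0VmxSaveHostMsrs() calls hmR0VmxUpdateAutoLoadStoreHostMsrs(). A plain-C sketch of that lazy one-pass refresh; MsrSlot, updateHostMsrSlots and readHostMsr are invented for illustration, with readHostMsr standing in for the ring-0 ASMRdMsr() the real code uses:

    #include <stdint.h>
    #include <stdbool.h>

    typedef struct { uint32_t u32Msr; uint64_t u64Value; } MsrSlot;

    /* Hypothetical stand-in for ASMRdMsr(); reading real MSRs requires ring-0. */
    extern uint64_t readHostMsr(uint32_t uMsr);

    /* Mirrors hmR0VmxUpdateAutoLoadStoreHostMsrs(): refresh every host slot in one
       pass and remember that the host values are now valid (cf. fUpdatedHostMsrs). */
    static void updateHostMsrSlots(MsrSlot *paHost, uint32_t cMsrs, bool *pfUpdatedHostMsrs)
    {
        for (uint32_t i = 0; i < cMsrs; i++)
            paHost[i].u64Value = readHostMsr(paHost[i].u32Msr);
        *pfUpdatedHostMsrs = true;
    }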
     
    43374436static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pCtx)
    43384437{
     4438    NOREF(pCtx);
    43394439    /** @todo See if we can make use of other states, e.g.
    43404440     *        VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT.  */
     
    44554555            int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
    44564556            rc    |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
    4457             rc    |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     4557            rc    |= hmR0VmxReadExitQualificationVmcs(pVmxTransient);
    44584558            AssertRC(rc);
    44594559
     
    46344734            break;
    46354735    }
    4636     NOREF(pVM);
     4736    NOREF(pVM); NOREF(pCtx);
    46374737}
    46384738
     
    47654865
    47664866/**
    4767  * Prepares for and executes VMLAUNCH (64 bits guests) for 32-bit hosts
     4867 * Prepares for and executes VMLAUNCH (64-bit guests) for 32-bit hosts
    47684868 * supporting 64-bit guests.
    47694869 *
     
    49405040 * @param   pVCpu           Pointer to the VMCPU.
    49415041 * @param   idxField        The VMCS field encoding.
    4942  * @param   u64Val          16, 32 or 64 bits value.
     5042 * @param   u64Val          16, 32 or 64-bit value.
    49435043 */
    49445044VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
     
    50375137 * @param   pVCpu       Pointer to the VMCPU.
    50385138 * @param   idxField    The VMCS field encoding.
    5039  * @param   u64Val      16, 32 or 64 bits value.
     5139 * @param   u64Val      16, 32 or 64-bit value.
    50405140 */
    50415141VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
     
    51135213 * @returns VBox status code.
    51145214 * @param   pVCpu           Pointer to the VMCPU.
    5115  * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
    5116  *                          out-of-sync. Make sure to update the required fields
    5117  *                          before using them.
     5215 *
    51185216 * @remarks No-long-jump zone!!!
    51195217 */
    5120 static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
     5218static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu)
    51215219{
    51225220    int  rc            = VERR_INTERNAL_ERROR_5;
     
    52345332DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    52355333{
     5334    NOREF(pMixedCtx);
    52365335    uint32_t u32IntInfo  = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
    52375336    u32IntInfo          |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
     
    52655364    if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
    52665365    {
    5267         rc = hmR0VmxReadExitIntInfoVmcs(pVCpu, pVmxTransient);
     5366        rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
    52685367        AssertRCReturn(rc, rc);
    52695368
     
    54035502static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    54045503{
     5504    NOREF(pMixedCtx);
     5505
    54055506    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0))
    54065507    {
     
    54345535static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    54355536{
     5537    NOREF(pMixedCtx);
     5538
    54365539    int rc = VINF_SUCCESS;
    54375540    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR4))
     
    56015704static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    56025705{
     5706    NOREF(pMixedCtx);
    56035707    /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */
    56045708    pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_ACTIVITY_STATE;
     
    56485752
    56495753/**
    5650  * Saves the guest FS_BASE MSRs from the current VMCS into the guest-CPU
    5651  * context.
     5754 * Saves the auto load/store'd guest MSRs from the current VMCS into the
     5755 * guest-CPU context. Currently these are LSTAR, STAR, SFMASK, KERNEL-GS BASE
     5756 * and TSC_AUX.
    56525757 *
    56535758 * @returns VBox status code.
     
    56595764 * @remarks No-long-jump zone!!!
    56605765 */
    5661 static int hmR0VmxSaveGuestFSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    5662 {
    5663     int rc = VINF_SUCCESS;
    5664     if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_FS_BASE_MSR))
    5665     {
    5666         uint64_t u64Val = 0;
    5667         rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_FS_BASE, &u64Val);   AssertRCReturn(rc, rc);
    5668         pMixedCtx->fs.u64Base = u64Val;
    5669         pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_FS_BASE_MSR;
    5670     }
    5671     return rc;
    5672 }
    5673 
    5674 
    5675 /**
    5676  * Saves the guest GS_BASE MSRs from the current VMCS into the guest-CPU
    5677  * context.
    5678  *
    5679  * @returns VBox status code.
    5680  * @param   pVCpu       Pointer to the VMCPU.
    5681  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
    5682  *                      out-of-sync. Make sure to update the required fields
    5683  *                      before using them.
    5684  *
    5685  * @remarks No-long-jump zone!!!
    5686  */
    5687 static int hmR0VmxSaveGuestGSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    5688 {
    5689     int rc = VINF_SUCCESS;
    5690     if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GS_BASE_MSR))
    5691     {
    5692         uint64_t u64Val = 0;
    5693         rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GS_BASE, &u64Val);   AssertRCReturn(rc, rc);
    5694         pMixedCtx->gs.u64Base = u64Val;
    5695         pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GS_BASE_MSR;
    5696     }
    5697     return rc;
    5698 }
    5699 
    5700 
    5701 /**
    5702  * Saves the auto load/store'd guest MSRs from the current VMCS into the
    5703  * guest-CPU context. Currently these are LSTAR, STAR, SFMASK, KERNEL-GS BASE
    5704  * and TSC_AUX.
    5705  *
    5706  * @returns VBox status code.
    5707  * @param   pVCpu       Pointer to the VMCPU.
    5708  * @param   pMixedCtx   Pointer to the guest-CPU context. The data maybe
    5709  *                      out-of-sync. Make sure to update the required fields
    5710  *                      before using them.
    5711  *
    5712  * @remarks No-long-jump zone!!!
    5713  */
    57145766static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    57155767{
     
    57185770
    57195771#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    5720     for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cGuestMsrs; i++)
    5721     {
    5722         PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
    5723         pMsr += i;
     5772    PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
     5773    Log4(("hmR0VmxSaveGuestAutoLoadStoreMsrs: cMsrs=%u\n", pVCpu->hm.s.vmx.cMsrs));
     5774    for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++)
     5775    {
    57245776        switch (pMsr->u32Msr)
    57255777        {
    5726             case MSR_K8_LSTAR:          pMixedCtx->msrLSTAR  = pMsr->u64Value;                   break;
    5727             case MSR_K6_STAR:           pMixedCtx->msrSTAR   = pMsr->u64Value;                   break;
    5728             case MSR_K8_SF_MASK:        pMixedCtx->msrSFMASK = pMsr->u64Value;                   break;
     5778            case MSR_K8_LSTAR:          pMixedCtx->msrLSTAR        = pMsr->u64Value;             break;
     5779            case MSR_K6_STAR:           pMixedCtx->msrSTAR         = pMsr->u64Value;             break;
     5780            case MSR_K8_SF_MASK:        pMixedCtx->msrSFMASK       = pMsr->u64Value;             break;
     5781            case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value;             break;
    57295782            case MSR_K8_TSC_AUX:        CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value);  break;
    5730             case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value;             break;
    5731             case MSR_K6_EFER:           /* EFER can't be changed without causing a VM-exit. */   break;
    57325783            default:
    57335784            {
     
    58675918                                      PCPUMSELREG pSelReg)
    58685919{
     5920    NOREF(pVCpu);
     5921
    58695922    uint32_t u32Val = 0;
    58705923    int rc = VMXReadVmcs32(idxSel, &u32Val);
     
    60866139static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    60876140{
     6141    NOREF(pMixedCtx);
     6142
    60886143    /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */
    60896144    pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_APIC_STATE;
     
    61356190    rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx);
    61366191    AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
    6137 
    6138     rc = hmR0VmxSaveGuestFSBaseMsr(pVCpu, pMixedCtx);
    6139     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestFSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
    6140 
    6141     rc = hmR0VmxSaveGuestGSBaseMsr(pVCpu, pMixedCtx);
    6142     AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestGSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
    61436192
    61446193    rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
     
    69016950
    69026951    Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
     6952    NOREF(fBlockMovSS); NOREF(fBlockSti);
    69036953    return rc;
    69046954}
     
    69156965DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    69166966{
     6967    NOREF(pMixedCtx);
    69176968    uint32_t u32IntInfo = X86_XCPT_UD | VMX_EXIT_INTERRUPTION_INFO_VALID;
    69186969    hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
     
    69497000DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
    69507001{
     7002    NOREF(pMixedCtx);
    69517003    uint32_t u32IntInfo  = X86_XCPT_DB | VMX_EXIT_INTERRUPTION_INFO_VALID;
    69527004    u32IntInfo          |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
     
    69677019DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
    69687020{
     7021    NOREF(pMixedCtx);
    69697022    uint32_t u32IntInfo  = X86_XCPT_OF | VMX_EXIT_INTERRUPTION_INFO_VALID;
    69707023    u32IntInfo          |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
     
    70087061DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
    70097062{
     7063    NOREF(pMixedCtx);
    70107064    uint32_t u32IntInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
    70117065    if (   uVector == X86_XCPT_BP
     
    73067360    Assert(pVM->hm.s.vmx.fSupported);
    73077361    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    7308     NOREF(pCpu);
     7362    NOREF(pCpu); NOREF(pVM);
    73097363
    73107364    LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
     
    73477401VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
    73487402{
     7403    NOREF(fGlobalInit);
     7404
    73497405    switch (enmEvent)
    73507406    {
     
    75517607
    75527608    rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
    7553     AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
     7609    AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadSharedMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    75547610
    75557611    rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx);
     
    75887644static void hmR0VmxLoadSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
    75897645{
     7646    NOREF(pVM);
     7647
    75907648    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    75917649    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     
    78417899
    78427900    /*
      7901     * The host MSR values won't have been updated the very first time around, so we
      7902     * need to fill them in here. Subsequently, they are updated as part of the host state.
     7903     */
     7904    if (!pVCpu->hm.s.vmx.fUpdatedHostMsrs)
     7905        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT);
     7906
     7907    /*
    78437908     * Load the host state bits as we may've been preempted (only happens when
    78447909     * thread-context hooks are used or when hmR0VmxSetupVMRunHandler() changes pfnStartVM).
     
    78867951        || idCurrentCpu != pVCpu->hm.s.idLastCpu)
    78877952    {
    7888         hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pMixedCtx);
     7953        hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu);
    78897954        pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
    78907955    }
     
    79007965                                                                   to start executing. */
    79017966
    7902 #ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     7967    /** @todo Get rid of VBOX_WITH_AUTO_MSR_LOAD_RESTORE define. */
     7968#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     7969    /*
     7970     * Load the TSC_AUX MSR when we are not intercepting RDTSCP.
     7971     */
     7972    uint64_t uGuestTscAuxMsr;
     7973    if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
     7974    {
     7975        if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
     7976        {
     7977            int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &uGuestTscAuxMsr);
     7978            AssertRC(rc2);
     7979            hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, uGuestTscAuxMsr, true /* fUpdateHostMsr */);
     7980        }
     7981        else
     7982            hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX);
     7983    }
     7984#ifdef VBOX_STRICT
     7985    hmR0VmxCheckAutoLoadStoreMsrs(pVCpu);
     7986#endif
     7987#else
    79037988    /*
    79047989     * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
    79057990     * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}.
    79067991     */
    7907     if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
     7992    if (   (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
     7993        && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
    79087994    {
    79097995        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
    7910         uint64_t u64HostTscAux = 0;
    7911         int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64HostTscAux);
     7996        int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64HostTscAuxMsr);
    79127997        AssertRC(rc2);
    7913         ASMWrMsr(MSR_K8_TSC_AUX, u64HostTscAux);
     7998        ASMWrMsr(MSR_K8_TSC_AUX, u64HostTscAuxMsr);
    79147999    }
    79158000#endif
     
    79368021static void hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun)
    79378022{
     8023    NOREF(pVM);
     8024
    79388025    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    79398026
     
    79468033    if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
    79478034    {
    7948 #ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    7949         /* Restore host's TSC_AUX. */
    79508035        if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
     8036        {
     8037#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     8038            /* VT-x restored the host TSC_AUX MSR for us, update the guest value from the VMCS area
     8039               if it could have changed without causing a VM-exit. */
     8040            if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
     8041            {
     8042                int rc2 = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
     8043                AssertRC(rc2);
     8044            }
     8045#else
     8046            /* Update guest's TSC_AUX if it could have changed. */
     8047            if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
     8048            {
     8049                uint64_t u64GuestTscAuxMsr = ASMRdMsr(MSR_K8_TSC_AUX);
     8050                CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, u64GuestTscAuxMsr);
     8051            }
     8052            /* Restore host's TSC_AUX. */
    79518053            ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
    79528054#endif
     8055        }
     8056
    79538057        /** @todo Find a way to fix hardcoding a guestimate.  */
    79548058        TMCpuTickSetLastSeen(pVCpu, ASMReadTSC()
     
    82378341DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
    82388342{
     8343#ifdef DEBUG_ramshankar
     8344# define SVVMCS()       do { int rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); AssertRC(rc2); } while (0)
     8345# define LDVMCS()       do { VMCPU_HMCF_SET(pVCpu, HM_CHANGED_ALL_GUEST); } while (0)
     8346#endif
    82398347    int rc;
    82408348    switch (rcReason)
    82418349    {
    8242         case VMX_EXIT_EPT_MISCONFIG:           rc = hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient); break;
    8243         case VMX_EXIT_EPT_VIOLATION:           rc = hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient); break;
    8244         case VMX_EXIT_IO_INSTR:                rc = hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient); break;
    8245         case VMX_EXIT_CPUID:                   rc = hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient); break;
    8246         case VMX_EXIT_RDTSC:                   rc = hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient); break;
    8247         case VMX_EXIT_RDTSCP:                  rc = hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient); break;
    8248         case VMX_EXIT_APIC_ACCESS:             rc = hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient); break;
    8249         case VMX_EXIT_XCPT_OR_NMI:             rc = hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient); break;
    8250         case VMX_EXIT_MOV_CRX:                 rc = hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient); break;
    8251         case VMX_EXIT_EXT_INT:                 rc = hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient); break;
    8252         case VMX_EXIT_INT_WINDOW:              rc = hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient); break;
    8253         case VMX_EXIT_MWAIT:                   rc = hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient); break;
    8254         case VMX_EXIT_MONITOR:                 rc = hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient); break;
    8255         case VMX_EXIT_TASK_SWITCH:             rc = hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient); break;
    8256         case VMX_EXIT_PREEMPT_TIMER:           rc = hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient); break;
    8257         case VMX_EXIT_RDMSR:                   rc = hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient); break;
    8258         case VMX_EXIT_WRMSR:                   rc = hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient); break;
    8259         case VMX_EXIT_MOV_DRX:                 rc = hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient); break;
    8260         case VMX_EXIT_TPR_BELOW_THRESHOLD:     rc = hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient); break;
    8261         case VMX_EXIT_HLT:                     rc = hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient); break;
    8262         case VMX_EXIT_INVD:                    rc = hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient); break;
    8263         case VMX_EXIT_INVLPG:                  rc = hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient); break;
    8264         case VMX_EXIT_RSM:                     rc = hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient); break;
    8265         case VMX_EXIT_MTF:                     rc = hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient); break;
    8266         case VMX_EXIT_PAUSE:                   rc = hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient); break;
    8267         case VMX_EXIT_XDTR_ACCESS:             rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); break;
    8268         case VMX_EXIT_TR_ACCESS:               rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); break;
    8269         case VMX_EXIT_WBINVD:                  rc = hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient); break;
    8270         case VMX_EXIT_XSETBV:                  rc = hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient); break;
    8271         case VMX_EXIT_RDRAND:                  rc = hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient); break;
    8272         case VMX_EXIT_INVPCID:                 rc = hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient); break;
    8273         case VMX_EXIT_GETSEC:                  rc = hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient); break;
    8274         case VMX_EXIT_RDPMC:                   rc = hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient); break;
     8350        case VMX_EXIT_EPT_MISCONFIG:           /* SVVMCS(); */ rc = hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient);      /* LDVMCS(); */ break;
     8351        case VMX_EXIT_EPT_VIOLATION:           /* SVVMCS(); */ rc = hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient);      /* LDVMCS(); */ break;
     8352        case VMX_EXIT_IO_INSTR:                /* SVVMCS(); */ rc = hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient);           /* LDVMCS(); */ break;
     8353        case VMX_EXIT_CPUID:                   /* SVVMCS(); */ rc = hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient);             /* LDVMCS(); */ break;
     8354        case VMX_EXIT_RDTSC:                   /* SVVMCS(); */ rc = hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient);             /* LDVMCS(); */ break;
     8355        case VMX_EXIT_RDTSCP:                  /* SVVMCS(); */ rc = hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient);            /* LDVMCS(); */ break;
     8356        case VMX_EXIT_APIC_ACCESS:             /* SVVMCS(); */ rc = hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient);        /* LDVMCS(); */ break;
     8357        case VMX_EXIT_XCPT_OR_NMI:             /* SVVMCS(); */ rc = hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient);         /* LDVMCS(); */ break;
     8358        case VMX_EXIT_MOV_CRX:                 /* SVVMCS(); */ rc = hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient);            /* LDVMCS(); */ break;
     8359        case VMX_EXIT_EXT_INT:                 /* SVVMCS(); */ rc = hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient);            /* LDVMCS(); */ break;
     8360        case VMX_EXIT_INT_WINDOW:              /* SVVMCS(); */ rc = hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient);         /* LDVMCS(); */ break;
     8361        case VMX_EXIT_MWAIT:                   /* SVVMCS(); */ rc = hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient);             /* LDVMCS(); */ break;
     8362        case VMX_EXIT_MONITOR:                 /* SVVMCS(); */ rc = hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient);           /* LDVMCS(); */ break;
     8363        case VMX_EXIT_TASK_SWITCH:             /* SVVMCS(); */ rc = hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient);        /* LDVMCS(); */ break;
     8364        case VMX_EXIT_PREEMPT_TIMER:           /* SVVMCS(); */ rc = hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient);      /* LDVMCS(); */ break;
     8365        case VMX_EXIT_RDMSR:                   /* SVVMCS(); */ rc = hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient);             /* LDVMCS(); */ break;
     8366        case VMX_EXIT_WRMSR:                   /* SVVMCS(); */ rc = hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient);             /* LDVMCS(); */ break;
     8367        case VMX_EXIT_MOV_DRX:                 /* SVVMCS(); */ rc = hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient);            /* LDVMCS(); */ break;
     8368        case VMX_EXIT_TPR_BELOW_THRESHOLD:     /* SVVMCS(); */ rc = hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient); /* LDVMCS(); */ break;
     8369        case VMX_EXIT_HLT:                     /* SVVMCS(); */ rc = hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient);               /* LDVMCS(); */ break;
     8370        case VMX_EXIT_INVD:                    /* SVVMCS(); */ rc = hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient);              /* LDVMCS(); */ break;
     8371        case VMX_EXIT_INVLPG:                  /* SVVMCS(); */ rc = hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient);            /* LDVMCS(); */ break;
     8372        case VMX_EXIT_RSM:                     /* SVVMCS(); */ rc = hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient);               /* LDVMCS(); */ break;
     8373        case VMX_EXIT_MTF:                     /* SVVMCS(); */ rc = hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient);               /* LDVMCS(); */ break;
     8374        case VMX_EXIT_PAUSE:                   /* SVVMCS(); */ rc = hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient);             /* LDVMCS(); */ break;
     8375        case VMX_EXIT_XDTR_ACCESS:             /* SVVMCS(); */ rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient);        /* LDVMCS(); */ break;
     8376        case VMX_EXIT_TR_ACCESS:               /* SVVMCS(); */ rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient);        /* LDVMCS(); */ break;
     8377        case VMX_EXIT_WBINVD:                  /* SVVMCS(); */ rc = hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient);            /* LDVMCS(); */ break;
     8378        case VMX_EXIT_XSETBV:                  /* SVVMCS(); */ rc = hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient);            /* LDVMCS(); */ break;
     8379        case VMX_EXIT_RDRAND:                  /* SVVMCS(); */ rc = hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient);            /* LDVMCS(); */ break;
     8380        case VMX_EXIT_INVPCID:                 /* SVVMCS(); */ rc = hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient);           /* LDVMCS(); */ break;
     8381        case VMX_EXIT_GETSEC:                  /* SVVMCS(); */ rc = hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient);            /* LDVMCS(); */ break;
     8382        case VMX_EXIT_RDPMC:                   /* SVVMCS(); */ rc = hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient);             /* LDVMCS(); */ break;
    82758383
    82768384        case VMX_EXIT_TRIPLE_FAULT:            rc = hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient); break;
     
    83388446            do { \
    83398447                Log4Func(("\n")); \
    8340             } while(0)
     8448            } while (0)
    83418449#else   /* Release builds */
    8342 # define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() do { HMVMX_STOP_EXIT_DISPATCH_PROF(); } while(0)
    8343 # define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while(0)
     8450# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
     8451            do { \
     8452                HMVMX_STOP_EXIT_DISPATCH_PROF(); \
     8453                NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient); \
     8454            } while (0)
     8455# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while (0)
    83448456#endif
    83458457
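
    The release-build variant of HMVMX_VALIDATE_EXIT_HANDLER_PARAMS above exists to mark parameters
    that are otherwise referenced only by assertions, so the compiler does not warn about them. A
    small illustration of the idiom (the NOREF_SKETCH macro and MY_STRICT_BUILD define are
    illustrative; IPRT provides its own NOREF):

    #define NOREF_SKETCH(var) ((void)(var))     /* classic "reference without using" idiom */

    static int exitHandlerSketch(void *pVCpu, void *pMixedCtx, void *pVmxTransient)
    {
    #ifdef MY_STRICT_BUILD
        /* strict builds would assert on the parameters here */
    #else
        NOREF_SKETCH(pVCpu); NOREF_SKETCH(pMixedCtx); NOREF_SKETCH(pVmxTransient);
    #endif
        return 0;
    }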
     
    83598471DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
    83608472{
    8361     int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
     8473    int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    83628474    rc    |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
    83638475    AssertRCReturn(rc, rc);
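
    hmR0VmxAdvanceGuestRip now takes the instruction length straight from the transient structure;
    the underlying operation is just moving RIP past the instruction that exited. A minimal sketch
    (structure and field names are illustrative):

    #include <stdint.h>

    typedef struct GUESTCTXSKETCH { uint64_t rip; } GUESTCTXSKETCH;

    static void advanceGuestRipSketch(GUESTCTXSKETCH *pCtx, uint32_t cbExitInstr)
    {
        pCtx->rip += cbExitInstr;   /* cbExitInstr comes from the VMCS exit-instruction-length field */
    }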
     
    83878499                                                uError = (err); \
    83888500                                                break; \
    8389                                             } else do {} while (0)
     8501                                            } else do { } while (0)
    83908502/* Duplicate of IEM_IS_CANONICAL(). */
    83918503#define HMVMX_IS_CANONICAL(a_u64Addr)       ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
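
    HMVMX_IS_CANONICAL compresses the usual "bits 63:48 must sign-extend bit 47" rule into a single
    unsigned comparison: adding 2^47 maps both canonical halves of the 48-bit address space onto
    [0, 2^48), so anything that lands outside is non-canonical. The same expression in isolation
    (the trailing values are worked examples, not from the source):

    #include <stdint.h>

    static int isCanonicalSketch(uint64_t uAddr)
    {
        /* same expression as HMVMX_IS_CANONICAL / IEM_IS_CANONICAL */
        return uAddr + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000);
    }
    /* isCanonicalSketch(UINT64_C(0xFFFF800000000000)) -> 1 (lowest canonical high-half address)
       isCanonicalSketch(UINT64_C(0x0000800000000000)) -> 0 (non-canonical)                     */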
     
    89989110    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
    89999111
    9000     int rc = hmR0VmxReadExitIntInfoVmcs(pVCpu, pVmxTransient);
     9112    int rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
    90019113    AssertRCReturn(rc, rc);
    90029114
     
    90759187                        Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
    90769188
    9077                         rc  = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    9078                         rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
     9189                        rc  = hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     9190                        rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
    90799191                        AssertRCReturn(rc, rc);
    90809192                        hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntInfo),
     
    92959407    Assert(!pVM->hm.s.fNestedPaging);
    92969408
    9297     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     9409    int rc = hmR0VmxReadExitQualificationVmcs(pVmxTransient);
    92989410    rc    |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
    92999411    AssertRCReturn(rc, rc);
     
    93879499     * See Intel spec. "33.15.5 Enabling the Dual-Monitor Treatment".
    93889500     */
     9501    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    93899502    AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
    93909503    HMVMX_RETURN_UNEXPECTED_EXIT();
     
    94039516     * See Intel spec. "33.15.6 Activating the Dual-Monitor Treatment" and Intel spec. 25.3 "Other Causes of VM-Exits"
    94049517     */
     9518    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    94059519    AssertMsgFailed(("Unexpected SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
    94069520    HMVMX_RETURN_UNEXPECTED_EXIT();
     
    94149528{
    94159529    /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
     9530    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    94169531    AssertMsgFailed(("Unexpected IO SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
    94179532    HMVMX_RETURN_UNEXPECTED_EXIT();
     
    94299544     * See Intel spec. 25.3 "Other Causes of VM-exits".
    94309545     */
     9546    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    94319547    AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
    94329548    HMVMX_RETURN_UNEXPECTED_EXIT();
     
    95629678    rc  = hmR0VmxReadEntryIntInfoVmcs(pVmxTransient);
    95639679    rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
    9564     rc |= hmR0VmxReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
     9680    rc |= hmR0VmxReadEntryInstrLenVmcs(pVmxTransient);
    95659681    rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
    95669682    AssertRCReturn(rc, rc);
     
    95849700    rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val);                 AssertRC(rc);
    95859701    Log4(("VMX_VMCS64_CTRL_EPTP_FULL                  %#RX64\n", u64Val));
    9586 #endif
    9587 
    9588     PVM pVM = pVCpu->CTX_SUFF(pVM);
    9589     HMDumpRegs(pVM, pVCpu, pMixedCtx);
    9590 
     9702#else
     9703    NOREF(pVmxTransient);
     9704#endif
     9705
     9706    HMDumpRegs(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
    95919707    return VERR_VMX_INVALID_GUEST_STATE;
    95929708}
     
    95999715HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
    96009716{
    9601     AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
     9717    NOREF(pVmxTransient);
     9718    AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
    96029719    HMVMX_RETURN_UNEXPECTED_EXIT();
    96039720}
     
    96109727HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
    96119728{
    9612     AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
     9729    NOREF(pVmxTransient);
     9730    AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx)); NOREF(pMixedCtx);
    96139731    HMVMX_RETURN_UNEXPECTED_EXIT();
    96149732}
     
    96229740{
    96239741    AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
     9742    NOREF(pVCpu); NOREF(pMixedCtx); NOREF(pVmxTransient);
    96249743    return VERR_VMX_UNDEFINED_EXIT_CODE;
    96259744}
     
    96719790    rc     |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
    96729791    rc     |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     9792    if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
     9793        rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
    96739794    AssertRCReturn(rc, rc);
    96749795    Log4(("CS:RIP=%04x:%#RX64 ECX=%X\n", pMixedCtx->cs.Sel, pMixedCtx->rip, pMixedCtx->ecx));
     9796
     9797#ifdef VBOX_STRICT
     9798    if (   (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
     9799        && hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
     9800    {
     9801        AssertMsgFailed(("Unexpected RDMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
     9802        HMVMX_RETURN_UNEXPECTED_EXIT();
     9803    }
     9804#endif
    96759805
    96769806    PVM pVM = pVCpu->CTX_SUFF(pVM);
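
    The new VBOX_STRICT block documents the invariant the rewrite relies on: when MSR bitmaps are
    active, any MSR placed in the auto-load/store area has its read intercept cleared, so an RDMSR
    exit for such an MSR indicates a bug. A hedged sketch of what a lookup such as
    hmR0VmxIsAutoLoadStoreGuestMsr() amounts to, using the 16-byte {index, reserved, value} entry
    layout of the VMX MSR areas (names here are illustrative):

    #include <stdint.h>
    #include <stdbool.h>

    typedef struct AUTOMSRSKETCH
    {
        uint32_t idMsr;       /* MSR index */
        uint32_t uReserved;
        uint64_t u64Value;    /* MSR value loaded/stored by the CPU */
    } AUTOMSRSKETCH;

    static bool isAutoLoadStoreMsrSketch(AUTOMSRSKETCH const *paMsrs, uint32_t cMsrs, uint32_t idMsr)
    {
        for (uint32_t i = 0; i < cMsrs; i++)
            if (paMsrs[i].idMsr == idMsr)
                return true;  /* hardware swaps this MSR; its exits should be disabled via the bitmap */
        return false;
    }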
     
    97029832    rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
    97039833    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    9704     rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
     9834    if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
     9835        rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
    97059836    AssertRCReturn(rc, rc);
    97069837    Log4(("ecx=%#RX32\n", pMixedCtx->ecx));
     
    97239854            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_APIC_STATE);
    97249855        }
    9725         else if (pMixedCtx->ecx == MSR_K6_EFER)         /* EFER is the only MSR we auto-load but don't allow write-passthrough. */
    9726             VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
    97279856        else if (pMixedCtx->ecx == MSR_IA32_TSC)        /* Windows 7 does this during bootup. See @bugref{6398}. */
    97289857            pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
    97299858
    9730         /* Update MSRs that are part of the VMCS when MSR-bitmaps are not supported. */
     9859        /* Update MSRs that are part of the VMCS and auto-load/store area when MSR-bitmaps are not supported. */
    97319860        if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
    97329861        {
     
    97389867                case MSR_K8_FS_BASE:        /* no break */
    97399868                case MSR_K8_GS_BASE:        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);     break;
    9740                 case MSR_K8_KERNEL_GS_BASE: VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);    break;
     9869                default:
     9870                {
     9871                    if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
     9872                        VMCPU_HMCF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
     9873                    break;
     9874                }
    97419875            }
    97429876        }
     
    97579891                }
    97589892
    9759                 case MSR_K8_LSTAR:
    9760                 case MSR_K6_STAR:
    9761                 case MSR_K8_SF_MASK:
    9762                 case MSR_K8_TSC_AUX:
    9763                 case MSR_K8_KERNEL_GS_BASE:
      9893                /* Writes to MSRs that are part of the auto-load/store area shouldn't cause VM-exits
     9894                   when MSR-bitmaps are supported. */
     9895                default:
    97649896                {
    9765                     AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
    9766                                      pMixedCtx->ecx));
    9767                     HMVMX_RETURN_UNEXPECTED_EXIT();
     9897                    if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx))
     9898                    {
     9899                        AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
     9900                                         pMixedCtx->ecx));
     9901                        HMVMX_RETURN_UNEXPECTED_EXIT();
     9902                    }
     9903                    break;
    97689904                }
    97699905            }
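
    Both WRMSR paths above converge on the same bookkeeping rule: a write that reaches the exit
    handler for an MSR living in the auto-load/store area must either be treated as a bug (bitmaps
    active) or mark the area stale so it is rebuilt before the next VM-entry (no bitmaps). A sketch
    of the latter, with illustrative names only:

    #include <stdint.h>
    #include <stdbool.h>

    typedef struct AUTOMSRAREASKETCH
    {
        uint32_t  cMsrs;          /* number of guest/host MSR pairs */
        uint32_t *paMsrIndexes;   /* MSR indexes in the area */
        uint64_t *paMsrValues;    /* corresponding guest values */
        bool      fDirty;         /* stands in for HM_CHANGED_VMX_GUEST_AUTO_MSRS */
    } AUTOMSRAREASKETCH;

    static void onInterceptedWrmsrSketch(AUTOMSRAREASKETCH *pArea, uint32_t idMsr, uint64_t uValue)
    {
        for (uint32_t i = 0; i < pArea->cMsrs; i++)
            if (pArea->paMsrIndexes[i] == idMsr)
            {
                pArea->paMsrValues[i] = uValue; /* keep the area's guest value in sync */
                pArea->fDirty         = true;   /* force a refresh before the next VM-entry */
                return;
            }
    }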
     
    98029938    /*
    98039939     * The TPR has already been updated, see hmR0VMXPostRunGuest(). RIP is also updated as part of the VM-exit by VT-x. Update
    9804      * the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and
     9940     * the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectPendingEvent() and
    98059941     * resume guest execution.
    98069942     */
     
    98259961    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    98269962    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
    9827     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     9963    int rc = hmR0VmxReadExitQualificationVmcs(pVmxTransient);
    98289964    AssertRCReturn(rc, rc);
    98299965
     
    995810094    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
    995910095
    9960     int rc2 = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    9961     rc2    |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
     10096    int rc2 = hmR0VmxReadExitQualificationVmcs(pVmxTransient);
     10097    rc2    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    996210098    rc2    |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
    996310099    rc2    |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);         /* Eflag checks in EMInterpretDisasCurrent(). */
     
    999710133        if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.Msrs.u64BasicInfo))
    999810134        {
    9999             rc2  = hmR0VmxReadExitInstrInfoVmcs(pVCpu, pVmxTransient);
     10135            rc2  = hmR0VmxReadExitInstrInfoVmcs(pVmxTransient);
    1000010136            /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
    1000110137            rc2 |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
     
    1003310169#else
    1003410170        PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
    10035         rcStrict = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
     10171        rcStrict = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL /* pcbInstr */);
    1003610172        if (RT_SUCCESS(rcStrict))
    1003710173        {
     
    1009410230        }
    1009510231
    10096         /* INS & OUTS with REP prefix modify RFLAGS. */
     10232        /*
      10233         * INS/OUTS with a REP prefix updates RFLAGS; this can be observed as a triple-fault guru while booting a Fedora 17 64-bit guest.
     10234         * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ.
     10235         */
    1009710236        if (fIOString)
    1009810237            VMCPU_HMCF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);
     
    1017610315
     1017710316    /* Check if this task-switch occurred while delivering an event through the guest IDT. */
    10178     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     10317    int rc = hmR0VmxReadExitQualificationVmcs(pVmxTransient);
    1017910318    AssertRCReturn(rc, rc);
    1018010319    if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
     
    1026110400    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    1026210401#endif
    10263     rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     10402    rc |= hmR0VmxReadExitQualificationVmcs(pVmxTransient);
    1026410403    AssertRCReturn(rc, rc);
    1026510404
     
    1035210491
    1035310492        /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
    10354         PVM pVM = pVCpu->CTX_SUFF(pVM);
    1035510493        CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
    1035610494        Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
     
    1036010498
    1036110499#ifdef VBOX_WITH_STATISTICS
    10362         rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     10500        rc = hmR0VmxReadExitQualificationVmcs(pVmxTransient);
    1036310501        AssertRCReturn(rc, rc);
    1036410502        if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
     
    1037510513     * hmR0VmxSaveGuestAutoLoadStoreMsrs(). Update the segment registers and DR7 from the CPU.
    1037610514     */
    10377     rc  = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     10515    rc  = hmR0VmxReadExitQualificationVmcs(pVmxTransient);
    1037810516    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    1037910517    rc |= hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
     
    1048210620    RTGCPHYS GCPhys = 0;
    1048310621    rc  = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
    10484     rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     10622    rc |= hmR0VmxReadExitQualificationVmcs(pVmxTransient);
    1048510623#if 0
    1048610624    rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);     /** @todo Can we do better?  */
     
    1058210720    if (rc == VINF_EM_RAW_GUEST_TRAP)
    1058310721    {
    10584         rc  = hmR0VmxReadExitIntInfoVmcs(pVCpu, pVmxTransient);
    10585         rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    10586         rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
     10722        rc  = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
     10723        rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     10724        rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
    1058710725        AssertRCReturn(rc, rc);
    1058810726
     
    1060910747     * for processing.
    1061010748     */
    10611     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
     10749    int rc = hmR0VmxReadExitQualificationVmcs(pVmxTransient);
    1061210750    AssertRCReturn(rc, rc);
    1061310751
     
    1065210790         * Raise #DB in the guest.
    1065310791         */
    10654         rc  = hmR0VmxReadExitIntInfoVmcs(pVCpu, pVmxTransient);
    10655         rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    10656         rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
     10792        rc  = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
     10793        rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
     10794        rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
    1065710795        AssertRCReturn(rc, rc);
    1065810796        hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
     
    1071810856        /* Forward #NM to the guest. */
    1071910857        Assert(rc == VINF_EM_RAW_GUEST_TRAP);
    10720         rc = hmR0VmxReadExitIntInfoVmcs(pVCpu, pVmxTransient);
     10858        rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
    1072110859        AssertRCReturn(rc, rc);
    1072210860        hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo),
     
    1074410882#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    1074510883        /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
    10746         rc  = hmR0VmxReadExitIntInfoVmcs(pVCpu, pVmxTransient);
    10747         rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
    10748         rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
     10884        rc  = hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
     10885        rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
     10886        rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1074910887        rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    1075010888        AssertRCReturn(rc, rc);
     
    1075710895        /* We don't intercept #GP. */
    1075810896        AssertMsgFailed(("Unexpected VM-exit caused by #GP exception\n"));
     10897        NOREF(pVmxTransient);
    1075910898        return VERR_VMX_UNEXPECTED_EXCEPTION;
    1076010899#endif
     
    1097511114
    1097611115
     11116#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    1097711117/**
    1097811118 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
     
    1098811128    /* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
    1098911129       hmR0VmxCheckExitDueToEventDelivery(). */
    10990     int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
    10991     rc    |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
     11130    int rc = hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
     11131    rc    |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient);
    1099211132    AssertRCReturn(rc, rc);
    1099311133    Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);
     
    1099711137    return VINF_SUCCESS;
    1099811138}
     11139#endif
    1099911140
    1100011141
     
    1100611147    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
    1100711148    PVM pVM = pVCpu->CTX_SUFF(pVM);
    11008     int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    11009     rc    |= hmR0VmxReadExitIntInfoVmcs(pVCpu, pVmxTransient);
    11010     rc    |= hmR0VmxReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);
     11149    int rc = hmR0VmxReadExitQualificationVmcs(pVmxTransient);
     11150    rc    |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient);
     11151    rc    |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient);
    1101111152    AssertRCReturn(rc, rc);
    1101211153
     
    1103211173#else
    1103311174    Assert(!pVM->hm.s.fNestedPaging);
     11175    NOREF(pVM);
    1103411176#endif
    1103511177
  • trunk/src/VBox/VMM/VMMR3/HM.cpp

    r49508 r49520  
    29232923                    LogRel(("HM: CPU[%u] EntryCtls        %#RX32\n", i, pVCpu->hm.s.vmx.u32EntryCtls));
    29242924                    LogRel(("HM: CPU[%u] ExitCtls         %#RX32\n", i, pVCpu->hm.s.vmx.u32ExitCtls));
    2925                     LogRel(("HM: CPU[%u] MSRBitmapPhys    %#RHp\n",  i, pVCpu->hm.s.vmx.HCPhysMsrBitmap));
     2925                    LogRel(("HM: CPU[%u] HCPhysMsrBitmap  %#RHp\n",  i, pVCpu->hm.s.vmx.HCPhysMsrBitmap));
    29262926#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
    2927                     LogRel(("HM: CPU[%u] GuestMSRPhys     %#RHp\n",  i, pVCpu->hm.s.vmx.HCPhysGuestMsr));
    2928                     LogRel(("HM: CPU[%u] HostMsrPhys      %#RHp\n",  i, pVCpu->hm.s.vmx.HCPhysHostMsr));
    2929                     LogRel(("HM: CPU[%u] cGuestMSRs       %u\n",     i, pVCpu->hm.s.vmx.cGuestMsrs));
     2927                    LogRel(("HM: CPU[%u] HCPhysGuestMsr   %#RHp\n",  i, pVCpu->hm.s.vmx.HCPhysGuestMsr));
     2928                    LogRel(("HM: CPU[%u] HCPhysHostMsr    %#RHp\n",  i, pVCpu->hm.s.vmx.HCPhysHostMsr));
     2929                    LogRel(("HM: CPU[%u] cMsrs            %u\n",     i, pVCpu->hm.s.vmx.cMsrs));
    29302930#endif
    29312931                }
  • trunk/src/VBox/VMM/include/HMInternal.h

    r49275 r49520  
    7979 * @{
    8080 */
    81 #define HM_CHANGED_GUEST_CR0                     RT_BIT(0)
     81#define HM_CHANGED_GUEST_CR0                     RT_BIT(0)      /* Shared */
    8282#define HM_CHANGED_GUEST_CR3                     RT_BIT(1)
    8383#define HM_CHANGED_GUEST_CR4                     RT_BIT(2)
     
    8787#define HM_CHANGED_GUEST_TR                      RT_BIT(6)
    8888#define HM_CHANGED_GUEST_SEGMENT_REGS            RT_BIT(7)
    89 #define HM_CHANGED_GUEST_DEBUG                   RT_BIT(8)
     89#define HM_CHANGED_GUEST_DEBUG                   RT_BIT(8)      /* Shared */
    9090#define HM_CHANGED_GUEST_RIP                     RT_BIT(9)
    9191#define HM_CHANGED_GUEST_RSP                     RT_BIT(10)
     
    605605        R0PTRTYPE(void *)           pvHostMsr;
    606606
    607         /** Number of automatically loaded/restored guest MSRs during the world switch. */
    608         uint32_t                    cGuestMsrs;
    609         uint32_t                    uAlignment;
     607        /** Number of guest/host MSR pairs in the auto-load/store area. */
     608        uint32_t                    cMsrs;
     609        /** Whether the host MSR values are up-to-date. */
     610        bool                        fUpdatedHostMsrs;
     611        uint8_t                     u8Align[7];
    610612#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
    611613
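
    The new fUpdatedHostMsrs flag in HMInternal.h lets the ring-0 code refresh the host halves of
    the auto-load/store area lazily instead of on every VM-entry. A hedged sketch of how such a
    flag is typically consumed (structure and helper names are illustrative, not the VirtualBox
    API):

    #include <stdint.h>
    #include <stdbool.h>

    typedef struct AUTOMSRSTATESKETCH
    {
        uint32_t cMsrs;             /* guest/host MSR pairs in the auto-load/store area */
        bool     fUpdatedHostMsrs;  /* true while the cached host values are still valid */
    } AUTOMSRSTATESKETCH;

    extern void refreshHostMsrValues(AUTOMSRSTATESKETCH *pState); /* hypothetical: re-reads host MSRs into the area */

    static void updateHostMsrsIfNeededSketch(AUTOMSRSTATESKETCH *pState)
    {
        if (!pState->fUpdatedHostMsrs)      /* e.g. cleared after a new MSR was added to the area */
        {
            refreshHostMsrValues(pState);
            pState->fUpdatedHostMsrs = true;
        }
    }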