VirtualBox

Changeset 45575 in vbox for trunk/src/VBox


Timestamp: Apr 16, 2013 3:47:57 PM (12 years ago)
Author: vboxsync
Message:
VMM/VMMR0: Update EFER on VM-reentry when WRMSR changes it. Fixes Solaris SMP guest panic.
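Background: before this change, a guest WRMSR to EFER was interpreted and the cached guest context updated, but the EFER value in the VMCS auto-load/store MSR area was not refreshed, so the CPU could restore a stale EFER on the next VM-entry. The fix (in the WRMSR exit handler below) marks the auto-loaded MSRs dirty via HM_CHANGED_VMX_GUEST_AUTO_MSRS so they are rewritten before reentry. A minimal sketch of that dirty-flag pattern, using hypothetical names rather than the real VirtualBox API:

    #include <stdint.h>

    #define CHANGED_AUTO_MSRS  (1u << 0)    /* hypothetical dirty bit */
    #define MSR_EFER           0xc0000080u  /* architectural EFER MSR number */

    typedef struct VCpu { uint64_t uGuestEfer; uint32_t fDirty; } VCpu;

    /* WRMSR intercept: update the cached value and mark it stale. */
    static void wrmsrIntercept(VCpu *pVCpu, uint32_t idMsr, uint64_t uValue)
    {
        if (idMsr == MSR_EFER)
        {
            pVCpu->uGuestEfer = uValue;
            pVCpu->fDirty |= CHANGED_AUTO_MSRS;   /* force re-sync on reentry */
        }
    }

    /* Before VM-entry: push any stale MSRs back into the auto-load area. */
    static void loadGuestStateBeforeEntry(VCpu *pVCpu)
    {
        if (pVCpu->fDirty & CHANGED_AUTO_MSRS)
        {
            /* Real code would rewrite the VMCS auto-load/store entry for
               EFER with pVCpu->uGuestEfer here. */
            pVCpu->fDirty &= ~CHANGED_AUTO_MSRS;
        }
    }

Without such a re-sync step, the guest's EFER write is silently undone on reentry, which is the failure mode behind the Solaris SMP guest panic named in the commit message.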

File: 1 edited

Legend: in the diff below, lines prefixed with '-' were removed in r45575, lines prefixed with '+' were added, and unprefixed lines are unchanged context.
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

--- trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (revision 45558)
+++ trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (revision 45575)
@@ -4737,9 +4737,14 @@
         {
             if (   hmR0VmxIsBenignXcpt(uIdtVector)
-                || hmR0VmxIsBenignXcpt(uExitVector)
-                || (   hmR0VmxIsContributoryXcpt(uIdtVector)
-                    && uExitVector == X86_XCPT_PF))
+                || hmR0VmxIsBenignXcpt(uExitVector))
             {
                 enmReflect = VMXREFLECTXCPT_XCPT;
+            }
+            if (   hmR0VmxIsContributoryXcpt(uIdtVector)
+                && uExitVector == X86_XCPT_PF)
+            {
+                enmReflect = VMXREFLECTXCPT_XCPT;
+                GCPtrFaultAddress = pMixedCtx->cr2;
+                Log(("IDT: Contributory #PF uCR2=%#RGv\n", pMixedCtx->cr2));
             }
             else if (   uExitVector == X86_XCPT_PF
     
@@ -4749,5 +4754,5 @@
                 enmReflect = VMXREFLECTXCPT_XCPT;
                 GCPtrFaultAddress = pMixedCtx->cr2;
-                Log(("Vectoring #PF uCR2=%#RGv\n", pMixedCtx->cr2));
+                Log(("IDT: Vectoring #PF uCR2=%#RGv\n", pMixedCtx->cr2));
             }
             else if (   (pVCpu->hm.s.vmx.u32XcptBitmap & VMX_CONTRIBUTORY_XCPT_BITMAP)
     
@@ -4760,4 +4765,6 @@
             else if (uIdtVector == X86_XCPT_DF)
                 enmReflect = VMXREFLECTXCPT_TF;
+            else
+                AssertMsgFailed(("Invalid!\n"));
         }
         else if (   uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
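The rewritten classification above follows the Intel SDM's benign/contributory/page-fault exception classes for deciding whether a second exception raised while delivering a first one is reflected, escalated to a double fault, or escalated to a triple fault. For reference, a compact sketch of those rules (illustrative only; hypothetical helper names, not the VirtualBox implementation):

    /* Intel SDM Table 6-5, in outline: contributory-during-contributory,
       or contributory/#PF during #PF delivery, escalates to #DF; anything
       during #DF delivery escalates to triple fault; all other pairs are
       benign and the original event is re-delivered. */
    enum Reflect { REFLECT_XCPT, REFLECT_DF, REFLECT_TF };

    static int isContributory(unsigned uVector)
    {
        /* #DE(0), #TS(10), #NP(11), #SS(12), #GP(13) are contributory. */
        return uVector == 0 || (uVector >= 10 && uVector <= 13);
    }

    static enum Reflect classify(unsigned uIdtVector, unsigned uExitVector)
    {
        unsigned const uPF = 14, uDF = 8;
        if (uIdtVector == uDF)
            return REFLECT_TF;              /* #DF during delivery: reset   */
        if (   (isContributory(uIdtVector) && isContributory(uExitVector))
            || (   uIdtVector == uPF
                && (isContributory(uExitVector) || uExitVector == uPF)))
            return REFLECT_DF;              /* raise #DF instead            */
        return REFLECT_XCPT;                /* benign: re-deliver original  */
    }

Note that a #PF raised while delivering a contributory exception is handled serially (the REFLECT_XCPT path), which is exactly the new contributory-#PF branch added above with CR2 preserved.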
     
@@ -4786,5 +4793,5 @@
                                        0 /* cbInstr */,  u32ErrCode, GCPtrFaultAddress);
                 rc = VINF_SUCCESS;
-                Log(("Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.u32ErrCode));
+                Log(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.u32ErrCode));
                 break;
             }
     
@@ -4794,5 +4801,5 @@
                 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
                 rc = VINF_VMX_DOUBLE_FAULT;
-                Log(("Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo, uIdtVector,
+                Log(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo, uIdtVector,
                      uExitVector));
                 break;
     
@@ -4801,5 +4808,5 @@
             case VMXREFLECTXCPT_TF:
             {
-                Log(("Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
+                Log(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
                 rc = VINF_EM_RESET;
                 break;
     
@@ -6284,5 +6291,5 @@
 
     /* Inject. */
-    Log(("Injecting u32IntrInfo=%#x u32ErrCode=%#x instrlen=%#x\n", u32IntrInfo, u32ErrCode, cbInstr));
+    Log(("Injecting u32IntrInfo=%#x u32ErrCode=%#x cbInstr=%#x uCR2=%#RGv\n", u32IntrInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
    rc  = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntrInfo);
    if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntrInfo))
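The injection log line now records CR2 as well. The u32IntrInfo value written to the VMCS follows the VM-entry interruption-information format from the Intel SDM; a sketch of assembling one (hypothetical helper, not the VirtualBox one):

    #include <stdint.h>

    /* VM-entry interruption-information field (Intel SDM): bits 7:0 vector,
       bits 10:8 type (3 = hardware exception), bit 11 deliver-error-code,
       bit 31 valid. */
    static uint32_t makeIntrInfo(uint8_t uVector, uint8_t uType, int fErrCode)
    {
        return (uint32_t)uVector
             | ((uint32_t)(uType & 0x7) << 8)
             | ((fErrCode ? 1u : 0u) << 11)
             | (1u << 31);                  /* valid bit */
    }

    /* Example: a #PF (vector 14, hardware exception) with an error code:
       makeIntrInfo(14, 3, 1) == 0x80000b0e. */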
     
@@ -6901,5 +6908,6 @@
                 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));             \
                 VMX_ASSERT_PREEMPT_CPUID_VAR();                              \
-                LogFunc(("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n")); \
+                LogFunc(("vcpu[%u] vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n", \
+                        (unsigned)pVCpu->idCpu));                            \
                 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));             \
                 if (VMMR0IsLogFlushDisabled(pVCpu))                          \
     
@@ -7628,6 +7636,6 @@
 {
     VMX_VALIDATE_EXIT_HANDLER_PARAMS();
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     int rc = VINF_SUCCESS;
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
 
     /* If TPR patching is active, LSTAR holds the guest TPR, writes to it must be propagated to the APIC. */
     
@@ -7647,39 +7655,4 @@
         return VINF_SUCCESS;
     }
-
-    /* Update MSRs that are part of the VMCS when MSR-bitmaps are not supported. */
-    if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS))
-    {
-        switch (pMixedCtx->ecx)
-        {
-            case MSR_IA32_SYSENTER_CS:  pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_CS_MSR;  break;
-            case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;
-            case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;
-            case MSR_K8_FS_BASE:        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_FS_BASE_MSR;      break;
-            case MSR_K8_GS_BASE:        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_GS_BASE_MSR;      break;
-        }
-    }
-#ifdef VBOX_STRICT
-    else
-    {
-        /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
-        switch (pMixedCtx->ecx)
-        {
-            case MSR_IA32_SYSENTER_CS:
-            case MSR_IA32_SYSENTER_EIP:
-            case MSR_IA32_SYSENTER_ESP:
-            case MSR_K8_FS_BASE:
-            case MSR_K8_GS_BASE:
-                AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%RX32\n", pMixedCtx->ecx));
-                return VERR_VMX_UNEXPECTED_EXIT_CODE;
-            case MSR_K8_LSTAR:
-            case MSR_K6_STAR:
-            case MSR_K8_SF_MASK:
-            case MSR_K8_TSC_AUX:
-                AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%RX32\n", pMixedCtx->ecx));
-                return VERR_VMX_UNEXPECTED_EXIT_CODE;
-        }
-    }
-#endif
 
     /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */
     
@@ -7704,4 +7677,53 @@
             pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
         }
+        else if (pMixedCtx->ecx == MSR_K6_EFER)         /* EFER is the only MSR we auto-load but don't allow write-passthrough. */
+            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS;
+
+        /* Update MSRs that are part of the VMCS when MSR-bitmaps are not supported. */
+        if (RT_UNLIKELY(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)))
+        {
+            switch (pMixedCtx->ecx)
+            {
+                case MSR_IA32_SYSENTER_CS:  pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_CS_MSR;  break;
+                case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;
+                case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;
+                case MSR_K8_FS_BASE:        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_FS_BASE_MSR;      break;
+                case MSR_K8_GS_BASE:        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_GS_BASE_MSR;      break;
+                case MSR_K8_KERNEL_GS_BASE: /* If we auto-load it, update HM_CHANGED_VMX_GUEST_AUTO_MSRS. */   break;
+            }
+        }
+#ifdef VBOX_STRICT
+        else
+        {
+            /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
+            switch (pMixedCtx->ecx)
+            {
+                case MSR_IA32_SYSENTER_CS:
+                case MSR_IA32_SYSENTER_EIP:
+                case MSR_IA32_SYSENTER_ESP:
+                case MSR_K8_FS_BASE:
+                case MSR_K8_GS_BASE:
+                    AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
+                    return VERR_VMX_UNEXPECTED_EXIT_CODE;
+
+                case MSR_K8_LSTAR:
+                case MSR_K6_STAR:
+                case MSR_K8_SF_MASK:
+                case MSR_K8_TSC_AUX:
+                {
+                    AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
+                                     pMixedCtx->ecx));
+                    return VERR_VMX_UNEXPECTED_EXIT_CODE;
+                }
+
+                case MSR_K8_KERNEL_GS_BASE:
+                {
+                    AssertMsgFailed(("Unexpected WRMSR for an MSR that is manually loaded/stored on every VM-exit. ecx=%#RX32\n",
+                                     pMixedCtx->ecx));
+                    return VERR_VMX_UNEXPECTED_EXIT_CODE;
+                }
+            }
+        }
+#endif  /* VBOX_STRICT */
     }
     return rc;
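The MSR-update block removed in the previous hunk is re-added here so that it runs only after EMInterpretWrmsr has succeeded, and a new case handles EFER: it is auto-loaded from the VMCS MSR area yet not given write pass-through in the MSR bitmap, so a guest write must flag HM_CHANGED_VMX_GUEST_AUTO_MSRS to be re-synced on reentry, which is the actual Solaris SMP fix. The strict-mode checks assert that pass-through MSRs never reach this intercept at all; for context, a sketch of how a write-intercept bit is located in a VMX MSR bitmap (per the Intel SDM layout; illustrative only, not VirtualBox's helper):

    #include <stdbool.h>
    #include <stdint.h>

    /* VMX MSR-bitmap layout (Intel SDM): one 4 KB page; read bitmaps at
       offsets 0x000 (MSRs 0x00000000-0x00001fff) and 0x400 (MSRs
       0xc0000000-0xc0001fff); write bitmaps at 0x800 and 0xc00. A set bit
       means WRMSR to that MSR causes a VM-exit. */
    static bool isWrmsrIntercepted(uint8_t const *pbMsrBitmap, uint32_t idMsr)
    {
        uint32_t offBitmap;
        if (idMsr <= UINT32_C(0x00001fff))
            offBitmap = 0x800;                          /* low MSRs  */
        else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
        {
            offBitmap = 0xc00;                          /* high MSRs */
            idMsr -= UINT32_C(0xc0000000);
        }
        else
            return true;     /* MSRs outside both ranges always exit */
        return (pbMsrBitmap[offBitmap + idMsr / 8] >> (idMsr % 8)) & 1;
    }

With write pass-through, the bit for an MSR such as LSTAR is clear and guest writes never exit; EFER's bit stays set, which is why the intercept above is the right place to catch its changes.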