VirtualBox

Changeset 45585 in vbox for trunk/src/VBox


Timestamp:
Apr 17, 2013 11:31:45 AM
Author:
vboxsync
svn:sync-xref-src-repo-rev:
85079
Message:

VMMR0/HMVMXR0: Fix regression with EFER MSR write exits. Fixes NetBSD again.

Location:
trunk/src/VBox/VMM
Files:
2 edited

Legend:

Lines prefixed with '-' were removed, lines prefixed with '+' were added; unprefixed lines are unchanged context.
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r45576 r45585

    @@ -36,4 +36,6 @@
     #endif
     #ifdef DEBUG_ramshankar
    +#define VBOX_ALWAYS_SAVE_FULL_VTX_STATE
    +#define VBOX_ALWAYS_SYNC_FULL_VTX_STATE
     #define VBOX_ALWAYS_TRAP_ALL_EXCEPTIONS
     #endif

    @@ -3530,31 +3532,4 @@
         }

    -    /*
    -     * Guest FS & GS base MSRs.
    -     * We already initialized the FS & GS base as part of the guest segment registers, but the guest's FS/GS base
    -     * MSRs might have changed (e.g. due to WRMSR) and we need to update the bases if that happened. These MSRs
    -     * are only available in 64-bit mode.
    -     */
    -    /** @todo Avoid duplication of this code in assembly (see MYPUSHSEGS) - it
    -     *        should not be necessary to do it in assembly again. */
    -    if (CPUMIsGuestInLongModeEx(pCtx))
    -    {
    -        if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_FS_BASE_MSR)
    -        {
    -            rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_FS_BASE, pCtx->fs.u64Base);
    -            AssertRCReturn(rc, rc);
    -            pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_FS_BASE_MSR;
    -        }
    -
    -        if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GS_BASE_MSR)
    -        {
    -            rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_GS_BASE, pCtx->gs.u64Base);
    -            AssertRCReturn(rc, rc);
    -            pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GS_BASE_MSR;
    -        }
    -    }
    -    else
    -        pVCpu->hm.s.fContextUseFlags &= ~(HM_CHANGED_GUEST_FS_BASE_MSR | HM_CHANGED_GUEST_GS_BASE_MSR);
    -
         return VINF_SUCCESS;
     }
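The hunk above deletes the dedicated FS/GS base MSR sync from the guest-load path. As the deleted comment itself notes, the bases are already written to the VMCS when the guest segment registers are loaded, so a guest WRMSR to MSR_K8_FS_BASE or MSR_K8_GS_BASE can simply mark the segment registers dirty instead (see the WRMSR-exit hunk further down). A minimal sketch of the surviving path, assuming the segment-register loader ends with the same VMXWriteVmcsGstN calls the deleted block used (the helper name below is hypothetical, not a VirtualBox function):

        /* Hypothetical condensation, not actual VirtualBox code: in 64-bit guest
         * mode the FS/GS bases reach the VMCS via the segment-register load path,
         * which is why the per-MSR sync deleted above became redundant. */
        static int vmxSketchLoadFsGsBase(PVMCPU pVCpu, PCPUMCTX pCtx)
        {
            int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_FS_BASE, pCtx->fs.u64Base);
            AssertRCReturn(rc, rc);
            rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_GS_BASE, pCtx->gs.u64Base);
            AssertRCReturn(rc, rc);
            /* One dirty bit (HM_CHANGED_GUEST_SEGMENT_REGS) now covers selectors
             * and bases alike; no separate FS/GS base bookkeeping. */
            return VINF_SUCCESS;
        }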
     
    @@ -5147,6 +5122,6 @@
         switch (pMsr->u32IndexMSR)
         {
    -        case MSR_K8_LSTAR:          pMixedCtx->msrLSTAR = pMsr->u64Value;                    break;
    -        case MSR_K6_STAR:           pMixedCtx->msrSTAR = pMsr->u64Value;                     break;
    +        case MSR_K8_LSTAR:          pMixedCtx->msrLSTAR  = pMsr->u64Value;                   break;
    +        case MSR_K6_STAR:           pMixedCtx->msrSTAR   = pMsr->u64Value;                   break;
             case MSR_K8_SF_MASK:        pMixedCtx->msrSFMASK = pMsr->u64Value;                   break;
             case MSR_K8_TSC_AUX:        CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value);  break;

    @@ -5440,4 +5415,6 @@

         VMMRZCallRing3Disable(pVCpu);
    +    Assert(VMMR0IsLogFlushDisabled(pVCpu));
    +    LogFunc(("\n"));

         int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);

    @@ -5738,4 +5715,5 @@
     {
         Assert(!VMMRZCallRing3IsEnabled(pVCpu));
    +    Assert(VMMR0IsLogFlushDisabled(pVCpu));

         int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);

    @@ -5854,5 +5832,6 @@

         VMMRZCallRing3Disable(pVCpu);
    -    Log(("hmR0VmxLongJmpToRing3\n"));
    +    Assert(VMMR0IsLogFlushDisabled(pVCpu));
    +    Log(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3\n"));
         hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser, VINF_VMM_UNKNOWN_RING3_CALL);
         VMMRZCallRing3Enable(pVCpu);
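The three hunks above harden the paths that save state and long-jump out of ring-0 with Assert(VMMR0IsLogFlushDisabled(pVCpu)). The likely rationale (an inference, not stated in the commit) is that flushing the ring-0 logger is itself serviced through a ring-3 call, so it must provably be off while ring-3 calls are disabled. The guarded shape, condensed:

        /* Sketch of the guard pattern these hunks tighten; the comment on the
         * assertion is an interpretation, not VirtualBox documentation. */
        VMMRZCallRing3Disable(pVCpu);
        Assert(VMMR0IsLogFlushDisabled(pVCpu));  /* a log flush here would need ring-3 */
        hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser, VINF_VMM_UNKNOWN_RING3_CALL);
        VMMRZCallRing3Enable(pVCpu);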
     
    @@ -6593,4 +6572,7 @@
         Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT));
         Log4(("LoadFlags=%#RX32\n", pVCpu->hm.s.fContextUseFlags));
    +#ifdef VBOX_ALWAYS_SYNC_FULL_VTX_STATE
    +    pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
    +#endif
         int rc = VINF_SUCCESS;
         if (pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_RIP)
     
    @@ -6717,5 +6699,8 @@
             /* Update the guest interruptibility-state from the VMCS. */
             hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);
    -
    +#if defined(VBOX_ALWAYS_SYNC_FULL_VTX_STATE) || defined(VBOX_ALWAYS_SAVE_FULL_VTX_STATE)
    +        rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    +        AssertRC(rc);
    +#endif
             /*
              * If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever
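Together with the first hunk, the two new DEBUG_ramshankar-only macros form a brute-force state-sync debugging mode: VBOX_ALWAYS_SYNC_FULL_VTX_STATE marks the entire guest context dirty before every VM-entry, and either macro forces a full hmR0VmxSaveGuestState() after every VM-exit. The combined effect on one entry/exit round trip, condensed from the hunks:

        /* One guest entry/exit cycle with the debug macros active. */
        #ifdef VBOX_ALWAYS_SYNC_FULL_VTX_STATE
            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;  /* reload everything on entry */
        #endif
            /* ... VM-entry, guest executes, VM-exit ... */
        #if defined(VBOX_ALWAYS_SYNC_FULL_VTX_STATE) || defined(VBOX_ALWAYS_SAVE_FULL_VTX_STATE)
            rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);          /* read everything back on exit */
            AssertRC(rc);
        #endif

This trades performance for the guarantee that no lazily-synced field can go stale, which is exactly the class of bug this changeset fixes.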
     
    @@ -7661,4 +7646,5 @@
         rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
         AssertRCReturn(rc, rc);
    +    Log(("ecx=%#RX32\n", pMixedCtx->ecx));

         rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));

    @@ -7678,5 +7664,9 @@
             }
             else if (pMixedCtx->ecx == MSR_K6_EFER)         /* EFER is the only MSR we auto-load but don't allow write-passthrough. */
    +        {
    +            rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
    +            AssertRCReturn(rc, rc);
                 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS;
    +        }

             /* Update MSRs that are part of the VMCS when MSR-bitmaps are not supported. */
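This hunk is the regression fix named in the commit message. EFER sits in the VMCS auto-load/store MSR area, and setting HM_CHANGED_VMX_GUEST_AUTO_MSRS makes the next VM-entry rewrite that whole area from pMixedCtx. Read straightforwardly, the bug was that the flag was set while pMixedCtx could still hold stale values for the other auto-load/store MSRs, so the rewrite clobbered values the guest (NetBSD, per the message) had just programmed; refreshing the context from the VMCS area first closes that window. Condensed view of the handler after the change (a sketch of the control flow, not a drop-in excerpt):

        /* WRMSR-exit path for EFER after r45585 (simplified). */
        rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
        if (pMixedCtx->ecx == MSR_K6_EFER)   /* auto-loaded, but not write-passthrough */
        {
            /* Refresh pMixedCtx from the VMCS auto-load/store area *before*
             * marking it dirty, so the forced reload cannot push stale values. */
            rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
            AssertRCReturn(rc, rc);
            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS;
        }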
     
    @@ -7688,6 +7678,6 @@
                 case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;
                 case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;
    -            case MSR_K8_FS_BASE:        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_FS_BASE_MSR;      break;
    -            case MSR_K8_GS_BASE:        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_GS_BASE_MSR;      break;
    +            case MSR_K8_FS_BASE:        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS;     break;
    +            case MSR_K8_GS_BASE:        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS;     break;
                 case MSR_K8_KERNEL_GS_BASE: /* If we auto-load it, update HM_CHANGED_VMX_GUEST_AUTO_MSRS. */   break;
             }

    @@ -7921,4 +7911,5 @@
         /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
         AssertRCReturn(rc, rc);
    +    Log(("CS:RIP=%04x:%#RGv\n", pMixedCtx->cs.Sel, pMixedCtx->rip));

         /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */

    @@ -8141,8 +8132,7 @@
     {
         VMX_VALIDATE_EXIT_HANDLER_PARAMS();
    -    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);

         /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
    -    rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
    +    int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
         if (RT_UNLIKELY(rc == VINF_VMX_DOUBLE_FAULT))
             return VINF_SUCCESS;

    @@ -8160,4 +8150,5 @@
         rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
     #endif
    +    rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
         AssertRCReturn(rc, rc);

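Two small cleanups happen in the last two hunks of this handler: the exit-qualification read moves from the top of the function to after the event-delivery check, so an early VINF_VMX_DOUBLE_FAULT return no longer pays for a VMCS read it never uses, and the read's status is folded into the running rc. The folding relies on VINF_SUCCESS being 0, so (assuming each helper returns either VINF_SUCCESS or an error code, as these do) the OR of the batch is non-zero exactly when something failed:

        /* The rc-batching idiom used above: one AssertRCReturn covers the batch. */
        int rc  = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
        rc     |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
        AssertRCReturn(rc, rc);   /* any non-zero rc aborts here */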
     
    @@ -8172,4 +8163,4 @@
                 && VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) == 0x80)
             {
    -            AssertMsgFailed(("hmR0VmxExitApicAccess: can't touch TPR offset while using TPR shadowing.\n"));
    +            AssertMsgFailed(("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
             }

    @@ -8179,10 +8170,12 @@
             GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
             PVM pVM = pVCpu->CTX_SUFF(pVM);
    +        Log(("ApicAccess uAccessType=%#x GCPhys=%RGp Off=%#x\n", uAccessType, GCPhys,
    +             VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
    +
             VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu,
                                                   (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW,
                                                   CPUMCTX2CORE(pMixedCtx), GCPhys);
             rc = VBOXSTRICTRC_VAL(rc2);
    -        Log(("ApicAccess %RGp %#x rc=%d\n", GCPhys,
    -             VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification), rc));
    +        Log(("ApicAccess rc=%d\n", rc));
             if (   rc == VINF_SUCCESS
                 || rc == VERR_PAGE_TABLE_NOT_PRESENT

    @@ -8197,4 +8190,5 @@

             default:
    +            Log(("ApicAccess uAccessType=%#x\n", uAccessType));
                 rc = VINF_EM_RAW_EMULATE_INSTR;
                 break;
  • trunk/src/VBox/VMM/include/HMInternal.h

    r45531 r45585

    @@ -111,16 +111,14 @@
     # define HM_CHANGED_GUEST_SEGMENT_REGS           RT_BIT(10)
     # define HM_CHANGED_GUEST_DEBUG                  RT_BIT(11)
    -# define HM_CHANGED_GUEST_FS_BASE_MSR            RT_BIT(12)
    -# define HM_CHANGED_GUEST_GS_BASE_MSR            RT_BIT(13)
    -# define HM_CHANGED_GUEST_SYSENTER_CS_MSR        RT_BIT(14)
    -# define HM_CHANGED_GUEST_SYSENTER_EIP_MSR       RT_BIT(15)
    -# define HM_CHANGED_GUEST_SYSENTER_ESP_MSR       RT_BIT(16)
    -# define HM_CHANGED_VMX_GUEST_AUTO_MSRS          RT_BIT(17)
    -# define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE     RT_BIT(18)
    -# define HM_CHANGED_VMX_GUEST_APIC_STATE         RT_BIT(19)
    -# define HM_CHANGED_VMX_ENTRY_CTLS               RT_BIT(20)
    -# define HM_CHANGED_VMX_EXIT_CTLS                RT_BIT(21)
    -
    -# define HM_CHANGED_HOST_CONTEXT                 RT_BIT(22)
    +# define HM_CHANGED_GUEST_SYSENTER_CS_MSR        RT_BIT(12)
    +# define HM_CHANGED_GUEST_SYSENTER_EIP_MSR       RT_BIT(13)
    +# define HM_CHANGED_GUEST_SYSENTER_ESP_MSR       RT_BIT(14)
    +# define HM_CHANGED_VMX_GUEST_AUTO_MSRS          RT_BIT(15)
    +# define HM_CHANGED_VMX_GUEST_ACTIVITY_STATE     RT_BIT(16)
    +# define HM_CHANGED_VMX_GUEST_APIC_STATE         RT_BIT(17)
    +# define HM_CHANGED_VMX_ENTRY_CTLS               RT_BIT(18)
    +# define HM_CHANGED_VMX_EXIT_CTLS                RT_BIT(19)
    +
    +# define HM_CHANGED_HOST_CONTEXT                 RT_BIT(20)

     # define HM_CHANGED_ALL_GUEST   (  HM_CHANGED_GUEST_RIP                \
     
    @@ -136,6 +134,4 @@
                                      | HM_CHANGED_GUEST_SEGMENT_REGS       \
                                      | HM_CHANGED_GUEST_DEBUG              \
    -                                 | HM_CHANGED_GUEST_FS_BASE_MSR        \
    -                                 | HM_CHANGED_GUEST_GS_BASE_MSR        \
                                      | HM_CHANGED_GUEST_SYSENTER_CS_MSR    \
                                      | HM_CHANGED_GUEST_SYSENTER_EIP_MSR   \
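With the FS/GS base bits gone, the remaining HM_CHANGED_* flags are repacked so the numbering stays dense (HM_CHANGED_HOST_CONTEXT drops from bit 22 to bit 20), and HM_CHANGED_ALL_GUEST shrinks accordingly. fContextUseFlags is a runtime-only bitmask, so renumbering is safe as long as everything is rebuilt together, which a single changeset guarantees. A standalone sanity check of the new layout (illustrative only; values transcribed from the hunks above, not VirtualBox headers):

        /* Compilable check of the repacked flag layout after r45585. */
        #include <assert.h>
        #include <stdio.h>

        #define RT_BIT(bit)                        (1U << (bit))
        #define HM_CHANGED_GUEST_SYSENTER_CS_MSR   RT_BIT(12)  /* was RT_BIT(14) */
        #define HM_CHANGED_VMX_EXIT_CTLS           RT_BIT(19)  /* was RT_BIT(21) */
        #define HM_CHANGED_HOST_CONTEXT            RT_BIT(20)  /* was RT_BIT(22) */

        int main(void)
        {
            assert(HM_CHANGED_GUEST_SYSENTER_CS_MSR == 0x1000u);
            assert(HM_CHANGED_VMX_EXIT_CTLS         == 0x80000u);
            assert(HM_CHANGED_HOST_CONTEXT          == 0x100000u);
            printf("flag layout after r45585: OK\n");
            return 0;
        }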