VirtualBox

Changeset 46076 in vbox for trunk


Timestamp:
May 14, 2013 6:00:02 PM (12 years ago)
Author:
vboxsync
Message:

VMM/HMVMXR0: 32-bit hybrid darwin kernel fixes.
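In short, the fix widens every natural-width VMCS guest-state read to a plain uint64_t destination (instead of RTGCUINTPTR/RTGCUINTREG locals and struct fields) and switches the matching log/assert format types from %RGv to %RX64. A minimal before/after sketch of the recurring pattern, assembled for illustration from the hunks below rather than copied verbatim; the reasoning in the comments is inferred from the commit message:

    /* Before r46076: the destination width tracked the RTGCUINTREG typedef. */
    RTGCUINTREG uVal = 0;
    rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &uVal);        AssertRC(rc);
    Log(("Guest RIP %#RGv\n", (RTGCPTR)uVal));

    /* After r46076: always read into a full 64-bit local, presumably so a
       64-bit VMCS read on a 32-bit hybrid darwin kernel cannot be truncated
       or overflow a narrower variable. */
    uint64_t u64Val = 0;
    rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);      AssertRC(rc);
    Log(("Guest RIP %#RX64\n", u64Val));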

Location:
trunk/src/VBox/VMM/VMMR0
Files:
2 edited

Legend:

  Unmodified (no prefix)
+ Added
- Removed
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r46041 r46076  
     uint32_t        uExitIntrErrorCode;
     /** The VM-exit exit qualification. */
-    RTGCUINTPTR     uExitQualification;
-#if GC_ARCH_BITS == 32
-    /** Alignment. */
-    uint32_t        u32Alignment1;
-#endif
+    uint64_t        uExitQualification;

     /** The VM-exit interruption-information field. */
     

                 /* Guest bits. */
-                RTGCUINTREG uGCReg;
-                rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &uGCReg);        AssertRC(rc);
-                Log(("Old Guest Rip %#RGv New %#RGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)uGCReg));
-                rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &uGCReg);        AssertRC(rc);
-                Log(("Old Guest Rsp %#RGv New %#RGv\n", (RTGCPTR)pCtx->rsp, (RTGCPTR)uGCReg));
+                rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);        AssertRC(rc);
+                Log(("Old Guest Rip %#RX64 New %#RX64\n", pCtx->rip, u64Val));
+                rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);        AssertRC(rc);
+                Log(("Old Guest Rsp %#RX64 New %#RX64\n", pCtx->rsp, u64Val));
                 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val);       AssertRC(rc);
                 Log(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
     
                 && uExitVector == X86_XCPT_PF)
             {
-                Log(("IDT: Contributory #PF uCR2=%#RGv\n", pMixedCtx->cr2));
+                Log(("IDT: Contributory #PF uCR2=%#RX64\n", pMixedCtx->cr2));
             }
 #endif
     
             {
                 pVmxTransient->fVectoringPF = true;
-                Log(("IDT: Vectoring #PF uCR2=%#RGv\n", pMixedCtx->cr2));
+                Log(("IDT: Vectoring #PF uCR2=%#RX64\n", pMixedCtx->cr2));
             }
             else if (   (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)
     
     if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP))
     {
-        RTGCUINTREG uVal = 0;
+        uint64_t uVal = 0;
         rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &uVal);
         AssertRCReturn(rc, rc);
     
     if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RSP))
     {
-        RTGCUINTREG uVal = 0;
+        uint64_t uVal = 0;
         rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &uVal);
         AssertRCReturn(rc, rc);
     
     }

-    RTGCUINTREG uGCVal = 0;
+    uint64_t uGCVal = 0;
     if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
     {
     
     if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_FS_BASE_MSR))
     {
-        RTGCUINTREG uVal = 0;
+        uint64_t uVal = 0;
         rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_FS_BASE, &uVal);   AssertRCReturn(rc, rc);
         pMixedCtx->fs.u64Base = uVal;
     
     if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GS_BASE_MSR))
     {
-        RTGCUINTREG uVal = 0;
+        uint64_t uVal = 0;
         rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GS_BASE, &uVal);   AssertRCReturn(rc, rc);
         pMixedCtx->gs.u64Base = uVal;
     
                 && CPUMIsGuestPagingEnabledEx(pMixedCtx)))
         {
-            RTGCUINTREG uVal = 0;
+            uint64_t uVal = 0;
             rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &uVal);
             if (pMixedCtx->cr3 != uVal)
     
     pSelReg->u32Limit = u32Val;

-    RTGCUINTREG uGCVal = 0;
-    rc = VMXReadVmcsGstNByIdxVal(idxBase, &uGCVal);
+    uint64_t u64Val = 0;
+    rc = VMXReadVmcsGstNByIdxVal(idxBase, &u64Val);
     AssertRCReturn(rc, rc);
-    pSelReg->u64Base = uGCVal;
+    pSelReg->u64Base = u64Val;

     rc = VMXReadVmcs32(idxAccess, &u32Val);
     

     /* Guest GDTR. */
-    RTGCUINTREG uGCVal = 0;
-    uint32_t    u32Val = 0;
+    uint64_t u64Val = 0;
+    uint32_t u32Val = 0;
     if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GDTR))
     {
-        rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &uGCVal);        AssertRCReturn(rc, rc);
+        rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);        AssertRCReturn(rc, rc);
         rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);       AssertRCReturn(rc, rc);
-        pMixedCtx->gdtr.pGdt  = uGCVal;
+        pMixedCtx->gdtr.pGdt  = u64Val;
         pMixedCtx->gdtr.cbGdt = u32Val;
         pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GDTR;
     
     if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_IDTR))
     {
-        rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &uGCVal);        AssertRCReturn(rc, rc);
+        rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);        AssertRCReturn(rc, rc);
         rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);       AssertRCReturn(rc, rc);
-        pMixedCtx->idtr.pIdt  = uGCVal;
+        pMixedCtx->idtr.pIdt  = u64Val;
         pMixedCtx->idtr.cbIdt = u32Val;
         pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_IDTR;
     
         pMixedCtx->cr2 = GCPtrFaultAddress;
     }
-    Log(("Injecting u32IntrInfo=%#x u32ErrCode=%#x cbInstr=%#x uCR2=%#RGv\n", u32IntrInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
+    Log(("Injecting u32IntrInfo=%#x u32ErrCode=%#x cbInstr=%#x uCR2=%#RX64\n", u32IntrInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));

     AssertRCReturn(rc, rc);
     
     else
     {
-        AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RGv failed with %Rrc\n",
+        AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RX64 failed with %Rrc\n",
                                               pVmxTransient->uExitQualification, rc));
     }
     
             {
                 case 0: /* CR0 */
-                    Log(("CRX CR0 write rc=%d CR0=%#RGv\n", rc, pMixedCtx->cr0));
+                    Log(("CRX CR0 write rc=%d CR0=%#RX64\n", rc, pMixedCtx->cr0));
                     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
                     break;
     
                 case 3: /* CR3 */
                     Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
-                    Log(("CRX CR3 write rc=%d CR3=%#RGv\n", rc, pMixedCtx->cr3));
+                    Log(("CRX CR3 write rc=%d CR3=%#RX64\n", rc, pMixedCtx->cr3));
                     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
                     break;
                 case 4: /* CR4 */
-                    Log(("CRX CR4 write rc=%d CR4=%#RGv\n", rc, pMixedCtx->cr4));
+                    Log(("CRX CR4 write rc=%d CR4=%#RX64\n", rc, pMixedCtx->cr4));
                     pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
                     break;
     
     /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
     AssertRCReturn(rc, rc);
-    Log(("CS:RIP=%04x:%#RGv\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
+    Log(("CS:RIP=%04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));

     /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */
     
             GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
             PVM pVM = pVCpu->CTX_SUFF(pVM);
-            Log(("ApicAccess uAccessType=%#x GCPhys=%RGp Off=%#x\n", uAccessType, GCPhys,
+            Log(("ApicAccess uAccessType=%#x GCPhys=%#RGv Off=%#x\n", uAccessType, GCPhys,
                  VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));

     
     VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
     rc = VBOXSTRICTRC_VAL(rc2);
-    Log(("EPT misconfig at %#RGv RIP=%#RGv rc=%d\n", GCPhys, pMixedCtx->rip, rc));
+    Log(("EPT misconfig at %#RGv RIP=%#RX64 rc=%d\n", GCPhys, pMixedCtx->rip, rc));
     if (   rc == VINF_SUCCESS
         || rc == VERR_PAGE_TABLE_NOT_PRESENT
     

     /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
-    AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RGv", pVmxTransient->uExitQualification));
+    AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQualification));

     RTGCUINT uErrorCode = 0;
     
     TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);

-    Log(("EPT violation %#x at %#RGv ErrorCode %#x CS:EIP=%04x:%#RX64\n", (uint32_t)pVmxTransient->uExitQualification, GCPhys,
+    Log(("EPT violation %#x at %#RX64 ErrorCode %#x CS:EIP=%04x:%#RX64\n", pVmxTransient->uExitQualification, GCPhys,
          uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));

     
         rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
         AssertRCReturn(rc, rc);
-        Log(("#GP Gst: RIP %#RX64 ErrorCode=%#x CR0=%#RGv CPL=%u\n", pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode,
+        Log(("#GP Gst: RIP %#RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u\n", pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode,
              pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu)));
         hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
     
                     break;
                 }
-                Log(("POPF %x -> %#RGv mask=%x RIP=%#RX64\n", uEflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
+                Log(("POPF %x -> %#RX64 mask=%x RIP=%#RX64\n", uEflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
                 pMixedCtx->eflags.u32 =   (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
                                         | (uEflags.u32 & X86_EFL_POPF_BITS & uMask);
     
     AssertRCReturn(rc, rc);

-    Log(("#PF: cr2=%#RGv cs:rip=%#04x:%#RGv uErrCode %#RX32 cr3=%#RGv\n", pVmxTransient->uExitQualification, pMixedCtx->cs.Sel,
+    Log(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification, pMixedCtx->cs.Sel,
          pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode, pMixedCtx->cr3));

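Each cached guest register in HMVMXR0.cpp now follows the same lazy-read shape shown piecewise above. A condensed sketch of that shape, with identifiers taken from the hunks but assembled here for illustration only:

    if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP))
    {
        uint64_t uVal = 0;                               /* was RTGCUINTREG before this changeset */
        rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &uVal);
        AssertRCReturn(rc, rc);
        pMixedCtx->rip = uVal;                           /* illustrative store into the guest context */
        pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RIP;
    }
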
  • trunk/src/VBox/VMM/VMMR0/HWVMXR0.h

    r45904 r46076  
                                                             VMXReadVmcs64(idxField, pVal)                  \
                                                           : VMXReadVmcs32(idxField, (uint32_t *)pVal)
-#  define VMXReadVmcsGstN(idxField, pVal)                 (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests) ? \
-                                                            VMXReadVmcs64(idxField, pVal)                  \
-                                                          : VMXReadVmcs32(idxField, (uint32_t *)pVal)
-#  define VMXReadVmcsGstNByIdxVal(idxField, pVal)         VMXReadVmcsGstN(idxField, pVal)
+#  define VMXReadVmcsGstN                                 VMXReadVmcsHstN
+#  define VMXReadVmcsGstNByIdxVal                         VMXReadVmcsHstN
 # elif HC_ARCH_BITS == 32
 #  define VMXReadVmcsHstN                                 VMXReadVmcs32
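With the header change, the guest-field readers in this configuration become plain aliases of the host-field reader, so call sites are unchanged. A usage sketch, assuming (as the surviving context lines suggest) that VMXReadVmcsHstN already selects between VMXReadVmcs64 and VMXReadVmcs32 as needed:

    /* Sketch only: after r46076 a natural-width guest field is read through the
       same macro as a host field; the 64/32-bit selection lives in
       VMXReadVmcsHstN instead of a separate fAllow64BitGuests check. */
    uint64_t u64Val = 0;
    int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);   /* expands to VMXReadVmcsHstN here */
    AssertRC(rc);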