- Timestamp: Apr 15, 2013 4:45:27 PM
- File: 1 edited
Legend:
- Unmodified: no prefix
- Added: lines prefixed with +
- Removed: lines prefixed with -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r45552 → r45556

@@ -2561 +2561 @@
 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
 AssertRCReturn(rc, rc);
-Log(("VMX_VMCS_GUEST_RIP=%#RX64\n", pMixedCtx->rip));
+Log(("Load: VMX_VMCS_GUEST_RIP=%#RX64\n", pMixedCtx->rip));
 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
 }

@@ -2629 +2629 @@
 AssertRCReturn(rc, rc);

-Log(("VMX_VMCS_GUEST_RFLAGS=%#RX64\n", uRFlags.u64));
+Log(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX64\n", uRFlags.u64));
 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RFLAGS;
 }

@@ -2684 +2684 @@
 rc = VMXWriteVmcsGstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, u64GuestCR0);
 AssertRCReturn(rc, rc);
-Log2(("VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX64\n", u64GuestCR0));
+Log(("Load: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX64\n", u64GuestCR0));

 /* Setup VT-x's view of the guest CR0. */

@@ -2712 +2712 @@
  * Guest FPU bits.
  * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be set on the first
- * CPUs to support VT-x (prob. means all the way up to Nehalem) and no mention of with regards to UX in VM-entry checks.
+ * CPUs to support VT-x and no mention of with regards to UX in VM-entry checks.
  */
 u64GuestCR0 |= X86_CR0_NE;

@@ -2789 +2789 @@
 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR0, u64GuestCR0);
 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
-Log2(("VMX_VMCS_GUEST_CR0=%#RX32\n", (uint32_t)u64GuestCR0));
+Log(("Load: VMX_VMCS_GUEST_CR0=%RGv (uSetCR0=%#RGv uZapCR0=%#RGv)\n", u64GuestCR0, uSetCR0, uZapCR0));

 /*

@@ -2798 +2798 @@
 uint64_t u64CR0Mask = 0;
 u64CR0Mask = X86_CR0_PE
+           | X86_CR0_NE
            | X86_CR0_WP
            | X86_CR0_PG

@@ -2853 +2854 @@
 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
 AssertRCReturn(rc, rc);
-Log(("VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
+Log(("Load: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));

 if ( pVM->hm.s.vmx.fUnrestrictedGuest

@@ -2897 +2898 @@
 }

-Log2(("VMX_VMCS_GUEST_CR3=%#RX64\n", u64GuestCR3));
+Log(("Load: VMX_VMCS_GUEST_CR3=%#RX64\n", u64GuestCR3));
 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, u64GuestCR3);
 AssertRCReturn(rc, rc);

@@ -2914 +2915 @@
 rc = VMXWriteVmcsGstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, u64GuestCR4);
 AssertRCReturn(rc, rc);
-Log2(("VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RGv\n", u64GuestCR4));
+Log(("Load: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RGv\n", u64GuestCR4));

 /* Setup VT-x's view of the guest CR4. */

@@ -2982 +2983 @@

 /* Write VT-x's view of the guest CR4 into the VMCS. */
-Log2(("VMX_VMCS_GUEST_CR4=%#RGv (Set=%#RX32 Zap=%#RX32)\n", u64GuestCR4, uSetCR4, uZapCR4));
+Log(("Load: VMX_VMCS_GUEST_CR4=%#RGv (Set=%#RX32 Zap=%#RX32)\n", u64GuestCR4, uSetCR4, uZapCR4));
 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR4, u64GuestCR4);

@@ -3371 +3372 @@
 in real-mode (e.g. OpenBSD 4.0) */
 REMFlushTBs(pVM);
-Log2(("Switch to protected mode detected!\n"));
+Log(("Load: Switch to protected mode detected!\n"));
 }
 pVCpu->hm.s.vmx.enmLastSeenGuestMode = enmGuestMode;

@@ -3460 +3461 @@
 AssertRCReturn(rc, rc);

-Log2(("VMX_VMCS_GUEST_TR_BASE=%#RX64\n", u64Base));
+Log(("Load: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", u64Base));
 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_TR;
 }

@@ -3474 +3475 @@

 Assert(!(pCtx->gdtr.cbGdt & 0xffff0000ULL)); /* Bits 31:16 MBZ. */
-Log2(("VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pCtx->gdtr.pGdt));
+Log(("Load: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pCtx->gdtr.pGdt));
 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
 }

@@ -3511 +3512 @@
 }

-Log2(("VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pCtx->ldtr.u64Base));
+Log(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pCtx->ldtr.u64Base));
 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_LDTR;
 }

@@ -3525 +3526 @@

 Assert(!(pCtx->idtr.cbIdt & 0xffff0000ULL)); /* Bits 31:16 MBZ. */
-Log2(("VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pCtx->idtr.pIdt));
+Log(("Load: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pCtx->idtr.pIdt));
 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
 }

@@ -4731 +4732 @@

 /* See Intel spec. 30.7.1.1 "Reflecting Exceptions to Guest Software". */
-VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;
+VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;
+RTGCUINTPTR GCPtrFaultAddress = 0;
 if (uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)
 {
 if ( hmR0VmxIsBenignXcpt(uIdtVector)
-    || hmR0VmxIsBenignXcpt(uExitVector))
+    || hmR0VmxIsBenignXcpt(uExitVector)
+    || ( hmR0VmxIsContributoryXcpt(uIdtVector)
+        && uExitVector == X86_XCPT_PF))
 {
 enmReflect = VMXREFLECTXCPT_XCPT;

@@ -4744 +4748 @@
 pVmxTransient->fVectoringPF = true;
 enmReflect = VMXREFLECTXCPT_XCPT;
+GCPtrFaultAddress = pMixedCtx->cr2;
+Log(("Vectoring #PF uCR2=%#RGv\n", pMixedCtx->cr2));
 }
 else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & VMX_CONTRIBUTORY_XCPT_BITMAP)

@@ -4778 +4784 @@
 }
 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INTR_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
-    0 /* cbInstr */, u32ErrCode, 0 /* GCPtrFaultAddress */);
+    0 /* cbInstr */, u32ErrCode, GCPtrFaultAddress);
 rc = VINF_SUCCESS;
-Log(("Pending event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.u32ErrCode));
+Log(("Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.u32ErrCode));
 break;
 }

@@ -4788 +4794 @@
 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
 rc = VINF_VMX_DOUBLE_FAULT;
-Log(("Pending #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo, uIdtVector,
+Log(("Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo, uIdtVector,
     uExitVector));
 break;

@@ -4795 +4801 @@
 case VMXREFLECTXCPT_TF:
 {
-Log(("Pending triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
+Log(("Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector));
 rc = VINF_EM_RESET;
 break;

@@ -5210 +5216 @@
 AssertRCReturn(rc, rc);
 }
+
 pVCpu->hm.s.vmx.fUpdatedGuestState |= VMX_UPDATED_GUEST_CR3;
 }

@@ -5519 +5526 @@
 {
 AssertRC(rc);
-Log2(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
+Log(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
 return rc;
 }

@@ -5531 +5538 @@
 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
 rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
-Log2(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
+Log(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
 return rc;
 }

@@ -5539 +5546 @@
     || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
 {
-Log2(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
+Log(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
 return VINF_EM_PENDING_REQUEST;
 }

@@ -5546 +5553 @@
 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
 {
-Log2(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
+Log(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
 return rc = VINF_PGM_POOL_FLUSH_PENDING;
 }

@@ -5553 +5560 @@
 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
 {
-Log2(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
+Log(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
 return VINF_EM_RAW_TO_R3;
 }

@@ -5638 +5645 @@
 AssertRC(rc);
 Log(("Converting TRPM trap: u32IntrInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u u32ErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
-    u32IntrInfo, enmTrpmEvent, u32ErrCode, GCPtrFaultAddress));
+    u32IntrInfo, enmTrpmEvent, cbInstr, u32ErrCode, GCPtrFaultAddress));
 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, u32ErrCode, GCPtrFaultAddress);
 }

@@ -5674 +5681 @@
 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */
 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
-enmTrapType = TRPM_TRAP; /** @todo does #BP and #OF come under TRAP or SOFTWARE_INT?? */
+enmTrapType = TRPM_TRAP;
 break;
 default:

@@ -5954 +5961 @@
     && !fBlockMovSS)
 {
-uint8_t u8Interrupt = 0;
+uint8_t u8Interrupt;
 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
 if (RT_SUCCESS(rc))

@@ -6520 +6527 @@

 /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
-Log2(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGv\n", GCPhysApicBase));
+Log(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGv\n", GCPhysApicBase));
 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
 AssertRCReturn(rc, rc);

@@ -6886 +6893 @@
 } while (0)

-# define VMX_VALIDATE_EXIT_HANDLER_PARAMS() \
-    do { \
-        AssertPtr(pVCpu); \
-        AssertPtr(pMixedCtx); \
-        AssertPtr(pVmxTransient); \
-        Assert(pVmxTransient->fVMEntryFailed == false); \
-        Assert(ASMIntAreEnabled()); \
-        Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
-        VMX_ASSERT_PREEMPT_CPUID_VAR(); \
-        LogFunc(("\n")); \
-        Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
-        if (VMMR0IsLogFlushDisabled(pVCpu)) \
-            VMX_ASSERT_PREEMPT_CPUID(); \
+# define VMX_VALIDATE_EXIT_HANDLER_PARAMS() \
+    do { \
+        AssertPtr(pVCpu); \
+        AssertPtr(pMixedCtx); \
+        AssertPtr(pVmxTransient); \
+        Assert(pVmxTransient->fVMEntryFailed == false); \
+        Assert(ASMIntAreEnabled()); \
+        Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
+        VMX_ASSERT_PREEMPT_CPUID_VAR(); \
+        LogFunc(("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n")); \
+        Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); \
+        if (VMMR0IsLogFlushDisabled(pVCpu)) \
+            VMX_ASSERT_PREEMPT_CPUID(); \
 } while (0)
 # define VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() \
-    do { \
-        LogFunc(("\n"));
+    do { \
+        LogFunc(("\n")); \
 } while(0)
 #else /* Release builds */

@@ -7248 +7255 @@
 else
 {
-AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %RGv failed with %Rrc\n",
+AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RGv failed with %Rrc\n",
     pVmxTransient->uExitQualification, rc));
 rc = VERR_EM_INTERPRETER;

@@ -7780 +7787 @@
 {
 case 0: /* CR0 */
-Log(("CR0 write rc=%d\n", rc));
+Log(("CR0 write rc=%d CR0=%#RGv\n", rc, pMixedCtx->cr0));
 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
 break;

@@ -7788 +7795 @@
 case 3: /* CR3 */
 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
-Log(("CR3 write rc=%d\n", rc));
+Log(("CR3 write rc=%d CR3=%#RGv\n", rc, pMixedCtx->cr3));
 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
 break;
 case 4: /* CR4 */
-Log(("CR4 write rc=%d\n", rc));
+Log(("CR4 write rc=%d CR4=%#RGv\n", rc, pMixedCtx->cr4));
 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
 break;

@@ -7812 +7819 @@
 {
 /* EMInterpretCRxRead() requires EFER MSR, CS. */
-rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
+rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
 AssertRCReturn(rc, rc);
 Assert( !pVM->hm.s.fNestedPaging

@@ -8388 +8395 @@
 {
 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
+STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);

 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
 AssertRCReturn(rc, rc);
-STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);

 if (!(pMixedCtx->cr0 & X86_CR0_NE))

@@ -8411 +8418 @@
 {
 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
+STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);

 /** @todo Try optimize this by not saving the entire guest state unless
 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
 AssertRCReturn(rc, rc);
-STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);

 PVM pVM = pVCpu->CTX_SUFF(pVM);

@@ -8442 +8449 @@
 {
 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
+STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);

 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);

@@ -8454 +8462 @@
 PVM pVM = pVCpu->CTX_SUFF(pVM);
 rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6);
-STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
 if (rc == VINF_EM_RAW_GUEST_TRAP)
 {

@@ -8502 +8509 @@
 int rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
 AssertRCReturn(rc, rc);
+
+rc = hmR0VmxSaveGuestGprs(pVCpu, pMixedCtx);
+rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
+Log(("Rip %04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));

 /* Lazy FPU loading; Load the guest-FPU state transparently and continue execution of the guest. */

@@ -8533 +8544 @@
 {
 VMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
+STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);

 int rc = VERR_INTERNAL_ERROR_5;

@@ -8543 +8555 @@
 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
-rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
+rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
 AssertRCReturn(rc, rc);
-Log(("#GP Gst: RIP %#RX64\n", pMixedCtx->rip));
+Log(("#GP Gst: RIP %#RX64 ErrorCode=%#x CR0=%#RGv CPL=%u\n", pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode,
+    pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu)));
 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
     pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);

@@ -8571 +8584 @@
 rc = VINF_SUCCESS;
 Assert(cbOp == pDis->cbInstr);
-Log2(("#GP Disas OpCode=%u CS:EIP %04x:%#RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
+Log(("#GP Disas OpCode=%u CS:EIP %04x:%#RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
 switch (pDis->pCurInstr->uOpcode)
 {

@@ -8629 +8642 @@
 break;
 }
-Log(("POPF %x -> %RGv mask=%x RIP=%#RX64\n", uEflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
+Log(("POPF %x -> %#RGv mask=%x RIP=%#RX64\n", uEflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
     | (uEflags.u32 & X86_EFL_POPF_BITS & uMask);

@@ -8678 +8691 @@
 break;
 }
-Log(("PUSHF %x -> %RGv\n", uEflags.u, GCPtrStack));
+Log(("PUSHF %x -> %#RGv\n", uEflags.u, GCPtrStack));
 pMixedCtx->esp -= cbParm;
 pMixedCtx->esp &= uMask;

@@ -8747 +8760 @@
 rc = VBOXSTRICTRC_VAL(rc2);
 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
-Log2(("#GP rc=%Rrc\n", rc));
+Log(("#GP rc=%Rrc\n", rc));
 break;
 }

@@ -8774 +8787 @@
 /* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
    hmR0VmxCheckExitDueToEventDelivery(). */
+int rc = hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
+rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
+AssertRCReturn(rc, rc);
+Assert(pVmxTransient->fVmcsFieldsRead & VMX_TRANSIENT_EXIT_INTERRUPTION_INFO);
 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
     pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);

@@ -8789 +8806 @@
 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
 rc |= hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
-rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
 AssertRCReturn(rc, rc);

@@ -8799 +8815 @@
 {
 pMixedCtx->cr2 = pVmxTransient->uExitQualification;
+pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
-    pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, pMixedCtx->cr2);
+    0 /* cbInstr */, pVmxTransient->uExitIntrErrorCode, pMixedCtx->cr2);
+STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
 }
 else
 {
 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
-Log(("Pending #DF due to vectoring #PF.\n"));
-pVCpu->hm.s.Event.fPending = false;
-rc = hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
+pVCpu->hm.s.Event.fPending = false; /* A vectoring #PF. */
+hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
+Log(("Pending #DF due to vectoring #PF. NP\n"));
 }
 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);

@@ -8847 +8865 @@
 #endif

-Assert(!pVmxTransient->fVectoringPF || pVCpu->hm.s.Event.fPending);
-
 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
 AssertRCReturn(rc, rc);

+Log(("#PF: cr2=%#RGv cs:rip=%#04x:%#RGv uErrCode %#RX32\n", pVmxTransient->uExitQualification, pMixedCtx->cs.Sel,
+    pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode));
+
 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntrErrorCode);
-
-/* Forward it to the trap handler first. */
 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntrErrorCode, CPUMCTX2CORE(pMixedCtx),
     (RTGCPTR)pVmxTransient->uExitQualification);

-Log(("#PF: cr2=%RGv cs:rip=%04x:%RGv errorcode %#RX32 rc=%d\n", pVmxTransient->uExitQualification, pMixedCtx->cs.Sel,
-    pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode, rc));
+Log(("#PF: rc=%Rrc\n", rc));

 if (rc == VINF_SUCCESS)
 {

@@ -8874 +8889 @@
 else if (rc == VINF_EM_RAW_GUEST_TRAP)
 {
-if (RT_LIKELY(!pVmxTransient->fVectoringPF))
+if (!pVmxTransient->fVectoringPF)
 {
 /* It's a guest page fault and needs to be reflected to the guest. */
 TRPMResetTrap(pVCpu);
 pMixedCtx->cr2 = pVmxTransient->uExitQualification;
+pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
     0 /* cbInstr */, uGstErrorCode, pMixedCtx->cr2);

@@ -8887 +8903 @@
 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
 TRPMResetTrap(pVCpu);
-pVCpu->hm.s.Event.fPending = false;
+pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF for replace it with #DF. */
 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
 Log(("#PF: Pending #DF due to vectoring #PF\n"));