Changeset 46580 in vbox for trunk/src/VBox/VMM
- Timestamp: Jun 17, 2013 10:20:31 AM (12 years ago)
- svn:sync-xref-src-repo-rev: 86455
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 2 edited
Legend: Unmodified · Added · Removed
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r46569 r46580 2628 2628 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX); 2629 2629 2630 int rc; 2630 /* 2631 * The ordering of the case labels is based on most-frequently-occurring VM-exits for most guests under 2632 * normal workloads (for some definition of "normal"). 2633 */ 2631 2634 uint32_t u32ExitCode = pSvmTransient->u64ExitCode; 2632 2635 switch (pSvmTransient->u64ExitCode) 2633 2636 { 2637 case SVM_EXIT_NPF: 2638 return hmR0SvmExitNestedPF(pVCpu, pCtx, pSvmTransient); 2639 2634 2640 case SVM_EXIT_IOIO: 2635 2641 return hmR0SvmExitIOInstr(pVCpu, pCtx, pSvmTransient); 2636 2642 2643 case SVM_EXIT_RDTSC: 2644 return hmR0SvmExitRdtsc(pVCpu, pCtx, pSvmTransient); 2645 2646 case SVM_EXIT_RDTSCP: 2647 return hmR0SvmExitRdtscp(pVCpu, pCtx, pSvmTransient); 2648 2637 2649 case SVM_EXIT_CPUID: 2638 2650 return hmR0SvmExitCpuid(pVCpu, pCtx, pSvmTransient); 2639 2651 2640 case SVM_EXIT_RDTSC:2641 return hmR0SvmExitRdtsc(pVCpu, pCtx, pSvmTransient);2642 2643 case SVM_EXIT_RDTSCP:2644 return hmR0SvmExitRdtscp(pVCpu, pCtx, pSvmTransient);2645 2646 2652 case SVM_EXIT_MONITOR: 2647 2653 return hmR0SvmExitMonitor(pVCpu, pCtx, pSvmTransient); … … 2649 2655 case SVM_EXIT_MWAIT: 2650 2656 return hmR0SvmExitMwait(pVCpu, pCtx, pSvmTransient); 2657 2658 case SVM_EXIT_READ_CR0: 2659 case SVM_EXIT_READ_CR3: 2660 case SVM_EXIT_READ_CR4: 2661 return hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient); 2651 2662 2652 2663 case SVM_EXIT_WRITE_CR0: … … 2656 2667 return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient); 2657 2668 2658 case SVM_EXIT_READ_CR0:2659 case SVM_EXIT_READ_CR3:2660 case SVM_EXIT_READ_CR4:2661 return hmR0SvmExitReadCRx(pVCpu, pCtx, pSvmTransient);2662 2663 case SVM_EXIT_MSR:2664 return hmR0SvmExitMsr(pVCpu, pCtx, pSvmTransient);2665 2666 2669 case SVM_EXIT_INTR: 2667 2670 case SVM_EXIT_FERR_FREEZE: … … 2669 2672 case SVM_EXIT_INIT: 2670 2673 return hmR0SvmExitIntr(pVCpu, pCtx, pSvmTransient); 2674 2675 case SVM_EXIT_MSR: 2676 return hmR0SvmExitMsr(pVCpu, pCtx, 
pSvmTransient); 2671 2677 2672 2678 case SVM_EXIT_WBINVD: … … 2705 2711 default: 2706 2712 { 2707 rc = VERR_SVM_UNEXPECTED_EXIT;2708 2713 AssertMsgFailed(("hmR0SvmHandleExit: Unexpected exit code %#x\n", u32ExitCode)); 2709 break;2714 return VERR_SVM_UNEXPECTED_EXIT; 2710 2715 } 2711 2716 } 2712 2717 } 2713 return rc;2718 return VERR_INTERNAL_ERROR_5; /* Should never happen. */ 2714 2719 } 2715 2720 … … 3392 3397 } 3393 3398 3399 3400 /** 3401 * #VMEXIT handler for Nested Page-faults (SVM_EXIT_NPF). Conditional 3402 * #VMEXIT. 3403 */ 3404 HMSVM_EXIT_DECL hmR0SvmExitNestedPF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient) 3405 { 3406 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 3407 PVM pVM = pVCpu->CTX_SUFF(pVM); 3408 Assert(pVM->hm.s.fNestedPaging); 3409 3410 /* See AMD spec. 15.25.6 "Nested versus Guest Page Faults, Fault Ordering" for VMCB details for #NPF. */ 3411 PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb; 3412 uint32_t u32ErrCode = pVmcb->ctrl.u64ExitInfo1; 3413 RTGCPHYS GCPhysFaultAddr = pVmcb->ctrl.u64ExitInfo2; 3414 3415 Log4(("#NPF at CS:RIP=%04x:%#RX64 faultaddr=%RGp errcode=%#x \n", pCtx->cs.Sel, pCtx->rip, GCPhysFaultAddr, u32ErrCode)); 3416 3417 #ifdef VBOX_HM_WITH_GUEST_PATCHING 3418 /* TPR patching for 32-bit guests, using the reserved bit in the page tables for MMIO regions. */ 3419 if ( pVM->hm.s.fTRPPatchingAllowed 3420 && (GCPhysFaultAddr & PAGE_OFFSET_MASK) == 0x80 3421 && ( !(u32ErrCode & X86_TRAP_PF_P) /* Not present */ 3422 || (u32ErrCode & (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) == (X86_TRAP_PF_P | X86_TRAP_PF_RSVD)) /* MMIO page. */ 3423 && !CPUMGetGuestCPL(pVCpu) 3424 && !CPUMIsGuestInLongModeEx(pCtx) 3425 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches)) 3426 { 3427 RTGCPHYS GCPhysApicBase = pCtx->msrApicBase; 3428 GCPhysApicBase &= PAGE_BASE_GC_MASK; 3429 3430 if (GCPhysFaultAddr == GCPhysApicBase + 0x80) 3431 { 3432 /* Only attempt to patch the instruction once. 
*/ 3433 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip); 3434 if (!pPatch) 3435 { 3436 rc = VINF_EM_HM_PATCH_TPR_INSTR; 3437 return rc; 3438 } 3439 } 3440 } 3441 #endif 3442 3443 /* 3444 * Determine the nested paging mode. 3445 */ 3446 PGMMODE enmNestedPagingMode; 3447 #if HC_ARCH_BITS == 32 3448 if (CPUMIsGuestInLongModeEx(pCtx)) 3449 enmNestedPagingMode = PGMMODE_AMD64_NX; 3450 else 3451 #endif 3452 enmNestedPagingMode = PGMGetHostMode(pVM); 3453 3454 /* 3455 * MMIO optimization using the reserved (RSVD) bit in the guest page tables for MMIO pages. 3456 */ 3457 int rc; 3458 Assert((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) != X86_TRAP_PF_RSVD); 3459 if ((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) == (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) 3460 { 3461 rc = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, enmNestedPagingMode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr, u32ErrCode); 3462 3463 /* 3464 * If we succeed, resume guest execution. 3465 * If we fail in interpreting the instruction because we couldn't get the guest physical address 3466 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page 3467 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this 3468 * weird case. See @bugref{6043}. 3469 */ 3470 if ( rc == VINF_SUCCESS 3471 || rc == VERR_PAGE_TABLE_NOT_PRESENT 3472 || rc == VERR_PAGE_NOT_PRESENT) 3473 { 3474 /* Successfully handled MMIO operation. 
*/ 3475 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_SVM_GUEST_APIC_STATE; 3476 rc = VINF_SUCCESS; 3477 } 3478 return rc; 3479 } 3480 3481 TRPMAssertXcptPF(pVCpu, GCPhysFaultAddr, u32ErrCode); 3482 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, enmNestedPagingMode, u32ErrCode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr); 3483 TRPMResetTrap(pVCpu); 3484 3485 Log2(("#NPF: PGMR0Trap0eHandlerNestedPaging returned %Rrc\n", rc)); 3486 3487 /* 3488 * Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. 3489 */ 3490 if ( rc == VINF_SUCCESS 3491 || rc == VERR_PAGE_TABLE_NOT_PRESENT 3492 || rc == VERR_PAGE_NOT_PRESENT) 3493 { 3494 /* We've successfully synced our shadow page tables. */ 3495 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF); 3496 rc = VINF_SUCCESS; 3497 } 3498 3499 return rc; 3500 } 3501 -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r46566 r46580 8572 8572 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS 8573 8573 | HM_CHANGED_VMX_GUEST_APIC_STATE; 8574 r eturnVINF_SUCCESS;8574 rc = VINF_SUCCESS; 8575 8575 } 8576 8576 return rc;
Note: See TracChangeset for help on using the changeset viewer.