Changeset 70700 in vbox for trunk/src/VBox/VMM/VMMR0
Timestamp: Jan 23, 2018 10:34:56 AM
svn:sync-xref-src-repo-rev: 120441
Files: 1 edited
Legend: lines prefixed with '+' were added in r70700, lines prefixed with '-' were removed, unprefixed lines are unchanged context.
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
--- trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r70465)
+++ trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r70700)

@@ -40 +40 @@
 #ifdef DEBUG_ramshankar
 # define HMSVM_SYNC_FULL_GUEST_STATE
+# define HMSVM_SYNC_FULL_NESTED_GUEST_STATE
 # define HMSVM_ALWAYS_TRAP_ALL_XCPTS
 # define HMSVM_ALWAYS_TRAP_PF

@@ -135 +136 @@
 #else
 # define HMSVM_ASSERT_IN_NESTED_GUEST(a_pCtx) do { NOREF((a_pCtx)); } while (0)
+#endif
+
+/** Validate segment descriptor granularity bit. */
+#ifdef VBOX_STRICT
+# define HMSVM_ASSERT_SEG_GRANULARITY(reg) \
+    AssertMsg(   !pMixedCtx->reg.Attr.n.u1Present \
+              || (   pMixedCtx->reg.Attr.n.u1Granularity \
+                  ? (pMixedCtx->reg.u32Limit & 0xfff) == 0xfff \
+                  :  pMixedCtx->reg.u32Limit <= UINT32_C(0xfffff)), \
+              ("Invalid Segment Attributes Limit=%#RX32 Attr=%#RX32 Base=%#RX64\n", pMixedCtx->reg.u32Limit, \
+               pMixedCtx->reg.Attr.u, pMixedCtx->reg.u64Base))
+#else
+# define HMSVM_ASSERT_SEG_GRANULARITY(reg) do { } while (0)
 #endif

@@ -380 +394 @@
 #endif

+
+#ifdef VBOX_STRICT
+# define HMSVM_LOG_CS   RT_BIT_32(0)
+# define HMSVM_LOG_SS   RT_BIT_32(1)
+# define HMSVM_LOG_FS   RT_BIT_32(2)
+# define HMSVM_LOG_GS   RT_BIT_32(3)
+# define HMSVM_LOG_LBR  RT_BIT_32(4)
+# define HMSVM_LOG_ALL  (  HMSVM_LOG_CS \
+                         | HMSVM_LOG_SS \
+                         | HMSVM_LOG_FS \
+                         | HMSVM_LOG_GS \
+                         | HMSVM_LOG_LBR)
+
+/**
+ * Dumps CPU state and additional info. to the logger for diagnostics.
+ *
+ * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pVmcb       Pointer to the VM control block.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   pszPrefix   Log prefix.
+ * @param   fFlags      Log flags, see HMSVM_LOG_XXX.
+ * @param   uVerbose    The verbosity level, currently unused.
+ */
+static void hmR0SvmLogState(PVMCPU pVCpu, PCSVMVMCB pVmcb, PCPUMCTX pCtx, const char *pszPrefix, uint32_t fFlags,
+                            uint8_t uVerbose)
+{
+    RT_NOREF(uVerbose);
+
+    Log4(("%s: cs:rip=%04x:%RX64 efl=%#RX32 cr0=%#RX32 cr3=%#RX32 cr4=%#RX32\n", pszPrefix, pCtx->cs.Sel, pCtx->rip,
+          pCtx->eflags.u, pCtx->cr0, pCtx->cr3, pCtx->cr4));
+    Log4(("%s: rsp=%#RX64 rbp=%#RX64 rdi=%#RX64\n", pszPrefix, pCtx->rsp, pCtx->rbp, pCtx->rdi));
+    if (fFlags & HMSVM_LOG_CS)
+    {
+        Log4(("%s: cs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->cs.Sel, pCtx->cs.u64Base,
+              pCtx->cs.u32Limit, pCtx->cs.Attr.u));
+    }
+    if (fFlags & HMSVM_LOG_SS)
+    {
+        Log4(("%s: ss={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->ss.Sel, pCtx->ss.u64Base,
+              pCtx->ss.u32Limit, pCtx->ss.Attr.u));
+    }
+    if (fFlags & HMSVM_LOG_FS)
+    {
+        Log4(("%s: fs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->fs.Sel, pCtx->fs.u64Base,
+              pCtx->fs.u32Limit, pCtx->fs.Attr.u));
+    }
+    if (fFlags & HMSVM_LOG_GS)
+    {
+        Log4(("%s: gs={%04x base=%016RX64 limit=%08x flags=%08x}\n", pszPrefix, pCtx->gs.Sel, pCtx->gs.u64Base,
+              pCtx->gs.u32Limit, pCtx->gs.Attr.u));
+    }
+
+    PCSVMVMCBSTATESAVE pVmcbGuest = &pVmcb->guest;
+    if (fFlags & HMSVM_LOG_LBR)
+    {
+        Log4(("%s: br_from=%#RX64 br_to=%#RX64 lastxcpt_from=%#RX64 lastxcpt_to=%#RX64\n", pszPrefix, pVmcbGuest->u64BR_FROM,
+              pVmcbGuest->u64BR_TO, pVmcbGuest->u64LASTEXCPFROM, pVmcbGuest->u64LASTEXCPTO));
+    }
+}
+#endif
+
+
 /**
  * Sets up and activates AMD-V on the current CPU.

@@ -1499 +1575 @@

 /**
- * Loads the guest segment registers into the VMCB.
+ * Loads the guest (or nested-guest) segment registers into the VMCB.
  *
  * @returns VBox status code.
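
The HMSVM_ASSERT_SEG_GRANULARITY macro introduced above checks a simple consistency rule between a segment's stored limit and its granularity bit: with G=1 the effective limit is (limit << 12) | 0xFFF, so the low 12 bits of the 32-bit limit must all be set, while with G=0 the limit must fit in 20 bits. A minimal standalone sketch of that rule follows; the helper name and test harness are illustrative only, not VirtualBox code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Returns true when a segment's limit is consistent with its granularity bit.
   Non-present segments are not checked, mirroring the macro above. */
static bool IsSegLimitConsistent(bool fPresent, bool fGranularity, uint32_t uLimit)
{
    if (!fPresent)
        return true;
    if (fGranularity)                           /* G=1: limit is in 4K pages, low 12 bits read as all ones */
        return (uLimit & 0xfff) == 0xfff;
    return uLimit <= UINT32_C(0xfffff);         /* G=0: limit is in bytes, must fit in 20 bits */
}

int main(void)
{
    printf("%d\n", IsSegLimitConsistent(true, true,  UINT32_C(0xffffffff)));  /* 1: flat 4 GiB segment */
    printf("%d\n", IsSegLimitConsistent(true, false, UINT32_C(0x000fffff)));  /* 1: byte-granular       */
    printf("%d\n", IsSegLimitConsistent(true, false, UINT32_C(0x00100000)));  /* 0: inconsistent        */
    return 0;
}
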
@@ -1560 +1636 @@

 /**
- * Loads the guest MSRs into the VMCB.
+ * Loads the guest (or nested-guest) MSRs into the VMCB.
  *
  * @param   pVCpu       The cross context virtual CPU structure.

@@ -1590 +1666 @@
     if (CPUMIsGuestInLongModeEx(pCtx))
     {
+        /* Load these always as the guest may modify FS/GS base using MSRs in 64-bit mode which we don't intercept. */
         pVmcb->guest.FS.u64Base = pCtx->fs.u64Base;
         pVmcb->guest.GS.u64Base = pCtx->gs.u64Base;
+        pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG;
     }
     else

@@ -2188 +2266 @@
     Log4(("hmR0SvmLoadGuestState: SS={%04x base=%016RX64 limit=%08x flags=%08x}\n", pCtx->ss.Sel, pCtx->ss.u64Base,
           pCtx->ss.u32Limit, pCtx->ss.Attr.u));
+    Log4(("hmR0SvmLoadGuestState: FS={%04x base=%016RX64 limit=%08x flags=%08x}\n", pCtx->fs.Sel, pCtx->fs.u64Base,
+          pCtx->fs.u32Limit, pCtx->fs.Attr.u));
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
     return rc;

@@ -2320 +2400 @@
     AssertRCReturn(rc, rc);

+    /*
+     * We need to load the entire state (including FS, GS etc.) as we could be continuing
+     * to execute the nested-guest at any point (not just immediately after VMRUN) and thus
+     * the VMCB can possibly be out-of-sync with the actual nested-guest state if it was
+     * executed in IEM.
+     */
     hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcbNstGst, pCtx);
     hmR0SvmLoadGuestMsrs(pVCpu, pVmcbNstGst, pCtx);

@@ -2356 +2442 @@
               ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));

-    Log4(("hmR0SvmLoadGuestStateNested: CS:RIP=%04x:%RX64 EFL=%#x CR0=%#RX32 CR3=%#RX32 (HyperCR3=%#RX64) CR4=%#RX32 "
-          "ESP=%#RX32 EBP=%#RX32 rc=%d\n", pCtx->cs.Sel, pCtx->rip, pCtx->eflags.u, pCtx->cr0, pCtx->cr3,
-          pVmcbNstGst->guest.u64CR3, pCtx->cr4, pCtx->esp, pCtx->ebp, rc));
-    Log4(("hmR0SvmLoadGuestStateNested: SS={%04x base=%016RX64 limit=%08x flags=%08x}\n", pCtx->ss.Sel, pCtx->ss.u64Base,
-          pCtx->ss.u32Limit, pCtx->ss.Attr.u));
+#ifdef VBOX_STRICT
+    hmR0SvmLogState(pVCpu, pVmcbNstGst, pCtx, "hmR0SvmLoadGuestStateNested", HMSVM_LOG_ALL, 0 /* uVerbose */);
+#endif
     STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
-
     return rc;
 }

@@ -2413 +2496 @@

 /**
- * Saves the guest (or nested-guest) state from the VMCB into the guest-CPU context.
+ * Saves the guest (or nested-guest) state from the VMCB into the guest-CPU
+ * context.
  *
  * Currently there is no residual state left in the CPU that is not updated in the

@@ -2495 +2579 @@
     }

-#ifdef VBOX_STRICT
-# define HMSVM_ASSERT_SEG_GRANULARITY(reg) \
-    AssertMsg(   !pMixedCtx->reg.Attr.n.u1Present \
-              || (   pMixedCtx->reg.Attr.n.u1Granularity \
-                  ? (pMixedCtx->reg.u32Limit & 0xfff) == 0xfff \
-                  :  pMixedCtx->reg.u32Limit <= UINT32_C(0xfffff)), \
-              ("Invalid Segment Attributes Limit=%#RX32 Attr=%#RX32 Base=%#RX64\n", pMixedCtx->reg.u32Limit, \
-               pMixedCtx->reg.Attr.u, pMixedCtx->reg.u64Base))
-
     HMSVM_ASSERT_SEG_GRANULARITY(cs);
     HMSVM_ASSERT_SEG_GRANULARITY(ss);

@@ -2510 +2585 @@
     HMSVM_ASSERT_SEG_GRANULARITY(fs);
     HMSVM_ASSERT_SEG_GRANULARITY(gs);
-
-# undef HMSVM_ASSERT_SEL_GRANULARITY
-#endif

     /*
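
The new pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG line in the hmR0SvmLoadGuestMsrs hunk above follows the AMD-V VMCB clean-bits contract: a set clean bit tells the CPU it may keep using its cached copy of that VMCB state group, so any group whose fields were just rewritten must have its bit cleared before the next VMRUN. A minimal sketch of the pattern, using simplified stand-in types and a stand-in bit value rather than the real VirtualBox/AMD structures:

#include <stdint.h>
#include <stdio.h>

#define VMCB_CLEAN_SEG  UINT32_C(0x00000100)    /* illustrative bit for the segment-state group */

typedef struct SIMPLEVMCB
{
    uint64_t u64FsBase;
    uint64_t u64GsBase;
    uint32_t u32CleanBits;  /* set bit => CPU may reuse its cached copy of that VMCB group */
} SIMPLEVMCB;

/* Writing a field that belongs to a cached group must clear the group's clean bit,
   otherwise stale cached values may be used on the next VMRUN. */
static void UpdateFsGsBase(SIMPLEVMCB *pVmcb, uint64_t uFsBase, uint64_t uGsBase)
{
    pVmcb->u64FsBase = uFsBase;
    pVmcb->u64GsBase = uGsBase;
    pVmcb->u32CleanBits &= ~VMCB_CLEAN_SEG;
}

int main(void)
{
    SIMPLEVMCB Vmcb = { 0, 0, UINT32_C(0xffffffff) };   /* everything marked clean */
    UpdateFsGsBase(&Vmcb, UINT64_C(0x7f0000000000), 0);
    printf("clean bits now %#x\n", (unsigned)Vmcb.u32CleanBits);
    return 0;
}
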
@@ -2545 +2617 @@

     /*
-     * Guest Descriptor-Table registers.
+     * Guest Descriptor-Table registers (GDTR, IDTR, LDTR).
      */
     HMSVM_SEG_REG_COPY_FROM_VMCB(pMixedCtx, &pVmcb->guest, LDTR, ldtr);

@@ -2579 +2651 @@
     }

+#ifdef VBOX_STRICT
     if (CPUMIsGuestInSvmNestedHwVirtMode(pMixedCtx))
-    {
-        Log4(("hmR0SvmSaveGuestState: CS:RIP=%04x:%RX64 EFL=%#x CR0=%#RX32 CR3=%#RX32 CR4=%#RX32 ESP=%#RX32 EBP=%#RX32\n",
-              pMixedCtx->cs.Sel, pMixedCtx->rip, pMixedCtx->eflags.u, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4,
-              pMixedCtx->esp, pMixedCtx->ebp));
-        Log4(("hmR0SvmSaveGuestState: SS={%04x base=%016RX64 limit=%08x flags=%08x}\n", pMixedCtx->ss.Sel, pMixedCtx->ss.u64Base,
-              pMixedCtx->ss.u32Limit, pMixedCtx->ss.Attr.u));
-        Log4(("hmR0SvmSaveGuestState: DBGCTL BR_FROM=%#RX64 BR_TO=%#RX64 XcptFrom=%#RX64 XcptTo=%#RX64\n",
-              pVmcb->guest.u64BR_FROM, pVmcb->guest.u64BR_TO, pVmcb->guest.u64LASTEXCPFROM, pVmcb->guest.u64LASTEXCPTO));
-    }
+        hmR0SvmLogState(pVCpu, pVmcb, pMixedCtx, "hmR0SvmSaveGuestStateNested", HMSVM_LOG_ALL & ~HMSVM_LOG_LBR, 0 /* uVerbose */);
+#endif
 }

@@ -3393 +3459 @@
      * see AMD spec. 15.21.4 "Injecting Virtual (INTR) Interrupts".
      */
-    PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
-    if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
-        && !fIntShadow
-        && !pVCpu->hm.s.fSingleInstruction
-        && CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))
-    {
-        if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_INTR)
-        {
-            Log4(("Intercepting external interrupt -> #VMEXIT\n"));
-            return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
-        }
-
-        uint8_t u8Interrupt;
-        int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
-        if (RT_SUCCESS(rc))
-        {
-            Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));
-
-            SVMEVENT Event;
-            Event.u = 0;
-            Event.n.u1Valid  = 1;
-            Event.n.u8Vector = u8Interrupt;
-            Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
-
-            hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
-        }
-        else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
-        {
-            /*
-             * AMD-V has no TPR thresholding feature. TPR and the force-flag will be
-             * updated eventually when the TPR is written by the guest.
-             */
-            STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
-        }
-        else
-            STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
-    }
-
-    /*
-     * Check if the nested-guest is intercepting virtual (using V_IRQ and related fields)
-     * interrupt injection. The virtual interrupt injection itself, if any, will be done
-     * by the physical CPU.
-     */
-    if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
-        && (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR)
-        && CPUMCanSvmNstGstTakeVirtIntr(pCtx))
-    {
-        Log4(("Intercepting virtual interrupt -> #VMEXIT\n"));
-        return IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
+    if (!fIntShadow)
+    {
+        PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = &pVCpu->hm.s.svm.NstGstVmcbCache;
+        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
+            && !pVCpu->hm.s.fSingleInstruction
+            && CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))
+        {
+            if (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_INTR)
+            {
+                Log4(("Intercepting external interrupt -> #VMEXIT\n"));
+                return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
+            }
+
+            uint8_t u8Interrupt;
+            int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
+            if (RT_SUCCESS(rc))
+            {
+                Log4(("Injecting external interrupt u8Interrupt=%#x\n", u8Interrupt));
+
+                SVMEVENT Event;
+                Event.u = 0;
+                Event.n.u1Valid  = 1;
+                Event.n.u8Vector = u8Interrupt;
+                Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
+
+                hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
+            }
+            else if (rc == VERR_APIC_INTR_MASKED_BY_TPR)
+            {
+                /*
+                 * AMD-V has no TPR thresholding feature. TPR and the force-flag will be
+                 * updated eventually when the TPR is written by the guest.
+                 */
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchTprMaskedIrq);
+            }
+            else
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
+        }
+
+        /*
+         * Check if the nested-guest is intercepting virtual (using V_IRQ and related fields)
+         * interrupt injection. The virtual interrupt injection itself, if any, will be done
+         * by the physical CPU.
+         */
+#if 0
+        if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
+            && (pVmcbNstGstCache->u64InterceptCtrl & SVM_CTRL_INTERCEPT_VINTR)
+            && CPUMCanSvmNstGstTakeVirtIntr(pCtx))
+        {
+            Log4(("Intercepting virtual interrupt -> #VMEXIT\n"));
+            return IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
+        }
+#endif
     }
 }
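
The interrupt-evaluation hunk above still injects external interrupts by filling in an SVMEVENT, which mirrors AMD-V's 64-bit EVENTINJ format: vector in bits 7:0, event type in bits 10:8 (0 for an external interrupt), error-code-valid in bit 11, valid in bit 31 and the error code in bits 63:32. A standalone sketch of that encoding follows; the names are illustrative stand-ins, not the VirtualBox SVMEVENT union.

#include <stdint.h>
#include <stdio.h>

#define MYSVM_EVENT_EXTERNAL_IRQ  0u    /* event type 0 = external interrupt */

/* Builds an EVENTINJ-style value for injecting an external interrupt (no error code). */
static uint64_t BuildExtIntEvent(uint8_t uVector)
{
    uint64_t uEvent = 0;
    uEvent |= (uint64_t)uVector;                        /* bits 7:0  - vector   */
    uEvent |= (uint64_t)MYSVM_EVENT_EXTERNAL_IRQ << 8;  /* bits 10:8 - type     */
    uEvent |= UINT64_C(1) << 31;                        /* bit 31    - valid    */
    return uEvent;
}

int main(void)
{
    /* For vector 0x20 this yields 0x80000020. */
    printf("EVENTINJ for vector 0x20: %#llx\n", (unsigned long long)BuildExtIntEvent(0x20));
    return 0;
}
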
@@ -3483 +3553 @@
     PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;

-    Log4Func(("fGif=%RTbool fBlockInt=%RTbool fIntShadow=%RTbool APIC/PIC_Pending=%RTbool\n", fGif, fBlockInt, fIntShadow,
+    Log4Func(("fBlockInt=%RTbool fIntShadow=%RTbool APIC/PIC_Pending=%RTbool\n", fBlockInt, fIntShadow,
               VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
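
The hunks that follow replace the old fBlockInt computation with strict-build-only fAllowInt/fGif checks that also honour the global interrupt flag (GIF). As a standalone illustration of the gating rule those checks encode (stand-in predicate names, not the real CPUM API):

#include <stdbool.h>
#include <stdio.h>

/* Inputs correspond conceptually to pCtx->hwvirt.svm.fGif, EFLAGS.IF,
   CPUMIsGuestInSvmNestedHwVirtMode() and CPUMCanSvmNstGstTakePhysIntr() in the changeset. */
static bool CanInjectPhysIntr(bool fGif, bool fNestedGuest, bool fEflIf, bool fNstGstCanTakePhysIntr)
{
    if (!fGif)                    /* under SVM the global interrupt flag gates everything */
        return false;
    if (fNestedGuest)             /* nested guest: defer to the nested-guest interruptibility rules */
        return fNstGstCanTakePhysIntr;
    return fEflIf;                /* ordinary guest: plain EFLAGS.IF */
}

int main(void)
{
    printf("%d\n", CanInjectPhysIntr(true,  false, true,  false));  /* 1                       */
    printf("%d\n", CanInjectPhysIntr(false, false, true,  true));   /* 0: GIF clear            */
    printf("%d\n", CanInjectPhysIntr(true,  true,  false, true));   /* 1: nested rules apply   */
    return 0;
}
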
@@ -3563 +3633 @@
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));

-    bool const fIsNestedGuest = CPUMIsGuestInSvmNestedHwVirtMode(pCtx);
-    bool const fIntShadow     = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
-    bool const fBlockInt      = !fIsNestedGuest ? !(pCtx->eflags.u32 & X86_EFL_IF) : CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
+#ifdef VBOX_STRICT
+    bool const fIntShadow = hmR0SvmIsIntrShadowActive(pVCpu, pCtx);
+    bool const fGif       = pCtx->hwvirt.svm.fGif;
+    bool       fAllowInt  = fGif;
+    if (fGif)
+    {
+        if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+            fAllowInt = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
+        else
+            fAllowInt = RT_BOOL(pCtx->eflags.u32 & X86_EFL_IF);
+    }
+#endif

     if (pVCpu->hm.s.Event.fPending)

@@ -3578 +3657 @@
         if (Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ)
         {
-            Assert(!fBlockInt);
+            Assert(fAllowInt);
             Assert(!fIntShadow);
         }
         else if (Event.n.u3Type == SVM_EVENT_NMI)
+        {
+            Assert(fGif);
             Assert(!fIntShadow);
-            NOREF(fBlockInt);
+        }

         /*

@@ -3901 +3982 @@
         if (rcStrict != VINF_SUCCESS)
             return VBOXSTRICTRC_VAL(rcStrict);
+        if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+            return VINF_SVM_VMEXIT;
     }

@@ -3914 +3997 @@
         return VINF_EM_RAW_INJECT_TRPM_EVENT;
     }
+
+#ifdef HMSVM_SYNC_FULL_NESTED_GUEST_STATE
+    HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);
+#endif

     /*

@@ -4752 +4839 @@
         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
         rc = hmR0SvmPreRunGuestNested(pVM, pVCpu, pCtx, &SvmTransient);
-        if (rc != VINF_SUCCESS)
+        if (   rc != VINF_SUCCESS
+            || !CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+        {
             break;
+        }

         /*

@@ -5388 +5478 @@
         case SVM_EXIT_WRITE_CR4:
         case SVM_EXIT_WRITE_CR8:
+        {
+            uint8_t const uCr = uExitCode - SVM_EXIT_WRITE_CR0;
+            Log4(("hmR0SvmHandleExitNested: Write CR%u\n", uCr));
             return hmR0SvmExitWriteCRx(pVCpu, pCtx, pSvmTransient);
+        }

         case SVM_EXIT_PAUSE:

@@ -6524 +6618 @@
             HMCPU_CF_SET(pVCpu, HM_CHANGED_SVM_GUEST_APIC_STATE);
         }
-        else if (pCtx->ecx == MSR_K6_EFER)
-            HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR);
-        else if (pCtx->ecx == MSR_IA32_TSC)
-            pSvmTransient->fUpdateTscOffsetting = true;
+        else
+        {
+            switch (pCtx->ecx)
+            {
+                case MSR_K6_EFER:           HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR);         break;
+                case MSR_IA32_TSC:          pSvmTransient->fUpdateTscOffsetting = true;             break;
+                case MSR_K8_FS_BASE:
+                case MSR_K8_GS_BASE:        HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);     break;
+                case MSR_IA32_SYSENTER_CS:  HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR);  break;
+                case MSR_IA32_SYSENTER_EIP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break;
+                case MSR_IA32_SYSENTER_ESP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break;
+            }
+        }
     }
 }

@@ -7482 +7585 @@
     HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();

-    PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
+    PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx);
     uint8_t const  uVector  = pVmcb->ctrl.u64ExitCode - SVM_EXIT_EXCEPTION_0;
     uint32_t const uErrCode = pVmcb->ctrl.u64ExitInfo1;

     Assert(uVector <= X86_XCPT_LAST);
     Log4(("hmR0SvmExitXcptGeneric: uVector=%#x uErrCode=%u\n", uVector, uErrCode));
-

     SVMEVENT Event;

@@ -7607 +7709 @@
     uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3);
     VBOXSTRICTRC rcStrict = IEMExecDecodedClgi(pVCpu, cbInstr);
-
     return VBOXSTRICTRC_VAL(rcStrict);
 }
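
The rewritten MSR-write handling above dispatches on the MSR index to mark only the affected guest-state group for reload into the VMCB before the next VMRUN. A minimal sketch of that dispatch pattern; the MSR indices are the architectural values, while the HMCHANGED_* flag names are illustrative stand-ins for the HM_CHANGED_* flags used in the changeset.

#include <stdint.h>
#include <stdio.h>

#define MSR_EFER            UINT32_C(0xc0000080)
#define MSR_FS_BASE         UINT32_C(0xc0000100)
#define MSR_GS_BASE         UINT32_C(0xc0000101)
#define MSR_SYSENTER_CS     UINT32_C(0x00000174)
#define MSR_SYSENTER_ESP    UINT32_C(0x00000175)
#define MSR_SYSENTER_EIP    UINT32_C(0x00000176)

#define HMCHANGED_EFER      UINT32_C(0x01)
#define HMCHANGED_SEGS      UINT32_C(0x02)
#define HMCHANGED_SYSENTER  UINT32_C(0x04)

/* Maps an intercepted MSR write to the guest-state group that must be reloaded;
   MSRs not listed here dirty nothing in this sketch. */
static uint32_t DirtyMaskForMsrWrite(uint32_t idMsr)
{
    switch (idMsr)
    {
        case MSR_EFER:          return HMCHANGED_EFER;
        case MSR_FS_BASE:
        case MSR_GS_BASE:       return HMCHANGED_SEGS;
        case MSR_SYSENTER_CS:
        case MSR_SYSENTER_ESP:
        case MSR_SYSENTER_EIP:  return HMCHANGED_SYSENTER;
        default:                return 0;
    }
}

int main(void)
{
    printf("%#x\n", (unsigned)DirtyMaskForMsrWrite(MSR_GS_BASE));   /* 0x2 */
    return 0;
}
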