- Timestamp: Jun 21, 2017 8:29:25 AM
- Location: trunk/src/VBox/VMM
- Files: 8 edited
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
r67204 r67529 185 185 * @param pEvent Pointer to the SVM event. 186 186 */ 187 VMM_INT_DECL(TRPMEVENT) hmSvmEventToTrpmEventType(PCSVMEVENT pEvent)187 VMM_INT_DECL(TRPMEVENT) HMSvmEventToTrpmEventType(PCSVMEVENT pEvent) 188 188 { 189 189 uint8_t const uType = pEvent->n.u3Type; … … 202 202 203 203 204 #ifndef IN_RC205 /**206 * Converts an IEM exception event type to an SVM event type.207 *208 * @returns The SVM event type.209 * @retval UINT8_MAX if the specified type of event isn't among the set210 * of recognized IEM event types.211 *212 * @param uVector The vector of the event.213 * @param fIemXcptFlags The IEM exception / interrupt flags.214 */215 static uint8_t hmSvmEventTypeFromIemEvent(uint32_t uVector, uint32_t fIemXcptFlags)216 {217 if (fIemXcptFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)218 {219 if (uVector != X86_XCPT_NMI)220 return SVM_EVENT_EXCEPTION;221 return SVM_EVENT_NMI;222 }223 224 /* See AMD spec. Table 15-1. "Guest Exception or Interrupt Types". */225 if (fIemXcptFlags & (IEM_XCPT_FLAGS_BP_INSTR | IEM_XCPT_FLAGS_ICEBP_INSTR | IEM_XCPT_FLAGS_OF_INSTR))226 return SVM_EVENT_EXCEPTION;227 228 if (fIemXcptFlags & IEM_XCPT_FLAGS_T_EXT_INT)229 return SVM_EVENT_EXTERNAL_IRQ;230 231 if (fIemXcptFlags & IEM_XCPT_FLAGS_T_SOFT_INT)232 return SVM_EVENT_SOFTWARE_INT;233 234 AssertMsgFailed(("hmSvmEventTypeFromIemEvent: Invalid IEM xcpt/int. type %#x, uVector=%#x\n", fIemXcptFlags, uVector));235 return UINT8_MAX;236 }237 238 239 /**240 * Performs the operations necessary that are part of the vmrun instruction241 * execution in the guest.242 *243 * @returns Strict VBox status code (i.e. informational status codes too).244 * @retval VINF_SUCCESS successully executed VMRUN and entered nested-guest245 * code execution.246 * @retval VINF_SVM_VMEXIT when executing VMRUN causes a \#VMEXIT247 * (SVM_EXIT_INVALID most likely).248 *249 * @param pVCpu The cross context virtual CPU structure.250 * @param pCtx Pointer to the guest-CPU context.251 * @param cbInstr The length of the VMRUN instruction.252 * @param GCPhysVmcb Guest physical address of the VMCB to run.253 */254 /** @todo move this to IEM and make the VMRUN version that can execute under255 * hardware SVM here instead. 
*/256 VMM_INT_DECL(VBOXSTRICTRC) HMSvmVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbInstr, RTGCPHYS GCPhysVmcb)257 {258 Assert(pVCpu);259 Assert(pCtx);260 PVM pVM = pVCpu->CTX_SUFF(pVM);261 Log3(("HMSvmVmrun\n"));262 263 /*264 * Cache the physical address of the VMCB for #VMEXIT exceptions.265 */266 pCtx->hwvirt.svm.GCPhysVmcb = GCPhysVmcb;267 268 /*269 * Save host state.270 */271 SVMVMCBSTATESAVE VmcbNstGst;272 int rc = PGMPhysSimpleReadGCPhys(pVM, &VmcbNstGst, GCPhysVmcb + RT_OFFSETOF(SVMVMCB, guest), sizeof(SVMVMCBSTATESAVE));273 if (RT_SUCCESS(rc))274 {275 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;276 pHostState->es = pCtx->es;277 pHostState->cs = pCtx->cs;278 pHostState->ss = pCtx->ss;279 pHostState->ds = pCtx->ds;280 pHostState->gdtr = pCtx->gdtr;281 pHostState->idtr = pCtx->idtr;282 pHostState->uEferMsr = pCtx->msrEFER;283 pHostState->uCr0 = pCtx->cr0;284 pHostState->uCr3 = pCtx->cr3;285 pHostState->uCr4 = pCtx->cr4;286 pHostState->rflags = pCtx->rflags;287 pHostState->uRip = pCtx->rip + cbInstr;288 pHostState->uRsp = pCtx->rsp;289 pHostState->uRax = pCtx->rax;290 291 /*292 * Load the VMCB controls.293 */294 rc = PGMPhysSimpleReadGCPhys(pVM, &pCtx->hwvirt.svm.VmcbCtrl, GCPhysVmcb, sizeof(pCtx->hwvirt.svm.VmcbCtrl));295 if (RT_SUCCESS(rc))296 {297 PSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.VmcbCtrl;298 299 /*300 * Validate guest-state and controls.301 */302 /* VMRUN must always be intercepted. */303 if (!CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_VMRUN))304 {305 Log(("HMSvmVmRun: VMRUN instruction not intercepted -> #VMEXIT\n"));306 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);307 }308 309 /* Nested paging. */310 if ( pVmcbCtrl->NestedPaging.n.u1NestedPaging311 && !pVM->cpum.ro.GuestFeatures.fSvmNestedPaging)312 {313 Log(("HMSvmVmRun: Nested paging not supported -> #VMEXIT\n"));314 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);315 }316 317 /* AVIC. */318 if ( pVmcbCtrl->IntCtrl.n.u1AvicEnable319 && !pVM->cpum.ro.GuestFeatures.fSvmAvic)320 {321 Log(("HMSvmVmRun: AVIC not supported -> #VMEXIT\n"));322 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);323 }324 325 /* Last branch record (LBR) virtualization. */326 if ( (pVmcbCtrl->u64LBRVirt & SVM_LBR_VIRT_ENABLE)327 && !pVM->cpum.ro.GuestFeatures.fSvmLbrVirt)328 {329 Log(("HMSvmVmRun: LBR virtualization not supported -> #VMEXIT\n"));330 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);331 }332 333 /* Guest ASID. */334 if (!pVmcbCtrl->TLBCtrl.n.u32ASID)335 {336 Log(("HMSvmVmRun: Guest ASID is invalid -> #VMEXIT\n"));337 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);338 }339 340 /* IO permission bitmap. */341 RTGCPHYS const GCPhysIOBitmap = pVmcbCtrl->u64IOPMPhysAddr;342 if ( (GCPhysIOBitmap & X86_PAGE_4K_OFFSET_MASK)343 || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap)344 || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap + X86_PAGE_4K_SIZE)345 || !PGMPhysIsGCPhysNormal(pVM, GCPhysIOBitmap + (X86_PAGE_4K_SIZE << 1)))346 {347 Log(("HMSvmVmRun: IO bitmap physaddr invalid. GCPhysIOBitmap=%#RX64 -> #VMEXIT\n", GCPhysIOBitmap));348 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);349 }350 351 /* MSR permission bitmap. 
*/352 RTGCPHYS const GCPhysMsrBitmap = pVmcbCtrl->u64MSRPMPhysAddr;353 if ( (GCPhysMsrBitmap & X86_PAGE_4K_OFFSET_MASK)354 || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap)355 || !PGMPhysIsGCPhysNormal(pVM, GCPhysMsrBitmap + X86_PAGE_4K_SIZE))356 {357 Log(("HMSvmVmRun: MSR bitmap physaddr invalid. GCPhysMsrBitmap=%#RX64 -> #VMEXIT\n", GCPhysMsrBitmap));358 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);359 }360 361 /* CR0. */362 if ( !(VmcbNstGst.u64CR0 & X86_CR0_CD)363 && (VmcbNstGst.u64CR0 & X86_CR0_NW))364 {365 Log(("HMSvmVmRun: CR0 no-write through with cache disabled. CR0=%#RX64 -> #VMEXIT\n", VmcbNstGst.u64CR0));366 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);367 }368 if (VmcbNstGst.u64CR0 >> 32)369 {370 Log(("HMSvmVmRun: CR0 reserved bits set. CR0=%#RX64 -> #VMEXIT\n", VmcbNstGst.u64CR0));371 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);372 }373 /** @todo Implement all reserved bits/illegal combinations for CR3, CR4. */374 375 /* DR6 and DR7. */376 if ( VmcbNstGst.u64DR6 >> 32377 || VmcbNstGst.u64DR7 >> 32)378 {379 Log(("HMSvmVmRun: DR6 and/or DR7 reserved bits set. DR6=%#RX64 DR7=%#RX64 -> #VMEXIT\n", VmcbNstGst.u64DR6,380 VmcbNstGst.u64DR6));381 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);382 }383 384 /** @todo gPAT MSR validation? */385 386 /*387 * Copy the IO permission bitmap into the cache.388 */389 Assert(pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap));390 rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap), GCPhysIOBitmap,391 SVM_IOPM_PAGES * X86_PAGE_4K_SIZE);392 if (RT_FAILURE(rc))393 {394 Log(("HMSvmVmRun: Failed reading the IO permission bitmap at %#RGp. rc=%Rrc\n", GCPhysIOBitmap, rc));395 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);396 }397 398 /*399 * Copy the MSR permission bitmap into the cache.400 */401 Assert(pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap));402 rc = PGMPhysSimpleReadGCPhys(pVM, pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap), GCPhysMsrBitmap,403 SVM_MSRPM_PAGES * X86_PAGE_4K_SIZE);404 if (RT_FAILURE(rc))405 {406 Log(("HMSvmVmRun: Failed reading the MSR permission bitmap at %#RGp. rc=%Rrc\n", GCPhysMsrBitmap, rc));407 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);408 }409 410 /*411 * Copy segments from nested-guest VMCB state to the guest-CPU state.412 *413 * We do this here as we need to use the CS attributes and it's easier this way414 * then using the VMCB format selectors. It doesn't really matter where we copy415 * the state, we restore the guest-CPU context state on the \#VMEXIT anyway.416 */417 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, ES, es);418 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, CS, cs);419 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, SS, ss);420 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, &VmcbNstGst, DS, ds);421 422 /** @todo Segment attribute overrides by VMRUN. 
*/423 424 /*425 * CPL adjustments and overrides.426 *427 * SS.DPL is apparently the CPU's CPL, see comment in CPUMGetGuestCPL().428 * We shall thus adjust both CS.DPL and SS.DPL here.429 */430 pCtx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = VmcbNstGst.u8CPL;431 if (CPUMIsGuestInV86ModeEx(pCtx))432 pCtx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = 3;433 if (CPUMIsGuestInRealModeEx(pCtx))434 pCtx->cs.Attr.n.u2Dpl = pCtx->ss.Attr.n.u2Dpl = 0;435 436 /*437 * Continue validating guest-state and controls.438 */439 /* EFER, CR0 and CR4. */440 uint64_t uValidEfer;441 rc = CPUMQueryValidatedGuestEfer(pVM, VmcbNstGst.u64CR0, 0 /* uOldEfer */, VmcbNstGst.u64EFER, &uValidEfer);442 if (RT_FAILURE(rc))443 {444 Log(("HMSvmVmRun: EFER invalid uOldEfer=%#RX64 uValidEfer=%#RX64 -> #VMEXIT\n", VmcbNstGst.u64EFER, uValidEfer));445 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);446 }447 bool const fSvm = RT_BOOL(uValidEfer & MSR_K6_EFER_SVME);448 bool const fLongModeSupported = RT_BOOL(pVM->cpum.ro.GuestFeatures.fLongMode);449 bool const fLongModeEnabled = RT_BOOL(uValidEfer & MSR_K6_EFER_LME);450 bool const fPaging = RT_BOOL(VmcbNstGst.u64CR0 & X86_CR0_PG);451 bool const fPae = RT_BOOL(VmcbNstGst.u64CR4 & X86_CR4_PAE);452 bool const fProtMode = RT_BOOL(VmcbNstGst.u64CR0 & X86_CR0_PE);453 bool const fLongModeWithPaging = fLongModeEnabled && fPaging;454 bool const fLongModeConformCS = pCtx->cs.Attr.n.u1Long && pCtx->cs.Attr.n.u1DefBig;455 /* Adjust EFER.LMA (this is normally done by the CPU when system software writes CR0). */456 if (fLongModeWithPaging)457 uValidEfer |= MSR_K6_EFER_LMA;458 bool const fLongModeActiveOrEnabled = RT_BOOL(uValidEfer & (MSR_K6_EFER_LME | MSR_K6_EFER_LMA));459 if ( !fSvm460 || (!fLongModeSupported && fLongModeActiveOrEnabled)461 || (fLongModeWithPaging && !fPae)462 || (fLongModeWithPaging && !fProtMode)463 || ( fLongModeEnabled464 && fPaging465 && fPae466 && fLongModeConformCS))467 {468 Log(("HMSvmVmRun: EFER invalid. uValidEfer=%#RX64 -> #VMEXIT\n", uValidEfer));469 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);470 }471 472 /*473 * Preserve the required force-flags.474 *475 * We only preserve the force-flags that would affect the execution of the476 * nested-guest (or the guest).477 *478 * - VMCPU_FF_INHIBIT_INTERRUPTS need not be preserved as it's for a single479 * instruction which is this VMRUN instruction itself.480 *481 * - VMCPU_FF_BLOCK_NMIS needs to be preserved as it blocks NMI until the482 * execution of a subsequent IRET instruction in the guest.483 *484 * - The remaining FFs (e.g. timers) can stay in place so that we will be485 * able to generate interrupts that should cause #VMEXITs for the486 * nested-guest.487 */488 /** @todo anything missed more here? */489 pCtx->hwvirt.fLocalForcedActions = pVCpu->fLocalForcedActions & VMCPU_FF_BLOCK_NMIS;490 491 /*492 * Interrupt shadow.493 */494 if (pVmcbCtrl->u64IntShadow & SVM_INTERRUPT_SHADOW_ACTIVE)495 EMSetInhibitInterruptsPC(pVCpu, VmcbNstGst.u64RIP);496 497 /*498 * TLB flush control.499 * Currently disabled since it's redundant as we unconditionally flush the TLB below.500 */501 #if 0502 /** @todo @bugref{7243}: ASID based PGM TLB flushes. 
*/503 if ( pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE504 || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT505 || pVmcbCtrl->TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS)506 PGMFlushTLB(pVCpu, VmcbNstGst.u64CR3, true /* fGlobal */);507 #endif508 509 /** @todo @bugref{7243}: SVM TSC offset, see tmCpuTickGetInternal. */510 511 /*512 * Copy the remaining guest state from the VMCB to the guest-CPU context.513 */514 pCtx->gdtr.cbGdt = VmcbNstGst.GDTR.u32Limit;515 pCtx->gdtr.pGdt = VmcbNstGst.GDTR.u64Base;516 pCtx->idtr.cbIdt = VmcbNstGst.IDTR.u32Limit;517 pCtx->idtr.pIdt = VmcbNstGst.IDTR.u64Base;518 pCtx->cr0 = VmcbNstGst.u64CR0; /** @todo What about informing PGM about CR0.WP? */519 pCtx->cr4 = VmcbNstGst.u64CR4;520 pCtx->cr3 = VmcbNstGst.u64CR3;521 pCtx->cr2 = VmcbNstGst.u64CR2;522 pCtx->dr[6] = VmcbNstGst.u64DR6;523 pCtx->dr[7] = VmcbNstGst.u64DR7;524 pCtx->rflags.u = VmcbNstGst.u64RFlags;525 pCtx->rax = VmcbNstGst.u64RAX;526 pCtx->rsp = VmcbNstGst.u64RSP;527 pCtx->rip = VmcbNstGst.u64RIP;528 pCtx->msrEFER = uValidEfer;529 530 /* Mask DR6, DR7 bits mandatory set/clear bits. */531 pCtx->dr[6] &= ~(X86_DR6_RAZ_MASK | X86_DR6_MBZ_MASK);532 pCtx->dr[6] |= X86_DR6_RA1_MASK;533 pCtx->dr[7] &= ~(X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);534 pCtx->dr[7] |= X86_DR7_RA1_MASK;535 536 /*537 * Ask PGM to flush the TLB as if we continue to interpret the nested-guest538 * instructions from guest memory we'd be in trouble otherwise.539 */540 PGMFlushTLB(pVCpu, pCtx->cr3, true);541 PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);542 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);543 544 /*545 * Check for pending virtual interrupts.546 */547 if (pVmcbCtrl->IntCtrl.n.u1VIrqPending)548 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);549 550 /*551 * Clear global interrupt flags to allow interrupts in the guest.552 */553 pCtx->hwvirt.svm.fGif = 1;554 555 /*556 * Event injection.557 */558 PCSVMEVENT pEventInject = &pVmcbCtrl->EventInject;559 pCtx->hwvirt.svm.fInterceptEvents = !pEventInject->n.u1Valid;560 if (pEventInject->n.u1Valid)561 {562 uint8_t const uVector = pEventInject->n.u8Vector;563 TRPMEVENT const enmType = hmSvmEventToTrpmEventType(pEventInject);564 uint16_t const uErrorCode = pEventInject->n.u1ErrorCodeValid ? pEventInject->n.u32ErrorCode : 0;565 566 /* Validate vectors for hardware exceptions, see AMD spec. 15.20 "Event Injection". */567 if (enmType == TRPM_32BIT_HACK)568 {569 Log(("HMSvmVmRun: Invalid event type =%#x -> #VMEXIT\n", (uint8_t)pEventInject->n.u3Type));570 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);571 }572 if (pEventInject->n.u3Type == SVM_EVENT_EXCEPTION)573 {574 if ( uVector == X86_XCPT_NMI575 || uVector > X86_XCPT_LAST)576 {577 Log(("HMSvmVmRun: Invalid vector for hardware exception. uVector=%#x -> #VMEXIT\n", uVector));578 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);579 }580 if ( uVector == X86_XCPT_BR581 && CPUMIsGuestInLongModeEx(pCtx))582 {583 Log(("HMSvmVmRun: Cannot inject #BR when not in long mode -> #VMEXIT\n"));584 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INVALID, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);585 }586 /** @todo any others? 
*/587 }588 589 /*590 * Update the exit interruption info field so that if an exception occurs591 * while delivering the event causing a #VMEXIT, we only need to update592 * the valid bit while the rest is already in place.593 */594 pVmcbCtrl->ExitIntInfo.u = pVmcbCtrl->EventInject.u;595 pVmcbCtrl->ExitIntInfo.n.u1Valid = 0;596 597 /** @todo NRIP: Software interrupts can only be pushed properly if we support598 * NRIP for the nested-guest to calculate the instruction length599 * below. */600 Log3(("HMSvmVmRun: InjectingEvent: uVector=%u enmType=%d uErrorCode=%u cr2=%#RX64\n", uVector, enmType,601 uErrorCode, pCtx->cr2));602 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, uVector, enmType, uErrorCode, pCtx->cr2, 0 /* cbInstr */);603 if ( rcStrict == VINF_SVM_VMEXIT604 || rcStrict == VERR_SVM_VMEXIT_FAILED)605 return rcStrict;606 }607 608 Log3(("HMSvmVmRun: Entered nested-guest at CS:RIP=%04x:%08RX64\n", pCtx->cs.Sel, pCtx->rip));609 return VINF_SUCCESS;610 }611 612 /* Shouldn't really happen as the caller should've validated the physical address already. */613 Log(("HMSvmVmRun: Failed to read nested-guest VMCB control area at %#RGp -> #VMEXIT\n",614 GCPhysVmcb));615 return VERR_SVM_IPE_4;616 }617 618 /* Shouldn't really happen as the caller should've validated the physical address already. */619 Log(("HMSvmVmRun: Failed to read nested-guest VMCB save-state area at %#RGp -> #VMEXIT\n",620 GCPhysVmcb + RT_OFFSETOF(SVMVMCB, guest)));621 return VERR_SVM_IPE_5;622 }623 624 625 /**626 * SVM nested-guest \#VMEXIT handler.627 *628 * @returns Strict VBox status code.629 * @retval VINF_SVM_VMEXIT when the \#VMEXIT is successful.630 * @retval VERR_SVM_VMEXIT_FAILED when the \#VMEXIT failed restoring the guest's631 * "host state" and a shutdown is required.632 *633 * @param pVCpu The cross context virtual CPU structure.634 * @param pCtx The guest-CPU context.635 * @param uExitCode The exit code.636 * @param uExitInfo1 The exit info. 1 field.637 * @param uExitInfo2 The exit info. 2 field.638 */639 VMM_INT_DECL(VBOXSTRICTRC) HMSvmNstGstVmExit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,640 uint64_t uExitInfo2)641 {642 if ( CPUMIsGuestInSvmNestedHwVirtMode(pCtx)643 || uExitCode == SVM_EXIT_INVALID)644 {645 Log3(("HMSvmNstGstVmExit: CS:RIP=%04x:%08RX64 uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", pCtx->cs.Sel,646 pCtx->rip, uExitCode, uExitInfo1, uExitInfo2));647 648 /*649 * Disable the global interrupt flag to prevent interrupts during the 'atomic' world switch.650 */651 pCtx->hwvirt.svm.fGif = 0;652 653 /*654 * Save the nested-guest state into the VMCB state-save area.655 */656 SVMVMCBSTATESAVE VmcbNstGst;657 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, ES, es);658 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, CS, cs);659 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, SS, ss);660 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &VmcbNstGst, DS, ds);661 VmcbNstGst.GDTR.u32Limit = pCtx->gdtr.cbGdt;662 VmcbNstGst.GDTR.u64Base = pCtx->gdtr.pGdt;663 VmcbNstGst.IDTR.u32Limit = pCtx->idtr.cbIdt;664 VmcbNstGst.IDTR.u32Limit = pCtx->idtr.pIdt;665 VmcbNstGst.u64EFER = pCtx->msrEFER;666 VmcbNstGst.u64CR4 = pCtx->cr4;667 VmcbNstGst.u64CR3 = pCtx->cr3;668 VmcbNstGst.u64CR2 = pCtx->cr2;669 VmcbNstGst.u64CR0 = pCtx->cr0;670 /** @todo Nested paging. 
*/671 VmcbNstGst.u64RFlags = pCtx->rflags.u64;672 VmcbNstGst.u64RIP = pCtx->rip;673 VmcbNstGst.u64RSP = pCtx->rsp;674 VmcbNstGst.u64RAX = pCtx->rax;675 VmcbNstGst.u64DR7 = pCtx->dr[6];676 VmcbNstGst.u64DR6 = pCtx->dr[7];677 VmcbNstGst.u8CPL = pCtx->ss.Attr.n.u2Dpl; /* See comment in CPUMGetGuestCPL(). */678 679 /* Save interrupt shadow of the nested-guest instruction if any. */680 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)681 && EMGetInhibitInterruptsPC(pVCpu) == pCtx->rip)682 pCtx->hwvirt.svm.VmcbCtrl.u64IntShadow |= SVM_INTERRUPT_SHADOW_ACTIVE;683 684 /*685 * Save additional state and intercept information.686 */687 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))688 {689 Assert(pCtx->hwvirt.svm.VmcbCtrl.IntCtrl.n.u1VIrqPending);690 Assert(pCtx->hwvirt.svm.VmcbCtrl.IntCtrl.n.u8VIntrVector);691 }692 /** @todo Save V_TPR, V_IRQ. */693 /** @todo NRIP. */694 695 /* Save exit information. */696 pCtx->hwvirt.svm.VmcbCtrl.u64ExitCode = uExitCode;697 pCtx->hwvirt.svm.VmcbCtrl.u64ExitInfo1 = uExitInfo1;698 pCtx->hwvirt.svm.VmcbCtrl.u64ExitInfo2 = uExitInfo2;699 700 /*701 * Update the exit interrupt information field if this #VMEXIT happened as a result702 * of delivering an event.703 */704 {705 uint8_t uExitIntVector;706 uint32_t uExitIntErr;707 uint32_t fExitIntFlags;708 bool const fRaisingEvent = IEMGetCurrentXcpt(pVCpu, &uExitIntVector, &fExitIntFlags, &uExitIntErr,709 NULL /* uExitIntCr2 */);710 pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u1Valid = fRaisingEvent;711 if (fRaisingEvent)712 {713 pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u8Vector = uExitIntVector;714 pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u3Type = hmSvmEventTypeFromIemEvent(uExitIntVector, fExitIntFlags);715 if (fExitIntFlags & IEM_XCPT_FLAGS_ERR)716 {717 pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u1ErrorCodeValid = true;718 pCtx->hwvirt.svm.VmcbCtrl.ExitIntInfo.n.u32ErrorCode = uExitIntErr;719 }720 }721 }722 723 /*724 * Clear event injection in the VMCB.725 */726 pCtx->hwvirt.svm.VmcbCtrl.EventInject.n.u1Valid = 0;727 728 /*729 * Write back the VMCB controls to the guest VMCB in guest physical memory.730 */731 int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), pCtx->hwvirt.svm.GCPhysVmcb, &pCtx->hwvirt.svm.VmcbCtrl,732 sizeof(pCtx->hwvirt.svm.VmcbCtrl));733 if (RT_SUCCESS(rc))734 {735 /*736 * Prepare for guest's "host mode" by clearing internal processor state bits.737 *738 * Some of these like TSC offset can then be used unconditionally in our TM code739 * but the offset in the guest's VMCB will remain as it should as we've written740 * back the VMCB controls above.741 */742 RT_ZERO(pCtx->hwvirt.svm.VmcbCtrl);743 #if 0744 /* Clear TSC offset. */745 pCtx->hwvirt.svm.VmcbCtrl.u64TSCOffset = 0;746 pCtx->hwvirt.svm.VmcbCtrl.IntCtrl.n.u1VIrqValid = 0;747 pCtx->hwvirt.svm.VmcbCtrl.IntCtrl.n.u1VIntrMasking = 0;748 #endif749 /* Restore guest's force-flags. */750 if (pCtx->hwvirt.fLocalForcedActions)751 VMCPU_FF_SET(pVCpu, pCtx->hwvirt.fLocalForcedActions);752 753 /* Clear nested-guest's interrupt pending. */754 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))755 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);756 757 /** @todo Nested paging. */758 /** @todo ASID. 
*/759 760 /*761 * Reload the guest's "host state".762 */763 PSVMHOSTSTATE pHostState = &pCtx->hwvirt.svm.HostState;764 pCtx->es = pHostState->es;765 pCtx->cs = pHostState->cs;766 pCtx->ss = pHostState->ss;767 pCtx->ds = pHostState->ds;768 pCtx->gdtr = pHostState->gdtr;769 pCtx->idtr = pHostState->idtr;770 pCtx->msrEFER = pHostState->uEferMsr;771 pCtx->cr0 = pHostState->uCr0 | X86_CR0_PE;772 pCtx->cr3 = pHostState->uCr3;773 pCtx->cr4 = pHostState->uCr4;774 pCtx->rflags = pHostState->rflags;775 pCtx->rflags.Bits.u1VM = 0;776 pCtx->rip = pHostState->uRip;777 pCtx->rsp = pHostState->uRsp;778 pCtx->rax = pHostState->uRax;779 pCtx->dr[7] &= ~(X86_DR7_ENABLED_MASK | X86_DR7_RAZ_MASK | X86_DR7_MBZ_MASK);780 pCtx->dr[7] |= X86_DR7_RA1_MASK;781 782 PGMFlushTLB(pVCpu, pCtx->cr3, true);783 PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);784 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);785 786 /** @todo if RIP is not canonical or outside the CS segment limit, we need to787 * raise \#GP(0) in the guest. */788 789 /** @todo check the loaded host-state for consistency. Figure out what790 * exactly this involves? */791 792 rc = VINF_SVM_VMEXIT;793 }794 else795 {796 Log(("HMNstGstSvmVmExit: Writing VMCB at %#RGp failed\n", pCtx->hwvirt.svm.GCPhysVmcb));797 Assert(!CPUMIsGuestInSvmNestedHwVirtMode(pCtx));798 rc = VERR_SVM_VMEXIT_FAILED;799 }800 801 Log3(("HMSvmNstGstVmExit: returns %Rrc\n", rc));802 return rc;803 }804 805 Log(("HMNstGstSvmVmExit: Not in SVM guest mode! uExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitCode,806 uExitInfo1, uExitInfo2));807 RT_NOREF2(uExitInfo1, uExitInfo2);808 return VERR_SVM_IPE_5;809 }810 811 812 /**813 * Checks whether the nested-guest is in a state to receive physical (APIC)814 * interrupts.815 *816 * @returns VBox status code.817 * @retval true if it's ready, false otherwise.818 *819 * @param pVCpu The cross context virtual CPU structure.820 * @param pCtx The guest-CPU context.821 */822 VMM_INT_DECL(bool) HMSvmNstGstCanTakePhysInterrupt(PVMCPU pVCpu, PCCPUMCTX pCtx)823 {824 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));825 RT_NOREF(pVCpu);826 827 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.VmcbCtrl;828 X86EFLAGS fEFlags;829 if (!pVmcbCtrl->IntCtrl.n.u1VIntrMasking)830 fEFlags.u = pCtx->hwvirt.svm.HostState.rflags.u;831 else832 fEFlags.u = pCtx->eflags.u;833 834 return pCtx->hwvirt.svm.fGif && fEFlags.Bits.u1IF;835 }836 837 838 /**839 * Checks whether the nested-guest is in a state to receive virtual (injected by840 * VMRUN) interrupts.841 *842 * @returns VBox status code.843 * @retval true if it's ready, false otherwise.844 *845 * @param pVCpu The cross context virtual CPU structure.846 * @param pCtx The guest-CPU context.847 */848 VMM_INT_DECL(bool) HMSvmNstGstCanTakeVirtInterrupt(PVMCPU pVCpu, PCCPUMCTX pCtx)849 {850 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));851 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));852 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST));853 RT_NOREF(pVCpu);854 855 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.VmcbCtrl;856 if ( !pVmcbCtrl->IntCtrl.n.u1IgnoreTPR857 && pVmcbCtrl->IntCtrl.n.u4VIntrPrio <= pVmcbCtrl->IntCtrl.n.u8VTPR)858 return false;859 860 if (!pCtx->rflags.Bits.u1IF)861 return false;862 863 if (!pCtx->hwvirt.svm.fGif)864 return false;865 866 return true;867 }868 869 870 /**871 * Gets the pending nested-guest interrupt.872 *873 * @returns The nested-guest interrupt to inject.874 * @param pCtx The guest-CPU context.875 */876 VMM_INT_DECL(uint8_t) 
HMSvmNstGstGetInterrupt(PCCPUMCTX pCtx)877 {878 PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.VmcbCtrl;879 return pVmcbCtrl->IntCtrl.n.u8VIntrVector;880 }881 882 883 /**884 * Handles nested-guest SVM control intercepts and performs the \#VMEXIT if the885 * intercept is active.886 *887 * @returns Strict VBox status code.888 * @retval VINF_SVM_INTERCEPT_NOT_ACTIVE if the intercept is not active or889 * we're not executing a nested-guest.890 * @retval VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred891 * successfully.892 * @retval VERR_SVM_VMEXIT_FAILED if the intercept is active and the \#VMEXIT893 * failed and a shutdown needs to be initiated for the geust.894 *895 * @param pVCpu The cross context virtual CPU structure.896 * @param pCtx The guest-CPU context.897 * @param uExitCode The SVM exit code (see SVM_EXIT_XXX).898 * @param uExitInfo1 The exit info. 1 field.899 * @param uExitInfo2 The exit info. 2 field.900 */901 VMM_INT_DECL(VBOXSTRICTRC) HMSvmNstGstHandleCtrlIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,902 uint64_t uExitInfo2)903 {904 #define HMSVM_CTRL_INTERCEPT_VMEXIT(a_Intercept) \905 do { \906 if (CPUMIsGuestSvmCtrlInterceptSet(pCtx, (a_Intercept))) \907 return HMSvmNstGstVmExit(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2); \908 break; \909 } while (0)910 911 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx))912 return VINF_HM_INTERCEPT_NOT_ACTIVE;913 914 switch (uExitCode)915 {916 case SVM_EXIT_EXCEPTION_0: case SVM_EXIT_EXCEPTION_1: case SVM_EXIT_EXCEPTION_2: case SVM_EXIT_EXCEPTION_3:917 case SVM_EXIT_EXCEPTION_4: case SVM_EXIT_EXCEPTION_5: case SVM_EXIT_EXCEPTION_6: case SVM_EXIT_EXCEPTION_7:918 case SVM_EXIT_EXCEPTION_8: case SVM_EXIT_EXCEPTION_9: case SVM_EXIT_EXCEPTION_10: case SVM_EXIT_EXCEPTION_11:919 case SVM_EXIT_EXCEPTION_12: case SVM_EXIT_EXCEPTION_13: case SVM_EXIT_EXCEPTION_14: case SVM_EXIT_EXCEPTION_15:920 case SVM_EXIT_EXCEPTION_16: case SVM_EXIT_EXCEPTION_17: case SVM_EXIT_EXCEPTION_18: case SVM_EXIT_EXCEPTION_19:921 case SVM_EXIT_EXCEPTION_20: case SVM_EXIT_EXCEPTION_21: case SVM_EXIT_EXCEPTION_22: case SVM_EXIT_EXCEPTION_23:922 case SVM_EXIT_EXCEPTION_24: case SVM_EXIT_EXCEPTION_25: case SVM_EXIT_EXCEPTION_26: case SVM_EXIT_EXCEPTION_27:923 case SVM_EXIT_EXCEPTION_28: case SVM_EXIT_EXCEPTION_29: case SVM_EXIT_EXCEPTION_30: case SVM_EXIT_EXCEPTION_31:924 {925 if (CPUMIsGuestSvmXcptInterceptSet(pCtx, (X86XCPT)(uExitCode - SVM_EXIT_EXCEPTION_0)))926 return HMSvmNstGstVmExit(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);927 break;928 }929 930 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR1: case SVM_EXIT_WRITE_CR2: case SVM_EXIT_WRITE_CR3:931 case SVM_EXIT_WRITE_CR4: case SVM_EXIT_WRITE_CR5: case SVM_EXIT_WRITE_CR6: case SVM_EXIT_WRITE_CR7:932 case SVM_EXIT_WRITE_CR8: case SVM_EXIT_WRITE_CR9: case SVM_EXIT_WRITE_CR10: case SVM_EXIT_WRITE_CR11:933 case SVM_EXIT_WRITE_CR12: case SVM_EXIT_WRITE_CR13: case SVM_EXIT_WRITE_CR14: case SVM_EXIT_WRITE_CR15:934 {935 if (CPUMIsGuestSvmWriteCRxInterceptSet(pCtx, uExitCode - SVM_EXIT_WRITE_CR0))936 return HMSvmNstGstVmExit(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);937 break;938 }939 940 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR1: case SVM_EXIT_READ_CR2: case SVM_EXIT_READ_CR3:941 case SVM_EXIT_READ_CR4: case SVM_EXIT_READ_CR5: case SVM_EXIT_READ_CR6: case SVM_EXIT_READ_CR7:942 case SVM_EXIT_READ_CR8: case SVM_EXIT_READ_CR9: case SVM_EXIT_READ_CR10: case SVM_EXIT_READ_CR11:943 case SVM_EXIT_READ_CR12: case SVM_EXIT_READ_CR13: case SVM_EXIT_READ_CR14: case 
SVM_EXIT_READ_CR15:944 {945 if (CPUMIsGuestSvmReadCRxInterceptSet(pCtx, uExitCode - SVM_EXIT_READ_CR0))946 return HMSvmNstGstVmExit(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);947 break;948 }949 950 case SVM_EXIT_READ_DR0: case SVM_EXIT_READ_DR1: case SVM_EXIT_READ_DR2: case SVM_EXIT_READ_DR3:951 case SVM_EXIT_READ_DR4: case SVM_EXIT_READ_DR5: case SVM_EXIT_READ_DR6: case SVM_EXIT_READ_DR7:952 case SVM_EXIT_READ_DR8: case SVM_EXIT_READ_DR9: case SVM_EXIT_READ_DR10: case SVM_EXIT_READ_DR11:953 case SVM_EXIT_READ_DR12: case SVM_EXIT_READ_DR13: case SVM_EXIT_READ_DR14: case SVM_EXIT_READ_DR15:954 {955 if (CPUMIsGuestSvmReadDRxInterceptSet(pCtx, uExitCode - SVM_EXIT_READ_DR0))956 return HMSvmNstGstVmExit(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);957 break;958 }959 960 case SVM_EXIT_WRITE_DR0: case SVM_EXIT_WRITE_DR1: case SVM_EXIT_WRITE_DR2: case SVM_EXIT_WRITE_DR3:961 case SVM_EXIT_WRITE_DR4: case SVM_EXIT_WRITE_DR5: case SVM_EXIT_WRITE_DR6: case SVM_EXIT_WRITE_DR7:962 case SVM_EXIT_WRITE_DR8: case SVM_EXIT_WRITE_DR9: case SVM_EXIT_WRITE_DR10: case SVM_EXIT_WRITE_DR11:963 case SVM_EXIT_WRITE_DR12: case SVM_EXIT_WRITE_DR13: case SVM_EXIT_WRITE_DR14: case SVM_EXIT_WRITE_DR15:964 {965 if (CPUMIsGuestSvmWriteDRxInterceptSet(pCtx, uExitCode - SVM_EXIT_WRITE_DR0))966 return HMSvmNstGstVmExit(pVCpu, pCtx, uExitCode, uExitInfo1, uExitInfo2);967 break;968 }969 970 case SVM_EXIT_INTR: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_INTR);971 case SVM_EXIT_NMI: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_NMI);972 case SVM_EXIT_SMI: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_SMI);973 case SVM_EXIT_INIT: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_INIT);974 case SVM_EXIT_VINTR: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_VINTR);975 case SVM_EXIT_CR0_SEL_WRITE: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_CR0_SEL_WRITES);976 case SVM_EXIT_IDTR_READ: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_IDTR_READS);977 case SVM_EXIT_GDTR_READ: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_GDTR_READS);978 case SVM_EXIT_LDTR_READ: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_LDTR_READS);979 case SVM_EXIT_TR_READ: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_TR_READS);980 case SVM_EXIT_IDTR_WRITE: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_IDTR_WRITES);981 case SVM_EXIT_GDTR_WRITE: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_GDTR_WRITES);982 case SVM_EXIT_LDTR_WRITE: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_LDTR_WRITES);983 case SVM_EXIT_TR_WRITE: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_TR_WRITES);984 case SVM_EXIT_RDTSC: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_RDTSC);985 case SVM_EXIT_RDPMC: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_RDPMC);986 case SVM_EXIT_PUSHF: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_PUSHF);987 case SVM_EXIT_POPF: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_POPF);988 case SVM_EXIT_CPUID: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_CPUID);989 case SVM_EXIT_RSM: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_RSM);990 case SVM_EXIT_IRET: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_IRET);991 case SVM_EXIT_SWINT: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_INTN);992 case SVM_EXIT_INVD: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_INVD);993 case SVM_EXIT_PAUSE: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_PAUSE);994 case SVM_EXIT_HLT: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_HLT);995 case SVM_EXIT_INVLPG: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_INVLPG);996 case SVM_EXIT_INVLPGA: 
HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_INVLPGA);997 case SVM_EXIT_TASK_SWITCH: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_TASK_SWITCH);998 case SVM_EXIT_FERR_FREEZE: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_FERR_FREEZE);999 case SVM_EXIT_SHUTDOWN: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_SHUTDOWN);1000 case SVM_EXIT_VMRUN: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_VMRUN);1001 case SVM_EXIT_VMMCALL: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_VMMCALL);1002 case SVM_EXIT_VMLOAD: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_VMLOAD);1003 case SVM_EXIT_VMSAVE: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_VMSAVE);1004 case SVM_EXIT_STGI: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_STGI);1005 case SVM_EXIT_CLGI: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_CLGI);1006 case SVM_EXIT_SKINIT: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_SKINIT);1007 case SVM_EXIT_RDTSCP: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_RDTSCP);1008 case SVM_EXIT_ICEBP: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_ICEBP);1009 case SVM_EXIT_WBINVD: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_WBINVD);1010 case SVM_EXIT_MONITOR: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_MONITOR);1011 case SVM_EXIT_MWAIT: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_MWAIT);1012 case SVM_EXIT_MWAIT_ARMED: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_MWAIT_ARMED);1013 case SVM_EXIT_XSETBV: HMSVM_CTRL_INTERCEPT_VMEXIT(SVM_CTRL_INTERCEPT_XSETBV);1014 1015 case SVM_EXIT_IOIO:1016 AssertMsgFailed(("Use HMSvmNstGstHandleMsrIntercept!\n"));1017 return VERR_SVM_IPE_1;1018 1019 case SVM_EXIT_MSR:1020 AssertMsgFailed(("Use HMSvmNstGstHandleMsrIntercept!\n"));1021 return VERR_SVM_IPE_1;1022 1023 case SVM_EXIT_NPF:1024 case SVM_EXIT_AVIC_INCOMPLETE_IPI:1025 case SVM_EXIT_AVIC_NOACCEL:1026 AssertMsgFailed(("Todo Implement.\n"));1027 return VERR_SVM_IPE_1;1028 1029 default:1030 AssertMsgFailed(("Unsupported SVM exit code %#RX64\n", uExitCode));1031 return VERR_SVM_IPE_1;1032 }1033 1034 return VINF_HM_INTERCEPT_NOT_ACTIVE;1035 1036 #undef HMSVM_CTRL_INTERCEPT_VMEXIT1037 }1038 1039 1040 /**1041 * Handles nested-guest SVM IO intercepts and performs the \#VMEXIT1042 * if the intercept is active.1043 *1044 * @returns Strict VBox status code.1045 * @retval VINF_SVM_INTERCEPT_NOT_ACTIVE if the intercept is not active or1046 * we're not executing a nested-guest.1047 * @retval VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred1048 * successfully.1049 * @retval VERR_SVM_VMEXIT_FAILED if the intercept is active and the \#VMEXIT1050 * failed and a shutdown needs to be initiated for the geust.1051 *1052 * @param pVCpu The cross context virtual CPU structure.1053 * @param pCtx The guest-CPU context.1054 * @param pIoExitInfo The SVM IOIO exit info. structure.1055 * @param uNextRip The RIP of the instruction following the IO1056 * instruction.1057 */1058 VMM_INT_DECL(VBOXSTRICTRC) HMSvmNstGstHandleIOIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, PCSVMIOIOEXITINFO pIoExitInfo,1059 uint64_t uNextRip)1060 {1061 /*1062 * Check if any IO accesses are being intercepted.1063 */1064 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));1065 Assert(CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_IOIO_PROT));1066 Log3(("HMSvmNstGstHandleIOIntercept: u16Port=%#x (%u)\n", pIoExitInfo->n.u16Port, pIoExitInfo->n.u16Port));1067 1068 /*1069 * The IOPM layout:1070 * Each bit represents one 8-bit port. 
That makes a total of 0..65535 bits or1071 * two 4K pages.1072 *1073 * For IO instructions that access more than a single byte, the permission bits1074 * for all bytes are checked; if any bit is set to 1, the IO access is intercepted.1075 *1076 * Since it's possible to do a 32-bit IO access at port 65534 (accessing 4 bytes),1077 * we need 3 extra bits beyond the second 4K page.1078 */1079 static const uint16_t s_auSizeMasks[] = { 0, 1, 3, 0, 0xf, 0, 0, 0 };1080 uint8_t const *pbIopm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvIoBitmap);1081 Assert(pbIopm);1082 1083 uint16_t const u16Port = pIoExitInfo->n.u16Port;1084 uint16_t const offIopm = u16Port >> 3;1085 uint16_t const fSizeMask = s_auSizeMasks[(pIoExitInfo->u >> SVM_IOIO_OP_SIZE_SHIFT) & 7];1086 uint8_t const cShift = u16Port - (offIopm << 3);1087 uint16_t const fIopmMask = (1 << cShift) | (fSizeMask << cShift);1088 1089 pbIopm += offIopm;1090 uint16_t const u16Iopm = *(uint16_t *)pbIopm;1091 if (u16Iopm & fIopmMask)1092 {1093 Log3(("HMSvmNstGstHandleIOIntercept: u16Port=%#x (%u) offIoPm=%u fSizeMask=%#x cShift=%u fIopmMask=%#x\n", u16Port,1094 u16Port, offIopm, fSizeMask, cShift, fIopmMask));1095 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_IOIO, pIoExitInfo->u, uNextRip);1096 }1097 1098 AssertMsgFailed(("We expect an IO intercept here!\n"));1099 return VINF_HM_INTERCEPT_NOT_ACTIVE;1100 }1101 1102 1103 /**1104 * Handles nested-guest SVM MSR read/write intercepts and performs the \#VMEXIT1105 * if the intercept is active.1106 *1107 * @returns Strict VBox status code.1108 * @retval VINF_SVM_INTERCEPT_NOT_ACTIVE if the MSR permission bitmap does not1109 * specify interception of the accessed MSR @a idMsr.1110 * @retval VINF_SVM_VMEXIT if the intercept is active and the \#VMEXIT occurred1111 * successfully.1112 * @retval VERR_SVM_VMEXIT_FAILED if the intercept is active and the \#VMEXIT1113 * failed and a shutdown needs to be initiated for the geust.1114 *1115 * @param pVCpu The cross context virtual CPU structure.1116 * @param pCtx The guest-CPU context.1117 * @param idMsr The MSR being accessed in the nested-guest.1118 * @param fWrite Whether this is an MSR write access, @c false implies an1119 * MSR read.1120 */1121 VMM_INT_DECL(VBOXSTRICTRC) HMSvmNstGstHandleMsrIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t idMsr, bool fWrite)1122 {1123 /*1124 * Check if any MSRs are being intercepted.1125 */1126 Assert(CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_MSR_PROT));1127 Assert(CPUMIsGuestInSvmNestedHwVirtMode(pCtx));1128 1129 uint64_t const uExitInfo1 = fWrite ? 
SVM_EXIT1_MSR_WRITE : SVM_EXIT1_MSR_READ;1130 1131 /*1132 * Get the byte and bit offset of the permission bits corresponding to the MSR.1133 */1134 uint16_t offMsrpm;1135 uint32_t uMsrpmBit;1136 int rc = hmSvmGetMsrpmOffsetAndBit(idMsr, &offMsrpm, &uMsrpmBit);1137 if (RT_SUCCESS(rc))1138 {1139 Assert(uMsrpmBit < 0x3fff);1140 Assert(offMsrpm < SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT);1141 if (fWrite)1142 ++uMsrpmBit;1143 1144 /*1145 * Check if the bit is set, if so, trigger a #VMEXIT.1146 */1147 uint8_t *pbMsrpm = (uint8_t *)pCtx->hwvirt.svm.CTX_SUFF(pvMsrBitmap);1148 pbMsrpm += offMsrpm;1149 if (ASMBitTest(pbMsrpm, uMsrpmBit))1150 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);1151 }1152 else1153 {1154 /*1155 * This shouldn't happen, but if it does, cause a #VMEXIT and let the "host" (guest hypervisor) deal with it.1156 */1157 Log(("HMSvmNstGstHandleIntercept: Invalid/out-of-range MSR %#RX32 fWrite=%RTbool\n", idMsr, fWrite));1158 return HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_MSR, uExitInfo1, 0 /* uExitInfo2 */);1159 }1160 return VINF_HM_INTERCEPT_NOT_ACTIVE;1161 }1162 1163 1164 204 /** 1165 205 * Gets the MSR permission bitmap byte and bit offset for the specified MSR. … … 1172 212 * returned in @a pbOffMsrpm. 1173 213 */ 1174 VMM_INT_DECL(int) hmSvmGetMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint32_t *puMsrpmBit)214 VMM_INT_DECL(int) HMSvmGetMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint32_t *puMsrpmBit) 1175 215 { 1176 216 Assert(pbOffMsrpm); … … 1217 257 return VERR_OUT_OF_RANGE; 1218 258 } 1219 #endif /* !IN_RC */ 1220 259 -
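The IOPM walk in HMSvmNstGstHandleIOIntercept above is compact; as a standalone restatement of the same bit arithmetic (plain C, not the VirtualBox API — the bitmap pointer, port and access size are parameters of this sketch only): each port owns one bit, a multi-byte access checks as many consecutive bits as it touches, and the bitmap is read 16 bits at a time so a mask near the top of a byte can spill into the next one.

    #include <stdbool.h>
    #include <stdint.h>

    /* An SVM I/O permission bitmap covers ports 0..65535, one bit per port, i.e.
       two 4K pages, plus a few extra bits in a third page so that a 4-byte
       access at port 65534 can still be checked. */
    static bool IsIoAccessIntercepted(const uint8_t *pbIopm, uint16_t u16Port, unsigned cbAccess)
    {
        /* cbAccess (1, 2 or 4 bytes) -> mask of that many consecutive bits. */
        static const uint16_t s_afSizeMasks[] = { 0, 0x1, 0x3, 0, 0xf };

        uint16_t const offIopm   = u16Port >> 3;                  /* byte holding the first bit */
        unsigned const cShift    = u16Port & 7;                   /* bit position in that byte  */
        uint16_t const fIopmMask = (uint16_t)(s_afSizeMasks[cbAccess] << cShift);

        /* Read two bytes so the mask may straddle a byte boundary. */
        uint16_t const fBits = (uint16_t)(pbIopm[offIopm] | ((uint16_t)pbIopm[offIopm + 1] << 8));
        return (fBits & fIopmMask) != 0;   /* any covered bit set -> intercept */
    }

A 4-byte access at port 65534, for instance, needs bits 65534..65537, which is why the IOPM cache spans a third page rather than just the two that cover the port range.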
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r67165 → r67529

@@ ~277 @@
      static FNSVMEXITHANDLER hmR0SvmExitVmsave;
      static FNSVMEXITHANDLER hmR0SvmExitInvlpga;
+     static FNSVMEXITHANDLER hmR0SvmExitVmrun;
  #endif
  /** @} */
…
@@ ~589 @@
      uint16_t offMsrpm;
      uint32_t uMsrpmBit;
-     int rc = hmSvmGetMsrpmOffsetAndBit(uMsr, &offMsrpm, &uMsrpmBit);
+     int rc = HMSvmGetMsrpmOffsetAndBit(uMsr, &offMsrpm, &uMsrpmBit);
      AssertRC(rc);
…
@@ ~2440 @@
      uint8_t uVector     = Event.n.u8Vector;
      uint8_t uVectorType = Event.n.u3Type;
-     TRPMEVENT enmTrapType = hmSvmEventToTrpmEventType(&Event);
+     TRPMEVENT enmTrapType = HMSvmEventToTrpmEventType(&Event);

      Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, uVectorType));
…
@@ ~2967 @@
      HMSVM_ASSERT_PREEMPT_SAFE();

- #ifdef VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM
+ #if defined(VBOX_WITH_NESTED_HWVIRT) && defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM)
      /* Nested Hw. virt through SVM R0 execution is not yet implemented, IEM only, we shouldn't get here. */
      if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+     {
+         Log2(("hmR0SvmPreRunGuest: Rescheduling to IEM due to nested-hwvirt or forced IEM exec -> VINF_EM_RESCHEDULE_REM\n"));
          return VINF_EM_RESCHEDULE_REM;
+     }
  #endif
…
@@ ~3640 @@
      case SVM_EXIT_VMSAVE:  return hmR0SvmExitVmsave(pVCpu, pCtx, pSvmTransient);
      case SVM_EXIT_INVLPGA: return hmR0SvmExitInvlpga(pVCpu, pCtx, pSvmTransient);
+     case SVM_EXIT_VMRUN:   return hmR0SvmExitVmrun(pVCpu, pCtx, pSvmTransient);
  #else
      case SVM_EXIT_CLGI:
…
      case SVM_EXIT_VMSAVE:
      case SVM_EXIT_INVLPGA:
+     case SVM_EXIT_VMRUN:
  #endif
      case SVM_EXIT_RSM:
-     case SVM_EXIT_VMRUN:
      case SVM_EXIT_SKINIT:
          return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient);
…
@@ ~5809 @@
      return VBOXSTRICTRC_VAL(rcStrict);
  }
+
+
+ /**
+  * \#VMEXIT handler for STGI (SVM_EXIT_VMRUN). Conditional \#VMEXIT.
+  */
+ HMSVM_EXIT_DECL hmR0SvmExitVmrun(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+ {
+     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+     /** @todo Stat. */
+     /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmrun); */
+     uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3);
+     VBOXSTRICTRC rcStrict = IEMExecDecodedVmrun(pVCpu, cbInstr);
+     return VBOXSTRICTRC_VAL(rcStrict);
+ }
  #endif /* VBOX_WITH_NESTED_HWVIRT */
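The new hmR0SvmExitVmrun handler passes 3 as the fallback instruction length (VMRUN encodes as 0F 01 D8) and otherwise relies on the hardware decode assist. A minimal sketch of that "prefer NextRIP when the CPU provides it, otherwise use the expected length" idea, assuming for the sketch that a NextRIP value of 0 means the NRIP-save feature is unavailable (plain C, not the actual VirtualBox helper):

    #include <stdint.h>

    /* Computes the length of the intercepted instruction.  CPUs with the
       NRIP-save decode assist store the next RIP in the VMCB, so the length is
       the difference; otherwise fall back to the caller-supplied length of the
       expected instruction (e.g. 3 bytes for VMRUN). */
    static uint8_t GetInstrLength(uint64_t uRip, uint64_t uNextRip, uint8_t cbLikely)
    {
        if (uNextRip != 0 && uNextRip > uRip && uNextRip - uRip <= 15)  /* x86 instructions are at most 15 bytes */
            return (uint8_t)(uNextRip - uRip);
        return cbLikely;
    }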
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r67157 r67529 114 114 #include <VBox/vmm/mm.h> 115 115 #include <VBox/vmm/em.h> 116 #include <VBox/vmm/iem.h> 116 117 #include <VBox/vmm/selm.h> 117 118 #include <VBox/vmm/dbgf.h> … … 173 174 static DECLCALLBACK(void) cpumR3InfoAll(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs); 174 175 static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs); 176 static DECLCALLBACK(void) cpumR3InfoGuestHwvirt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs); 175 177 static DECLCALLBACK(void) cpumR3InfoGuestInstr(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs); 176 178 static DECLCALLBACK(void) cpumR3InfoHyper(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs); … … 981 983 DBGFR3InfoRegisterInternalEx(pVM, "cpumguest", "Displays the guest cpu state.", 982 984 &cpumR3InfoGuest, DBGFINFO_FLAGS_ALL_EMTS); 985 DBGFR3InfoRegisterInternalEx(pVM, "cpumguesthwvirt", "Displays the guest hwvirt. cpu state.", 986 &cpumR3InfoGuestHwvirt, DBGFINFO_FLAGS_ALL_EMTS); 983 987 DBGFR3InfoRegisterInternalEx(pVM, "cpumhyper", "Displays the hypervisor cpu state.", 984 988 &cpumR3InfoHyper, DBGFINFO_FLAGS_ALL_EMTS); … … 2097 2101 cpumR3InfoGuest(pVM, pHlp, pszArgs); 2098 2102 cpumR3InfoGuestInstr(pVM, pHlp, pszArgs); 2103 cpumR3InfoGuestHwvirt(pVM, pHlp, pszArgs); 2099 2104 cpumR3InfoHyper(pVM, pHlp, pszArgs); 2100 2105 cpumR3InfoHost(pVM, pHlp, pszArgs); … … 2148 2153 * @param pVM The cross context VM structure. 2149 2154 * @param pHlp The info helper functions. 2150 * @param pszArgs Arguments , ignored.2155 * @param pszArgs Arguments. 2151 2156 */ 2152 2157 static DECLCALLBACK(void) cpumR3InfoGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs) … … 2166 2171 } 2167 2172 2173 2174 /** 2175 * Display the guest's hardware-virtualization cpu state. 2176 * 2177 * @param pVM The cross context VM structure. 2178 * @param pHlp The info helper functions. 2179 * @param pszArgs Arguments, ignored. 2180 */ 2181 static DECLCALLBACK(void) cpumR3InfoGuestHwvirt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs) 2182 { 2183 RT_NOREF(pszArgs); 2184 2185 PVMCPU pVCpu = VMMGetCpu(pVM); 2186 if (!pVCpu) 2187 pVCpu = &pVM->aCpus[0]; 2188 2189 /* 2190 * Figure out what to dump. 2191 * 2192 * In the future we may need to dump everything whether or not we're actively in nested-guest mode 2193 * or not, hence the reason why we use a mask to determine what needs dumping. Currently, we only 2194 * dump hwvirt. state when the guest CPU is executing a nested-guest. 2195 */ 2196 /** @todo perhaps make this configurable through pszArgs, depending on how much 2197 * noise we wish to accept when nested hwvirt. isn't used. */ 2198 #define CPUMHWVIRTDUMP_NONE (0) 2199 #define CPUMHWVIRTDUMP_SVM RT_BIT(0) 2200 #define CPUMHWVIRTDUMP_VMX RT_BIT(1) 2201 #define CPUMHWVIRTDUMP_COMMON RT_BIT(2) 2202 #define CPUMHWVIRTDUMP_LAST CPUMHWVIRTDUMP_VMX 2203 #define CPUMHWVIRTDUMP_ALL (CPUMHWVIRTDUMP_COMMON | CPUMHWVIRTDUMP_VMX | CPUMHWVIRTDUMP_SVM) 2204 2205 PCPUMCTX pCtx = &pVCpu->cpum.s.Guest; 2206 static const char *const s_aHwvirtModes[] = { "No/inactive", "SVM", "VMX", "Common" }; 2207 uint8_t const idxHwvirtState = CPUMIsGuestInSvmNestedHwVirtMode(pCtx) ? CPUMHWVIRTDUMP_SVM 2208 : CPUMIsGuestInVmxNestedHwVirtMode(pCtx) ? 
CPUMHWVIRTDUMP_VMX : CPUMHWVIRTDUMP_NONE; 2209 AssertCompile(CPUMHWVIRTDUMP_LAST <= RT_ELEMENTS(s_aHwvirtModes)); 2210 Assert(idxHwvirtState < RT_ELEMENTS(s_aHwvirtModes)); 2211 const char *pcszHwvirtMode = s_aHwvirtModes[idxHwvirtState]; 2212 uint32_t const fDumpState = idxHwvirtState; /* | CPUMHWVIRTDUMP_ALL */ 2213 2214 /* 2215 * Dump it. 2216 */ 2217 pHlp->pfnPrintf(pHlp, "VCPU[%u] hardware virtualization state:\n", pVCpu->idCpu); 2218 2219 if (fDumpState & CPUMHWVIRTDUMP_COMMON) 2220 pHlp->pfnPrintf(pHlp, "fLocalForcedActions = %#RX32\n", pCtx->hwvirt.fLocalForcedActions); 2221 pHlp->pfnPrintf(pHlp, "%s hwvirt state%s\n", pcszHwvirtMode, fDumpState ? ":" : ""); 2222 if (fDumpState & CPUMHWVIRTDUMP_SVM) 2223 { 2224 pHlp->pfnPrintf(pHlp, " uMsrHSavePa = %#RX64\n", pCtx->hwvirt.svm.uMsrHSavePa); 2225 pHlp->pfnPrintf(pHlp, " GCPhysVmcb = %#RGp\n", pCtx->hwvirt.svm.GCPhysVmcb); 2226 pHlp->pfnPrintf(pHlp, " VmcbCtrl:\n"); 2227 HMR3InfoSvmVmcbCtrl(pHlp, &pCtx->hwvirt.svm.VmcbCtrl, " " /* pszPrefix */); 2228 pHlp->pfnPrintf(pHlp, " HostState:\n"); 2229 pHlp->pfnPrintf(pHlp, " uEferMsr = %#RX64\n", pCtx->hwvirt.svm.HostState.uEferMsr); 2230 pHlp->pfnPrintf(pHlp, " uCr0 = %#RX64\n", pCtx->hwvirt.svm.HostState.uCr0); 2231 pHlp->pfnPrintf(pHlp, " uCr4 = %#RX64\n", pCtx->hwvirt.svm.HostState.uCr4); 2232 pHlp->pfnPrintf(pHlp, " uCr3 = %#RX64\n", pCtx->hwvirt.svm.HostState.uCr3); 2233 pHlp->pfnPrintf(pHlp, " uRip = %#RX64\n", pCtx->hwvirt.svm.HostState.uRip); 2234 pHlp->pfnPrintf(pHlp, " uRsp = %#RX64\n", pCtx->hwvirt.svm.HostState.uRsp); 2235 pHlp->pfnPrintf(pHlp, " uRax = %#RX64\n", pCtx->hwvirt.svm.HostState.uRax); 2236 pHlp->pfnPrintf(pHlp, " rflags = %#RX64\n", pCtx->hwvirt.svm.HostState.rflags.u64); 2237 PCPUMSELREG pSel = &pCtx->hwvirt.svm.HostState.es; 2238 pHlp->pfnPrintf(pHlp, " es = {%04x base=%016RX64 limit=%08x flags=%08x}\n", 2239 pSel->Sel, pSel->u64Base, pSel->u32Limit, pSel->fFlags); 2240 pSel = &pCtx->hwvirt.svm.HostState.cs; 2241 pHlp->pfnPrintf(pHlp, " cs = {%04x base=%016RX64 limit=%08x flags=%08x}\n", 2242 pSel->Sel, pSel->u64Base, pSel->u32Limit, pSel->fFlags); 2243 pSel = &pCtx->hwvirt.svm.HostState.ss; 2244 pHlp->pfnPrintf(pHlp, " ss = {%04x base=%016RX64 limit=%08x flags=%08x}\n", 2245 pSel->Sel, pSel->u64Base, pSel->u32Limit, pSel->fFlags); 2246 pSel = &pCtx->hwvirt.svm.HostState.ds; 2247 pHlp->pfnPrintf(pHlp, " ds = {%04x base=%016RX64 limit=%08x flags=%08x}\n", 2248 pSel->Sel, pSel->u64Base, pSel->u32Limit, pSel->fFlags); 2249 pHlp->pfnPrintf(pHlp, " gdtr = %016RX64:%04x\n", pCtx->hwvirt.svm.HostState.gdtr.pGdt, 2250 pCtx->hwvirt.svm.HostState.gdtr.cbGdt); 2251 pHlp->pfnPrintf(pHlp, " idtr = %016RX64:%04x\n", pCtx->hwvirt.svm.HostState.idtr.pIdt, 2252 pCtx->hwvirt.svm.HostState.idtr.cbIdt); 2253 pHlp->pfnPrintf(pHlp, " fGif = %u\n", pCtx->hwvirt.svm.fGif); 2254 pHlp->pfnPrintf(pHlp, " cPauseFilter = %RU16\n", pCtx->hwvirt.svm.cPauseFilter); 2255 pHlp->pfnPrintf(pHlp, " cPauseFilterThreshold = %RU32\n", pCtx->hwvirt.svm.cPauseFilterThreshold); 2256 pHlp->pfnPrintf(pHlp, " fInterceptEvents = %u\n", pCtx->hwvirt.svm.fInterceptEvents); 2257 pHlp->pfnPrintf(pHlp, " pvMsrBitmapR3 = %p\n", pCtx->hwvirt.svm.pvMsrBitmapR3); 2258 pHlp->pfnPrintf(pHlp, " pvMsrBitmapR0 = %RKv\n", pCtx->hwvirt.svm.pvMsrBitmapR0); 2259 pHlp->pfnPrintf(pHlp, " pvIoBitmapR3 = %p\n", pCtx->hwvirt.svm.pvIoBitmapR3); 2260 pHlp->pfnPrintf(pHlp, " pvIoBitmapR0 = %RKv\n", pCtx->hwvirt.svm.pvIoBitmapR0); 2261 } 2262 2263 /** @todo Intel. 
*/ 2264 #if 0 2265 if (fDumpState & CPUMHWVIRTDUMP_VMX) 2266 { 2267 } 2268 #endif 2269 2270 #undef CPUMHWVIRTDUMP_NONE 2271 #undef CPUMHWVIRTDUMP_COMMON 2272 #undef CPUMHWVIRTDUMP_SVM 2273 #undef CPUMHWVIRTDUMP_VMX 2274 #undef CPUMHWVIRTDUMP_LAST 2275 #undef CPUMHWVIRTDUMP_ALL 2276 } 2168 2277 2169 2278 /** -
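The new cpumR3InfoGuestHwvirt callback plugs into DBGF's named info-handler registry: register a name, description and callback once, and any consumer (debugger console, release-log dumps, guru meditation reports) can request it by name later, as the VM.cpp and VMMGuruMeditation.cpp changes below do. A toy version of that registry pattern for illustration only — not the DBGF API, and all names and types here are invented for the sketch:

    #include <stdio.h>
    #include <string.h>

    typedef void (*PFNINFOHANDLER)(FILE *pOut, const char *pszArgs);

    typedef struct INFOENTRY
    {
        const char     *pszName;    /* item name, e.g. "cpumguesthwvirt" */
        const char     *pszDesc;    /* one-line description              */
        PFNINFOHANDLER  pfnHandler; /* callback that prints the state    */
    } INFOENTRY;

    static INFOENTRY g_aInfo[32];
    static unsigned  g_cInfo;

    static void InfoRegister(const char *pszName, const char *pszDesc, PFNINFOHANDLER pfnHandler)
    {
        if (g_cInfo < sizeof(g_aInfo) / sizeof(g_aInfo[0]))
            g_aInfo[g_cInfo++] = (INFOENTRY){ pszName, pszDesc, pfnHandler };
    }

    static void InfoDump(const char *pszName, FILE *pOut, const char *pszArgs)
    {
        for (unsigned i = 0; i < g_cInfo; i++)
            if (!strcmp(g_aInfo[i].pszName, pszName))
            {
                g_aInfo[i].pfnHandler(pOut, pszArgs);
                return;
            }
        fprintf(pOut, "info item '%s' not registered\n", pszName);
    }

    /* Example handler in the spirit of cpumR3InfoGuestHwvirt. */
    static void InfoGuestHwvirt(FILE *pOut, const char *pszArgs)
    {
        (void)pszArgs;   /* arguments such as "verbose" would be parsed here */
        fprintf(pOut, "SVM hwvirt state: GIF=1, GCPhysVmcb=...\n");
    }

    int main(void)
    {
        InfoRegister("cpumguesthwvirt", "Displays the guest hwvirt. cpu state.", InfoGuestHwvirt);
        InfoDump("cpumguesthwvirt", stdout, "verbose");
        return 0;
    }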
trunk/src/VBox/VMM/VMMR3/EM.cpp
r67204 → r67529

@@ ~1960 @@
  {
      bool fIntrEnabled;
+ #ifdef VBOX_WITH_RAW_MODE
+     fIntrEnabled = PATMAreInterruptsEnabled(pVM);
+ #else
+     fIntrEnabled = true;
+ #endif
+     /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
      PCPUMCTX pCtx = pVCpu->em.s.pCtx;
- #ifdef VBOX_WITH_RAW_MODE
-     fIntrEnabled = PATMAreInterruptsEnabled(pVM); RT_NOREF(pCtx);
- #elif defined(VBOX_WITH_NESTED_HWVIRT)
-     if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
-         fIntrEnabled = HMSvmNstGstCanTakePhysInterrupt(pVCpu, pCtx);
-     else
-         fIntrEnabled = pCtx->eflags.Bits.u1IF;
+ #ifdef VBOX_WITH_NESTED_HWVIRT
+     fIntrEnabled &= pCtx->hwvirt.svm.fGif;
+     if (fIntrEnabled)
+     {
+         if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
+             fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pCtx);
+         else
+             fIntrEnabled = pCtx->eflags.Bits.u1IF;
+     }
  #else
      fIntrEnabled = pCtx->eflags.Bits.u1IF;
…
@@ ~1980 @@
      if (CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_INTR))
      {
-         VBOXSTRICTRC rcStrict = HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_INTR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+         VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
          if (rcStrict == VINF_SVM_VMEXIT)
              rc2 = VINF_EM_RESCHEDULE;
…
@@ ~1997 @@
      /** @todo this really isn't nice, should properly handle this */
      rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
-     if (pVM->em.s.fIemExecutesAll && (rc2 == VINF_EM_RESCHEDULE_REM || rc2 == VINF_EM_RESCHEDULE_HM || rc2 == VINF_EM_RESCHEDULE_RAW))
+     if (pVM->em.s.fIemExecutesAll && (   rc2 == VINF_EM_RESCHEDULE_REM
+                                       || rc2 == VINF_EM_RESCHEDULE_HM
+                                       || rc2 == VINF_EM_RESCHEDULE_RAW))
          rc2 = VINF_EM_RESCHEDULE;
  #ifdef VBOX_STRICT
…
@@ ~2013 @@
       * Check nested-guest virtual interrupts.
       */
-     if ( HMSvmNstGstCanTakeVirtInterrupt(pVCpu, pCtx))
+     if (CPUMCanSvmNstGstTakeVirtIntr(pCtx))
      {
          if (CPUMIsGuestSvmCtrlInterceptSet(pCtx, SVM_CTRL_INTERCEPT_VINTR))
          {
-             VBOXSTRICTRC rcStrict = HMSvmNstGstVmExit(pVCpu, pCtx, SVM_EXIT_VINTR, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
+             VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
              if (rcStrict == VINF_SVM_VMEXIT)
                  rc2 = VINF_EM_RESCHEDULE;
…
@@ ~2034 @@
       */
      VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
-     uint8_t uNstGstVector = HMSvmNstGstGetInterrupt(pCtx);
+     uint8_t uNstGstVector = CPUMGetSvmNstGstInterrupt(pCtx);
      TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
+     Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
      /** @todo reschedule to HM/REM later, when the HMR0 nested-guest execution is
       *        done. For now just reschedule to IEM. */
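The EM loop now defers the "can the nested-guest take this interrupt?" questions to CPUM. For the virtual interrupt case the rule is the one already visible in HMSvmNstGstCanTakeVirtInterrupt earlier in this changeset: the V_IRQ priority must beat V_TPR unless the VMCB says to ignore the TPR, and both the nested-guest's RFLAGS.IF and the global interrupt flag must be set. A standalone restatement of that check (plain C; the parameters stand in for the VMCB and CPU fields):

    #include <stdbool.h>
    #include <stdint.h>

    /* Decides whether a virtual interrupt injected through the VMCB (V_IRQ)
       may be delivered to the nested-guest right now. */
    static bool CanTakeVirtIntr(bool fIgnoreTpr, uint8_t uVIntrPrio, uint8_t uVTpr,
                                bool fGuestIf, bool fGif)
    {
        /* Priority check against the virtual TPR, unless the hypervisor opted out. */
        if (!fIgnoreTpr && uVIntrPrio <= uVTpr)
            return false;

        /* The nested-guest must have interrupts enabled and GIF must be set. */
        return fGuestIf && fGif;
    }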
trunk/src/VBox/VMM/VMMR3/HM.cpp
r66684 r67529 47 47 #include <VBox/vmm/dbgf.h> 48 48 #include <VBox/vmm/iom.h> 49 #include <VBox/vmm/iem.h> 49 50 #include <VBox/vmm/patm.h> 50 51 #include <VBox/vmm/csam.h> … … 2682 2683 Assert(HMIsEnabled(pVM)); 2683 2684 2685 #if defined(VBOX_WITH_NESTED_HWVIRT) && defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) 2686 if (CPUMIsGuestInNestedHwVirtMode(pCtx)) 2687 { 2688 Log(("HMR3CanExecuteGuest: In nested-guest mode - returning false")); 2689 return false; 2690 } 2691 #endif 2692 2684 2693 /* If we're still executing the IO code, then return false. */ 2685 2694 if ( RT_UNLIKELY(pVCpu->hm.s.EmulateIoBlock.fEnabled) … … 3593 3602 } 3594 3603 3604 3605 /** 3606 * Displays SVM VMCB controls. 3607 * 3608 * @param pHlp The info helper functions. 3609 * @param pVmcbCtrl Pointer to a SVM VMCB controls area. 3610 * @param pszPrefix Caller specified string prefix. 3611 */ 3612 VMMR3_INT_DECL(void) HMR3InfoSvmVmcbCtrl(PCDBGFINFOHLP pHlp, PCSVMVMCBCTRL pVmcbCtrl, const char *pszPrefix) 3613 { 3614 AssertReturnVoid(pHlp); 3615 AssertReturnVoid(pVmcbCtrl); 3616 3617 pHlp->pfnPrintf(pHlp, "%su16InterceptRdCRx = %#RX16\n", pszPrefix, pVmcbCtrl->u16InterceptRdCRx); 3618 pHlp->pfnPrintf(pHlp, "%su16InterceptWrCRx = %#RX16\n", pszPrefix, pVmcbCtrl->u16InterceptWrCRx); 3619 pHlp->pfnPrintf(pHlp, "%su16InterceptRdDRx = %#RX16\n", pszPrefix, pVmcbCtrl->u16InterceptRdDRx); 3620 pHlp->pfnPrintf(pHlp, "%su16InterceptWrDRx = %#RX16\n", pszPrefix, pVmcbCtrl->u16InterceptWrDRx); 3621 pHlp->pfnPrintf(pHlp, "%su32InterceptXcpt = %#RX32\n", pszPrefix, pVmcbCtrl->u32InterceptXcpt); 3622 pHlp->pfnPrintf(pHlp, "%su64InterceptCtrl = %#RX64\n", pszPrefix, pVmcbCtrl->u64InterceptCtrl); 3623 pHlp->pfnPrintf(pHlp, "%su16PauseFilterThreshold = %#RX16\n", pszPrefix, pVmcbCtrl->u16PauseFilterThreshold); 3624 pHlp->pfnPrintf(pHlp, "%su16PauseFilterCount = %#RX16\n", pszPrefix, pVmcbCtrl->u16PauseFilterCount); 3625 pHlp->pfnPrintf(pHlp, "%su64IOPMPhysAddr = %#RX64\n", pszPrefix, pVmcbCtrl->u64IOPMPhysAddr); 3626 pHlp->pfnPrintf(pHlp, "%su64MSRPMPhysAddr = %#RX64\n", pszPrefix, pVmcbCtrl->u64MSRPMPhysAddr); 3627 pHlp->pfnPrintf(pHlp, "%su64TSCOffset = %#RX64\n", pszPrefix, pVmcbCtrl->u64TSCOffset); 3628 pHlp->pfnPrintf(pHlp, "%sTLBCtrl\n", pszPrefix); 3629 pHlp->pfnPrintf(pHlp, "%s u32ASID = %#RX32\n", pszPrefix, pVmcbCtrl->TLBCtrl.n.u32ASID); 3630 pHlp->pfnPrintf(pHlp, "%s u8TLBFlush = %u\n", pszPrefix, pVmcbCtrl->TLBCtrl.n.u8TLBFlush); 3631 pHlp->pfnPrintf(pHlp, "%sIntCtrl\n", pszPrefix); 3632 pHlp->pfnPrintf(pHlp, "%s u8VTPR = %#RX8 (%u)\n", pszPrefix, pVmcbCtrl->IntCtrl.n.u8VTPR, pVmcbCtrl->IntCtrl.n.u8VTPR); 3633 pHlp->pfnPrintf(pHlp, "%s u1VIrqPending = %RTbool\n", pszPrefix, pVmcbCtrl->IntCtrl.n.u1VIrqPending); 3634 pHlp->pfnPrintf(pHlp, "%s u4VIntrPrio = %#RX8\n", pszPrefix, pVmcbCtrl->IntCtrl.n.u4VIntrPrio); 3635 pHlp->pfnPrintf(pHlp, "%s u1IgnoreTPR = %RTbool\n", pszPrefix, pVmcbCtrl->IntCtrl.n.u1IgnoreTPR); 3636 pHlp->pfnPrintf(pHlp, "%s u1VIntrMasking = %RTbool\n", pszPrefix, pVmcbCtrl->IntCtrl.n.u1VIntrMasking); 3637 pHlp->pfnPrintf(pHlp, "%s u1AvicEnable = %RTbool\n", pszPrefix, pVmcbCtrl->IntCtrl.n.u1AvicEnable); 3638 pHlp->pfnPrintf(pHlp, "%s u8VIntrVector = %#RX8\n", pszPrefix, pVmcbCtrl->IntCtrl.n.u8VIntrVector); 3639 pHlp->pfnPrintf(pHlp, "%su64IntShadow = %#RX64\n", pszPrefix, pVmcbCtrl->u64IntShadow); 3640 pHlp->pfnPrintf(pHlp, "%su64ExitCode = %#RX64\n", pszPrefix, pVmcbCtrl->u64ExitCode); 3641 pHlp->pfnPrintf(pHlp, "%su64ExitInfo1 = %#RX64\n", pszPrefix, pVmcbCtrl->u64ExitInfo1); 3642 pHlp->pfnPrintf(pHlp, 
"%su64ExitInfo2 = %#RX64\n", pszPrefix, pVmcbCtrl->u64ExitInfo2); 3643 pHlp->pfnPrintf(pHlp, "%sExitIntInfo\n", pszPrefix); 3644 pHlp->pfnPrintf(pHlp, "%s u8Vector = %#RX8 (%u)\n", pszPrefix, pVmcbCtrl->ExitIntInfo.n.u8Vector, pVmcbCtrl->ExitIntInfo.n.u8Vector); 3645 pHlp->pfnPrintf(pHlp, "%s u3Type = %u\n", pszPrefix, pVmcbCtrl->ExitIntInfo.n.u3Type); 3646 pHlp->pfnPrintf(pHlp, "%s u1ErrorCodeValid = %RTbool\n", pszPrefix, pVmcbCtrl->ExitIntInfo.n.u1ErrorCodeValid); 3647 pHlp->pfnPrintf(pHlp, "%s u1Valid = %RTbool\n", pszPrefix, pVmcbCtrl->ExitIntInfo.n.u1Valid); 3648 pHlp->pfnPrintf(pHlp, "%s u32ErrorCode = %#RX32\n", pszPrefix, pVmcbCtrl->ExitIntInfo.n.u32ErrorCode); 3649 pHlp->pfnPrintf(pHlp, "%sNestedPaging\n", pszPrefix); 3650 pHlp->pfnPrintf(pHlp, "%s u1NestedPaging = %RTbool\n", pszPrefix, pVmcbCtrl->NestedPaging.n.u1NestedPaging); 3651 pHlp->pfnPrintf(pHlp, "%sAvicBar\n", pszPrefix); 3652 pHlp->pfnPrintf(pHlp, "%s u40Addr = %#RX64\n", pszPrefix, pVmcbCtrl->AvicBar.n.u40Addr); 3653 pHlp->pfnPrintf(pHlp, "%sEventInject\n", pszPrefix); 3654 pHlp->pfnPrintf(pHlp, "%s EventInject\n", pszPrefix); 3655 pHlp->pfnPrintf(pHlp, "%s u8Vector = %#RX32 (%u)\n", pszPrefix, pVmcbCtrl->EventInject.n.u8Vector, pVmcbCtrl->EventInject.n.u8Vector); 3656 pHlp->pfnPrintf(pHlp, "%s u3Type = %u\n", pszPrefix, pVmcbCtrl->EventInject.n.u3Type); 3657 pHlp->pfnPrintf(pHlp, "%s u1ErrorCodeValid = %RTbool\n", pszPrefix, pVmcbCtrl->EventInject.n.u1ErrorCodeValid); 3658 pHlp->pfnPrintf(pHlp, "%s u1Valid = %RTbool\n", pszPrefix, pVmcbCtrl->EventInject.n.u1Valid); 3659 pHlp->pfnPrintf(pHlp, "%s u32ErrorCode = %#RX32\n", pszPrefix, pVmcbCtrl->EventInject.n.u32ErrorCode); 3660 pHlp->pfnPrintf(pHlp, "%su64NestedPagingCR3 = %#RX64\n", pszPrefix, pVmcbCtrl->u64NestedPagingCR3); 3661 pHlp->pfnPrintf(pHlp, "%su64LBRVirt = %#RX64\n", pszPrefix, pVmcbCtrl->u64LBRVirt); 3662 pHlp->pfnPrintf(pHlp, "%su64VmcbCleanBits = %#RX64\n", pszPrefix, pVmcbCtrl->u64VmcbCleanBits); 3663 pHlp->pfnPrintf(pHlp, "%su64NextRIP = %#RX64\n", pszPrefix, pVmcbCtrl->u64NextRIP); 3664 pHlp->pfnPrintf(pHlp, "%scbInstrFetched = %u\n", pszPrefix, pVmcbCtrl->cbInstrFetched); 3665 pHlp->pfnPrintf(pHlp, "%sabInstr = %.*Rhxs\n", pszPrefix, sizeof(pVmcbCtrl->abInstr), pVmcbCtrl->abInstr); 3666 pHlp->pfnPrintf(pHlp, "%sAvicBackingPagePtr\n", pszPrefix); 3667 pHlp->pfnPrintf(pHlp, "%s u40Addr = %#RX64\n", pszPrefix, pVmcbCtrl->AvicBackingPagePtr.n.u40Addr); 3668 pHlp->pfnPrintf(pHlp, "%sAvicLogicalTablePtr\n", pszPrefix); 3669 pHlp->pfnPrintf(pHlp, "%s u40Addr = %#RX64\n", pszPrefix, pVmcbCtrl->AvicLogicalTablePtr.n.u40Addr); 3670 pHlp->pfnPrintf(pHlp, "%sAvicPhysicalTablePtr\n", pszPrefix); 3671 pHlp->pfnPrintf(pHlp, "%s u8LastGuestCoreId = %u\n", pszPrefix, pVmcbCtrl->AvicPhysicalTablePtr.n.u8LastGuestCoreId); 3672 pHlp->pfnPrintf(pHlp, "%s u40Addr = %#RX64\n", pszPrefix, pVmcbCtrl->AvicPhysicalTablePtr.n.u40Addr); 3673 } 3674 -
trunk/src/VBox/VMM/VMMR3/VM.cpp
r66096 → r67529

@@ ~2309 @@
      DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
      RTLogRelPrintf("***\n");
+     DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguesthwvirt", "verbose", DBGFR3InfoLogRelHlp());
+     RTLogRelPrintf("***\n");
      DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "mode", NULL, DBGFR3InfoLogRelHlp());
      RTLogRelPrintf("***\n");
…
@@ ~2341 @@
      RTLogRelPrintf("****************** Guest state at power off for VCpu %u ******************\n", pVCpu->idCpu);
      DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", DBGFR3InfoLogRelHlp());
+     RTLogRelPrintf("***\n");
+     DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguesthwvirt", "verbose", DBGFR3InfoLogRelHlp());
      RTLogRelPrintf("***\n");
      DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "mode", NULL, DBGFR3InfoLogRelHlp());
trunk/src/VBox/VMM/VMMR3/VMMGuruMeditation.cpp
r65650 → r67529

@@ ~654 @@
      DBGFR3Info(pVM->pUVM, "cpumguest", NULL, pHlp);
      DBGFR3Info(pVM->pUVM, "cpumguestinstr", NULL, pHlp);
+     DBGFR3Info(pVM->pUVM, "cpumguesthwvirt", NULL, pHlp);
      break;
  }
…
@@ ~675 @@
      } const aInfo[] =
      {
-         { "mappings",       NULL },
-         { "hma",            NULL },
-         { "cpumguest",      "verbose" },
-         { "cpumguestinstr", "verbose" },
-         { "cpumhyper",      "verbose" },
-         { "cpumhost",       "verbose" },
-         { "mode",           "all" },
-         { "cpuid",          "verbose" },
-         { "handlers",       "phys virt hyper stats" },
-         { "timers",         NULL },
-         { "activetimers",   NULL },
+         { "mappings",        NULL },
+         { "hma",             NULL },
+         { "cpumguest",       "verbose" },
+         { "cpumguesthwvirt", "verbose" },
+         { "cpumguestinstr",  "verbose" },
+         { "cpumhyper",       "verbose" },
+         { "cpumhost",        "verbose" },
+         { "mode",            "all" },
+         { "cpuid",           "verbose" },
+         { "handlers",        "phys virt hyper stats" },
+         { "timers",          NULL },
+         { "activetimers",    NULL },
      };
      for (unsigned i = 0; i < RT_ELEMENTS(aInfo); i++)
…
@@ ~702 @@
      DBGFR3InfoMulti(pVM,
                      "*",
-                     "mappings|hma|cpum|cpumguest|cpumguestinstr|cpumhyper|cpumhost|mode|cpuid"
+                     "mappings|hma|cpum|cpumguest|cpumguesthwvirt|cpumguestinstr|cpumhyper|cpumhost|mode|cpuid"
                      "|pgmpd|pgmcr3|timers|activetimers|handlers|help",
                      "!!\n"
trunk/src/VBox/VMM/include/HMInternal.h
r66356 → r67529

@@ ~1102 @@
  AssertCompileMemberAlignment(HMCPU, Event, 8);

- VMM_INT_DECL(TRPMEVENT) hmSvmEventToTrpmEventType(PCSVMEVENT pSvmEvent);
- VMM_INT_DECL(int)       hmSvmGetMsrpmOffsetAndBit(uint32_t idMsr, uint16_t *pbOffMsrpm, uint32_t *puMsrpmBit);
-
-
  #ifdef IN_RING0
  VMMR0_INT_DECL(PHMGLOBALCPUINFO) hmR0GetCurrentCpu(void);