Changeset 93748 in vbox for trunk/src/VBox
- Timestamp: Feb 15, 2022, 12:20:46 PM (3 years ago)
- Location: trunk/src/VBox/VMM
- Files: 3 edited
trunk/src/VBox/VMM/VMMAll/VMXAllTemplate.cpp.h
r93583 r93748

#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */


/** @name Execution loop for single stepping, DBGF events and expensive Dtrace
 *        probes.
 *
 * The following few functions and associated structure contain the bloat
 * necessary for providing detailed debug events and dtrace probes as well as
 * reliable host-side single stepping.  This works on the principle of
 * "subclassing" the normal execution loop and workers.  We replace the loop
 * method completely and override selected helpers to add necessary adjustments
 * to their core operation.
 *
 * The goal is to keep the "parent" code lean and mean, so as not to sacrifice
 * any performance for debug and analysis features.
 *
 * @{
 */

/**
 * Transient per-VCPU debug state of the VMCS and related info we save/restore
 * in the debug run loop.
 */
typedef struct VMXRUNDBGSTATE
{
    /** The RIP we started executing at.  This is for detecting that we stepped. */
    uint64_t    uRipStart;
    /** The CS we started executing with. */
    uint16_t    uCsStart;

    /** Whether we've actually modified the 1st execution control field. */
    bool        fModifiedProcCtls : 1;
    /** Whether we've actually modified the 2nd execution control field. */
    bool        fModifiedProcCtls2 : 1;
    /** Whether we've actually modified the exception bitmap. */
    bool        fModifiedXcptBitmap : 1;

    /** Whether we want the modified CR0 mask to be cleared. */
    bool        fClearCr0Mask : 1;
    /** Whether we want the modified CR4 mask to be cleared. */
    bool        fClearCr4Mask : 1;
    /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */
    uint32_t    fCpe1Extra;
    /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */
    uint32_t    fCpe1Unwanted;
    /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */
    uint32_t    fCpe2Extra;
    /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */
    uint32_t    bmXcptExtra;
    /** The sequence number of the Dtrace provider settings the state was
     *  configured against. */
    uint32_t    uDtraceSettingsSeqNo;
    /** VM-exits to check (one bit per VM-exit). */
    uint32_t    bmExitsToCheck[3];

    /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */
    uint32_t    fProcCtlsInitial;
    /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */
    uint32_t    fProcCtls2Initial;
    /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */
    uint32_t    bmXcptInitial;
} VMXRUNDBGSTATE;
AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);
typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;


/**
 * Initializes the VMXRUNDBGSTATE structure.
 *
 * @param   pVCpu           The cross context virtual CPU structure of the
 *                          calling EMT.
 * @param   pVmxTransient   The VMX-transient structure.
 * @param   pDbgState       The debug state to initialize.
10285 */ 10286 static void vmxHCRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState) 10287 { 10288 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip; 10289 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel; 10290 10291 pDbgState->fModifiedProcCtls = false; 10292 pDbgState->fModifiedProcCtls2 = false; 10293 pDbgState->fModifiedXcptBitmap = false; 10294 pDbgState->fClearCr0Mask = false; 10295 pDbgState->fClearCr4Mask = false; 10296 pDbgState->fCpe1Extra = 0; 10297 pDbgState->fCpe1Unwanted = 0; 10298 pDbgState->fCpe2Extra = 0; 10299 pDbgState->bmXcptExtra = 0; 10300 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls; 10301 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2; 10302 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap; 10303 } 10304 10305 10306 /** 10307 * Updates the VMSC fields with changes requested by @a pDbgState. 10308 * 10309 * This is performed after hmR0VmxPreRunGuestDebugStateUpdate as well 10310 * immediately before executing guest code, i.e. when interrupts are disabled. 10311 * We don't check status codes here as we cannot easily assert or return in the 10312 * latter case. 10313 * 10314 * @param pVCpu The cross context virtual CPU structure. 10315 * @param pVmxTransient The VMX-transient structure. 10316 * @param pDbgState The debug state. 10317 */ 10318 static void vmxHCPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState) 10319 { 10320 /* 10321 * Ensure desired flags in VMCS control fields are set. 10322 * (Ignoring write failure here, as we're committed and it's just debug extras.) 10323 * 10324 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so 10325 * there should be no stale data in pCtx at this point. 
10326 */ 10327 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 10328 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra 10329 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted)) 10330 { 10331 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra; 10332 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted; 10333 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls); 10334 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls)); 10335 pDbgState->fModifiedProcCtls = true; 10336 } 10337 10338 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra) 10339 { 10340 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra; 10341 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2); 10342 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2)); 10343 pDbgState->fModifiedProcCtls2 = true; 10344 } 10345 10346 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra) 10347 { 10348 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra; 10349 VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap); 10350 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap)); 10351 pDbgState->fModifiedXcptBitmap = true; 10352 } 10353 10354 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0) 10355 { 10356 pVmcsInfo->u64Cr0Mask = 0; 10357 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR0_MASK, 0); 10358 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n")); 10359 } 10360 10361 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0) 10362 { 10363 pVmcsInfo->u64Cr4Mask = 0; 10364 VMX_VMCS_WRITE_NW(pVCpu, VMX_VMCS_CTRL_CR4_MASK, 0); 10365 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n")); 10366 } 10367 10368 NOREF(pVCpu); 10369 } 10370 10371 10372 /** 10373 * Restores VMCS fields that were changed by hmR0VmxPreRunGuestDebugStateApply for 10374 * re-entry next time around. 10375 * 10376 * @returns Strict VBox status code (i.e. informational status codes too). 10377 * @param pVCpu The cross context virtual CPU structure. 10378 * @param pVmxTransient The VMX-transient structure. 10379 * @param pDbgState The debug state. 10380 * @param rcStrict The return code from executing the guest using single 10381 * stepping. 10382 */ 10383 static VBOXSTRICTRC vmxHCRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState, 10384 VBOXSTRICTRC rcStrict) 10385 { 10386 /* 10387 * Restore VM-exit control settings as we may not reenter this function the 10388 * next time around. 10389 */ 10390 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo; 10391 10392 /* We reload the initial value, trigger what we can of recalculations the 10393 next time around. From the looks of things, that's all that's required atm. */ 10394 if (pDbgState->fModifiedProcCtls) 10395 { 10396 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu)) 10397 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */ 10398 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial); 10399 AssertRC(rc2); 10400 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial; 10401 } 10402 10403 /* We're currently the only ones messing with this one, so just restore the 10404 cached value and reload the field. 
*/ 10405 if ( pDbgState->fModifiedProcCtls2 10406 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial) 10407 { 10408 int rc2 = VMX_VMCS_WRITE_32(pVCpu, VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial); 10409 AssertRC(rc2); 10410 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial; 10411 } 10412 10413 /* If we've modified the exception bitmap, we restore it and trigger 10414 reloading and partial recalculation the next time around. */ 10415 if (pDbgState->fModifiedXcptBitmap) 10416 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial; 10417 10418 return rcStrict; 10419 } 10420 10421 10422 /** 10423 * Configures VM-exit controls for current DBGF and DTrace settings. 10424 * 10425 * This updates @a pDbgState and the VMCS execution control fields to reflect 10426 * the necessary VM-exits demanded by DBGF and DTrace. 10427 * 10428 * @param pVCpu The cross context virtual CPU structure. 10429 * @param pVmxTransient The VMX-transient structure. May update 10430 * fUpdatedTscOffsettingAndPreemptTimer. 10431 * @param pDbgState The debug state. 10432 */ 10433 static void vmxHCPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState) 10434 { 10435 #ifndef IN_NEM_DARWIN 10436 /* 10437 * Take down the dtrace serial number so we can spot changes. 10438 */ 10439 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO(); 10440 ASMCompilerBarrier(); 10441 #endif 10442 10443 /* 10444 * We'll rebuild most of the middle block of data members (holding the 10445 * current settings) as we go along here, so start by clearing it all. 10446 */ 10447 pDbgState->bmXcptExtra = 0; 10448 pDbgState->fCpe1Extra = 0; 10449 pDbgState->fCpe1Unwanted = 0; 10450 pDbgState->fCpe2Extra = 0; 10451 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++) 10452 pDbgState->bmExitsToCheck[i] = 0; 10453 10454 /* 10455 * Software interrupts (INT XXh) - no idea how to trigger these... 10456 */ 10457 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 10458 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE) 10459 || VBOXVMM_INT_SOFTWARE_ENABLED()) 10460 { 10461 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI); 10462 } 10463 10464 /* 10465 * INT3 breakpoints - triggered by #BP exceptions. 10466 */ 10467 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0) 10468 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP); 10469 10470 /* 10471 * Exception bitmap and XCPT events+probes. 
10472 */ 10473 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++) 10474 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt))) 10475 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt); 10476 10477 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE); 10478 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB); 10479 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP); 10480 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF); 10481 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR); 10482 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_UD); 10483 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM); 10484 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF); 10485 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS); 10486 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP); 10487 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS); 10488 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP); 10489 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF); 10490 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF); 10491 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC); 10492 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF); 10493 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE); 10494 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX); 10495 10496 if (pDbgState->bmXcptExtra) 10497 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI); 10498 10499 /* 10500 * Process events and probes for VM-exits, making sure we get the wanted VM-exits. 10501 * 10502 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does. 10503 * So, when adding/changing/removing please don't forget to update it. 10504 * 10505 * Some of the macros are picking up local variables to save horizontal space, 10506 * (being able to see it in a table is the lesser evil here). 
10507 */ 10508 #define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \ 10509 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \ 10510 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() ) 10511 #define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \ 10512 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \ 10513 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \ 10514 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \ 10515 } else do { } while (0) 10516 #define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \ 10517 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \ 10518 { \ 10519 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \ 10520 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \ 10521 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \ 10522 } else do { } while (0) 10523 #define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \ 10524 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \ 10525 { \ 10526 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \ 10527 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \ 10528 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \ 10529 } else do { } while (0) 10530 #define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \ 10531 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \ 10532 { \ 10533 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \ 10534 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \ 10535 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \ 10536 } else do { } while (0) 10537 10538 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */ 10539 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */ 10540 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */ 10541 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */ 10542 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */ 10543 10544 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */ 10545 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID); 10546 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */ 10547 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC); 10548 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */ 10549 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT); 10550 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */ 10551 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD); 10552 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT); 10553 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG); 10554 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT); 10555 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC); 10556 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT); 10557 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC); 10558 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */ 10559 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM); 10560 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */ 10561 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL); 10562 
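/* Illustration only (not part of this changeset): given the macro definitions
   above, a line such as
       SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT);
   expands to roughly
       if (   DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INSTR_HALT)
           || VBOXVMM_INSTR_HALT_ENABLED())
       {
           pDbgState->fCpe1Extra |= VMX_PROC_CTLS_HLT_EXIT;
           ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_HLT);
       }
   i.e. when either the DBGF event or the dtrace probe is armed, it forces the
   corresponding processor-based execution control and flags the VM-exit for
   the per-exit check done later in vmxHCRunDebugHandleExit(). */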
SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */ 10563 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); 10564 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */ 10565 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); 10566 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */ 10567 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); 10568 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */ 10569 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST); 10570 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */ 10571 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD); 10572 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */ 10573 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME); 10574 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */ 10575 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE); 10576 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */ 10577 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF); 10578 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */ 10579 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON); 10580 10581 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ) 10582 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE)) 10583 { 10584 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR4 10585 | CPUMCTX_EXTRN_APIC_TPR); 10586 AssertRC(rc); 10587 10588 #if 0 /** @todo fix me */ 10589 pDbgState->fClearCr0Mask = true; 10590 pDbgState->fClearCr4Mask = true; 10591 #endif 10592 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)) 10593 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT; 10594 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE)) 10595 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT; 10596 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? */ 10597 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would 10598 require clearing here and in the loop if we start using it. */ 10599 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX); 10600 } 10601 else 10602 { 10603 if (pDbgState->fClearCr0Mask) 10604 { 10605 pDbgState->fClearCr0Mask = false; 10606 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR0); 10607 } 10608 if (pDbgState->fClearCr4Mask) 10609 { 10610 pDbgState->fClearCr4Mask = false; 10611 ASMAtomicUoOrU64(&VCPU_2_VMXSTATE(pVCpu).fCtxChanged, HM_CHANGED_GUEST_CR4); 10612 } 10613 } 10614 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX); 10615 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX); 10616 10617 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ) 10618 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE)) 10619 { 10620 /** @todo later, need to fix handler as it assumes this won't usually happen. */ 10621 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX); 10622 } 10623 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX); 10624 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX); 10625 10626 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? 
*/ 10627 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR); 10628 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); 10629 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR); 10630 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */ 10631 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT); 10632 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */ 10633 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR); 10634 #if 0 /** @todo too slow, fix handler. */ 10635 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT); 10636 #endif 10637 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE); 10638 10639 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT) 10640 || IS_EITHER_ENABLED(pVM, INSTR_SIDT) 10641 || IS_EITHER_ENABLED(pVM, INSTR_LGDT) 10642 || IS_EITHER_ENABLED(pVM, INSTR_LIDT)) 10643 { 10644 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT; 10645 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS); 10646 } 10647 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS); 10648 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS); 10649 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS); 10650 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS); 10651 10652 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT) 10653 || IS_EITHER_ENABLED(pVM, INSTR_STR) 10654 || IS_EITHER_ENABLED(pVM, INSTR_LLDT) 10655 || IS_EITHER_ENABLED(pVM, INSTR_LTR)) 10656 { 10657 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT; 10658 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS); 10659 } 10660 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS); 10661 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS); 10662 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS); 10663 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS); 10664 10665 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */ 10666 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT); 10667 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT); 10668 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP); 10669 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */ 10670 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID); 10671 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT); 10672 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD); 10673 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */ 10674 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV); 10675 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT); 10676 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND); 10677 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT); 10678 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID); 10679 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */ 10680 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC); 10681 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT); 10682 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED); 10683 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */ 
10684 SET_ONLY_XBM_IF_EITHER_EN(EXIT_XSAVES, VMX_EXIT_XSAVES); 10685 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */ 10686 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS); 10687 10688 #undef IS_EITHER_ENABLED 10689 #undef SET_ONLY_XBM_IF_EITHER_EN 10690 #undef SET_CPE1_XBM_IF_EITHER_EN 10691 #undef SET_CPEU_XBM_IF_EITHER_EN 10692 #undef SET_CPE2_XBM_IF_EITHER_EN 10693 10694 /* 10695 * Sanitize the control stuff. 10696 */ 10697 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1; 10698 if (pDbgState->fCpe2Extra) 10699 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS; 10700 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1; 10701 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0; 10702 #ifndef IN_NEM_DARWIN /** @todo */ 10703 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT)) 10704 { 10705 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true; 10706 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false; 10707 } 10708 #endif 10709 10710 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n", 10711 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra, 10712 pDbgState->fClearCr0Mask ? " clr-cr0" : "", 10713 pDbgState->fClearCr4Mask ? " clr-cr4" : "")); 10714 } 10715 10716 10717 /** 10718 * Fires off DBGF events and dtrace probes for a VM-exit, when it's 10719 * appropriate. 10720 * 10721 * The caller has checked the VM-exit against the 10722 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs 10723 * already, so we don't have to do that either. 10724 * 10725 * @returns Strict VBox status code (i.e. informational status codes too). 10726 * @param pVCpu The cross context virtual CPU structure. 10727 * @param pVmxTransient The VMX-transient structure. 10728 * @param uExitReason The VM-exit reason. 10729 * 10730 * @remarks The name of this function is displayed by dtrace, so keep it short 10731 * and to the point. No longer than 33 chars long, please. 10732 */ 10733 static VBOXSTRICTRC vmxHCHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason) 10734 { 10735 /* 10736 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the 10737 * same time check whether any corresponding Dtrace event is enabled (fDtrace). 10738 * 10739 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate 10740 * does. Must add/change/remove both places. Same ordering, please. 10741 * 10742 * Added/removed events must also be reflected in the next section 10743 * where we dispatch dtrace events. 
10744 */ 10745 bool fDtrace1 = false; 10746 bool fDtrace2 = false; 10747 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END; 10748 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END; 10749 uint32_t uEventArg = 0; 10750 #define SET_EXIT(a_EventSubName) \ 10751 do { \ 10752 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \ 10753 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \ 10754 } while (0) 10755 #define SET_BOTH(a_EventSubName) \ 10756 do { \ 10757 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \ 10758 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \ 10759 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \ 10760 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \ 10761 } while (0) 10762 switch (uExitReason) 10763 { 10764 case VMX_EXIT_MTF: 10765 return vmxHCExitMtf(pVCpu, pVmxTransient); 10766 10767 case VMX_EXIT_XCPT_OR_NMI: 10768 { 10769 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo); 10770 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo)) 10771 { 10772 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT: 10773 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT: 10774 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT: 10775 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST)) 10776 { 10777 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo)) 10778 { 10779 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient); 10780 uEventArg = pVmxTransient->uExitIntErrorCode; 10781 } 10782 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector); 10783 switch (enmEvent1) 10784 { 10785 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break; 10786 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break; 10787 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break; 10788 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break; 10789 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break; 10790 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break; 10791 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break; 10792 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break; 10793 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break; 10794 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break; 10795 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break; 10796 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break; 10797 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break; 10798 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break; 10799 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break; 10800 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break; 10801 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break; 10802 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break; 10803 default: break; 10804 } 10805 } 10806 else 10807 AssertFailed(); 10808 break; 10809 10810 case VMX_EXIT_INT_INFO_TYPE_SW_INT: 10811 uEventArg = idxVector; 10812 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE; 10813 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED(); 10814 break; 10815 } 10816 break; 10817 } 10818 10819 case VMX_EXIT_TRIPLE_FAULT: 10820 enmEvent1 = DBGFEVENT_TRIPLE_FAULT; 10821 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED(); 10822 break; 10823 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break; 10824 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break; 10825 case 
VMX_EXIT_EPT_MISCONFIG: SET_EXIT(VMX_EPT_MISCONFIG); break; 10826 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break; 10827 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break; 10828 10829 /* Instruction specific VM-exits: */ 10830 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break; 10831 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break; 10832 case VMX_EXIT_HLT: SET_BOTH(HALT); break; 10833 case VMX_EXIT_INVD: SET_BOTH(INVD); break; 10834 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break; 10835 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break; 10836 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break; 10837 case VMX_EXIT_RSM: SET_BOTH(RSM); break; 10838 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break; 10839 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break; 10840 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break; 10841 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break; 10842 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break; 10843 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break; 10844 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break; 10845 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break; 10846 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break; 10847 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break; 10848 case VMX_EXIT_MOV_CRX: 10849 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient); 10850 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ) 10851 SET_BOTH(CRX_READ); 10852 else 10853 SET_BOTH(CRX_WRITE); 10854 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual); 10855 break; 10856 case VMX_EXIT_MOV_DRX: 10857 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient); 10858 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual) 10859 == VMX_EXIT_QUAL_DRX_DIRECTION_READ) 10860 SET_BOTH(DRX_READ); 10861 else 10862 SET_BOTH(DRX_WRITE); 10863 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual); 10864 break; 10865 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break; 10866 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break; 10867 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break; 10868 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break; 10869 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break; 10870 case VMX_EXIT_GDTR_IDTR_ACCESS: 10871 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient); 10872 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID)) 10873 { 10874 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break; 10875 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break; 10876 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break; 10877 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break; 10878 } 10879 break; 10880 10881 case VMX_EXIT_LDTR_TR_ACCESS: 10882 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient); 10883 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID)) 10884 { 10885 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break; 10886 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break; 10887 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break; 10888 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break; 10889 } 10890 break; 10891 10892 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break; 10893 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break; 10894 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break; 10895 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break; 10896 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break; 10897 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break; 10898 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break; 10899 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break; 10900 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break; 
10901 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break; 10902 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break; 10903 10904 /* Events that aren't relevant at this point. */ 10905 case VMX_EXIT_EXT_INT: 10906 case VMX_EXIT_INT_WINDOW: 10907 case VMX_EXIT_NMI_WINDOW: 10908 case VMX_EXIT_TPR_BELOW_THRESHOLD: 10909 case VMX_EXIT_PREEMPT_TIMER: 10910 case VMX_EXIT_IO_INSTR: 10911 break; 10912 10913 /* Errors and unexpected events. */ 10914 case VMX_EXIT_INIT_SIGNAL: 10915 case VMX_EXIT_SIPI: 10916 case VMX_EXIT_IO_SMI: 10917 case VMX_EXIT_SMI: 10918 case VMX_EXIT_ERR_INVALID_GUEST_STATE: 10919 case VMX_EXIT_ERR_MSR_LOAD: 10920 case VMX_EXIT_ERR_MACHINE_CHECK: 10921 case VMX_EXIT_PML_FULL: 10922 case VMX_EXIT_VIRTUALIZED_EOI: 10923 break; 10924 10925 default: 10926 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason)); 10927 break; 10928 } 10929 #undef SET_BOTH 10930 #undef SET_EXIT 10931 10932 /* 10933 * Dtrace tracepoints go first. We do them here at once so we don't 10934 * have to copy the guest state saving and stuff a few dozen times. 10935 * Down side is that we've got to repeat the switch, though this time 10936 * we use enmEvent since the probes are a subset of what DBGF does. 10937 */ 10938 if (fDtrace1 || fDtrace2) 10939 { 10940 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient); 10941 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); 10942 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 10943 switch (enmEvent1) 10944 { 10945 /** @todo consider which extra parameters would be helpful for each probe. */ 10946 case DBGFEVENT_END: break; 10947 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break; 10948 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break; 10949 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break; 10950 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break; 10951 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break; 10952 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break; 10953 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break; 10954 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break; 10955 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break; 10956 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break; 10957 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break; 10958 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break; 10959 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break; 10960 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break; 10961 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break; 10962 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break; 10963 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break; 10964 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break; 10965 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break; 10966 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break; 10967 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break; 10968 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break; 10969 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break; 10970 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break; 10971 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break; 10972 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break; 10973 case 
DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break; 10974 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break; 10975 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break; 10976 case DBGFEVENT_INSTR_DRX_READ: VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break; 10977 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break; 10978 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break; 10979 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx, 10980 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break; 10981 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break; 10982 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break; 10983 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break; 10984 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break; 10985 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break; 10986 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break; 10987 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break; 10988 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break; 10989 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break; 10990 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break; 10991 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break; 10992 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break; 10993 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break; 10994 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break; 10995 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break; 10996 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break; 10997 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break; 10998 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break; 10999 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break; 11000 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break; 11001 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break; 11002 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break; 11003 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break; 11004 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break; 11005 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break; 11006 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break; 11007 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break; 11008 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break; 11009 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break; 11010 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break; 11011 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break; 11012 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break; 11013 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break; 11014 } 11015 switch (enmEvent2) 11016 { 11017 /** @todo consider which extra parameters would be helpful for each probe. 
*/ 11018 case DBGFEVENT_END: break; 11019 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break; 11020 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break; 11021 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break; 11022 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break; 11023 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break; 11024 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break; 11025 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break; 11026 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break; 11027 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break; 11028 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break; 11029 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break; 11030 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break; 11031 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break; 11032 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break; 11033 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx, 11034 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break; 11035 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break; 11036 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break; 11037 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break; 11038 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break; 11039 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break; 11040 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break; 11041 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break; 11042 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break; 11043 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break; 11044 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break; 11045 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break; 11046 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break; 11047 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break; 11048 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break; 11049 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break; 11050 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break; 11051 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break; 11052 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break; 11053 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break; 11054 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break; 11055 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break; 11056 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break; 11057 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break; 11058 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break; 11059 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break; 11060 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break; 11061 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break; 11062 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break; 11063 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, 
pCtx); break; 11064 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); break; 11065 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break; 11066 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break; 11067 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break; 11068 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break; 11069 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break; 11070 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break; 11071 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break; 11072 } 11073 } 11074 11075 /* 11076 * Fire of the DBGF event, if enabled (our check here is just a quick one, 11077 * the DBGF call will do a full check). 11078 * 11079 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap. 11080 * Note! If we have to events, we prioritize the first, i.e. the instruction 11081 * one, in order to avoid event nesting. 11082 */ 11083 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 11084 if ( enmEvent1 != DBGFEVENT_END 11085 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1)) 11086 { 11087 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP); 11088 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg); 11089 if (rcStrict != VINF_SUCCESS) 11090 return rcStrict; 11091 } 11092 else if ( enmEvent2 != DBGFEVENT_END 11093 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2)) 11094 { 11095 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP); 11096 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg); 11097 if (rcStrict != VINF_SUCCESS) 11098 return rcStrict; 11099 } 11100 11101 return VINF_SUCCESS; 11102 } 11103 11104 11105 /** 11106 * Single-stepping VM-exit filtering. 11107 * 11108 * This is preprocessing the VM-exits and deciding whether we've gotten far 11109 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit 11110 * handling is performed. 11111 * 11112 * @returns Strict VBox status code (i.e. informational status codes too). 11113 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 11114 * @param pVmxTransient The VMX-transient structure. 11115 * @param pDbgState The debug state. 11116 */ 11117 DECLINLINE(VBOXSTRICTRC) vmxHCRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState) 11118 { 11119 /* 11120 * Expensive (saves context) generic dtrace VM-exit probe. 11121 */ 11122 uint32_t const uExitReason = pVmxTransient->uExitReason; 11123 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED()) 11124 { /* more likely */ } 11125 else 11126 { 11127 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient); 11128 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL); 11129 AssertRC(rc); 11130 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual); 11131 } 11132 11133 #ifndef IN_NEM_DARWIN 11134 /* 11135 * Check for host NMI, just to get that out of the way. 
11136 */ 11137 if (uExitReason != VMX_EXIT_XCPT_OR_NMI) 11138 { /* normally likely */ } 11139 else 11140 { 11141 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient); 11142 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo); 11143 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI) 11144 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo); 11145 } 11146 #endif 11147 11148 /* 11149 * Check for single stepping event if we're stepping. 11150 */ 11151 if (VCPU_2_VMXSTATE(pVCpu).fSingleInstruction) 11152 { 11153 switch (uExitReason) 11154 { 11155 case VMX_EXIT_MTF: 11156 return vmxHCExitMtf(pVCpu, pVmxTransient); 11157 11158 /* Various events: */ 11159 case VMX_EXIT_XCPT_OR_NMI: 11160 case VMX_EXIT_EXT_INT: 11161 case VMX_EXIT_TRIPLE_FAULT: 11162 case VMX_EXIT_INT_WINDOW: 11163 case VMX_EXIT_NMI_WINDOW: 11164 case VMX_EXIT_TASK_SWITCH: 11165 case VMX_EXIT_TPR_BELOW_THRESHOLD: 11166 case VMX_EXIT_APIC_ACCESS: 11167 case VMX_EXIT_EPT_VIOLATION: 11168 case VMX_EXIT_EPT_MISCONFIG: 11169 case VMX_EXIT_PREEMPT_TIMER: 11170 11171 /* Instruction specific VM-exits: */ 11172 case VMX_EXIT_CPUID: 11173 case VMX_EXIT_GETSEC: 11174 case VMX_EXIT_HLT: 11175 case VMX_EXIT_INVD: 11176 case VMX_EXIT_INVLPG: 11177 case VMX_EXIT_RDPMC: 11178 case VMX_EXIT_RDTSC: 11179 case VMX_EXIT_RSM: 11180 case VMX_EXIT_VMCALL: 11181 case VMX_EXIT_VMCLEAR: 11182 case VMX_EXIT_VMLAUNCH: 11183 case VMX_EXIT_VMPTRLD: 11184 case VMX_EXIT_VMPTRST: 11185 case VMX_EXIT_VMREAD: 11186 case VMX_EXIT_VMRESUME: 11187 case VMX_EXIT_VMWRITE: 11188 case VMX_EXIT_VMXOFF: 11189 case VMX_EXIT_VMXON: 11190 case VMX_EXIT_MOV_CRX: 11191 case VMX_EXIT_MOV_DRX: 11192 case VMX_EXIT_IO_INSTR: 11193 case VMX_EXIT_RDMSR: 11194 case VMX_EXIT_WRMSR: 11195 case VMX_EXIT_MWAIT: 11196 case VMX_EXIT_MONITOR: 11197 case VMX_EXIT_PAUSE: 11198 case VMX_EXIT_GDTR_IDTR_ACCESS: 11199 case VMX_EXIT_LDTR_TR_ACCESS: 11200 case VMX_EXIT_INVEPT: 11201 case VMX_EXIT_RDTSCP: 11202 case VMX_EXIT_INVVPID: 11203 case VMX_EXIT_WBINVD: 11204 case VMX_EXIT_XSETBV: 11205 case VMX_EXIT_RDRAND: 11206 case VMX_EXIT_INVPCID: 11207 case VMX_EXIT_VMFUNC: 11208 case VMX_EXIT_RDSEED: 11209 case VMX_EXIT_XSAVES: 11210 case VMX_EXIT_XRSTORS: 11211 { 11212 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP); 11213 AssertRCReturn(rc, rc); 11214 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart 11215 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart) 11216 return VINF_EM_DBG_STEPPED; 11217 break; 11218 } 11219 11220 /* Errors and unexpected events: */ 11221 case VMX_EXIT_INIT_SIGNAL: 11222 case VMX_EXIT_SIPI: 11223 case VMX_EXIT_IO_SMI: 11224 case VMX_EXIT_SMI: 11225 case VMX_EXIT_ERR_INVALID_GUEST_STATE: 11226 case VMX_EXIT_ERR_MSR_LOAD: 11227 case VMX_EXIT_ERR_MACHINE_CHECK: 11228 case VMX_EXIT_PML_FULL: 11229 case VMX_EXIT_VIRTUALIZED_EOI: 11230 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault like, so I guess we must process it? */ 11231 break; 11232 11233 default: 11234 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason)); 11235 break; 11236 } 11237 } 11238 11239 /* 11240 * Check for debugger event breakpoints and dtrace probes. 11241 */ 11242 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U 11243 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) ) 11244 { 11245 VBOXSTRICTRC rcStrict = vmxHCHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason); 11246 if (rcStrict != VINF_SUCCESS) 11247 return rcStrict; 11248 } 11249 11250 /* 11251 * Normal processing. 
11252 */ 11253 #ifdef HMVMX_USE_FUNCTION_TABLE 11254 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient); 11255 #else 11256 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason); 11257 #endif 11258 } 11259 11260 /** @} */ -
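The debug run loop that drives these helpers (hmR0VmxRunGuestCodeDebug on the ring-0 side) is not part of the hunk shown above. What follows is only a simplified sketch, using the vmxHC* entry points introduced here, of how the pieces fit together; the function name and loop body are illustrative, and the real loop additionally handles VMCS switching, preemption, force flags and the uDtraceSettingsSeqNo re-check.

    static VBOXSTRICTRC vmxSketchRunGuestCodeDebug(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    {
        VMXRUNDBGSTATE DbgState;
        vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);       /* remember starting RIP/CS + initial controls */

        VBOXSTRICTRC rcStrict = VINF_SUCCESS;
        for (;;)
        {
            /* Rebuild the set of wanted VM-exits from the current DBGF/dtrace settings
               (the real loop only redoes this when uDtraceSettingsSeqNo changes). */
            vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);

            /* ... normal pre-run preparation, then with interrupts disabled: ... */
            vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState);  /* push the extra controls into the VMCS */

            /* ... VMLAUNCH/VMRESUME and collection of the VM-exit info elided ... */

            rcStrict = vmxHCRunDebugHandleExit(pVCpu, pVmxTransient, &DbgState);
            if (rcStrict != VINF_SUCCESS)   /* VINF_EM_DBG_STEPPED, DBGF events, errors, ... */
                break;
        }

        /* Undo the debug-only VMCS modifications before handing back to the normal loop. */
        return vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
    }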
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r93574 r93748 2140 2140 * @returns VBox status code. 2141 2141 * @param pVM The cross context VM structure. 2142 * 2143 * @sa nemR3DarwinSetupLbrMsrRange 2142 2144 */ 2143 2145 static int hmR0VmxSetupLbrMsrRange(PVMCC pVM) … … 6871 6873 6872 6874 /** 6873 * Transient per-VCPU debug state of VMCS and related info. we save/restore in6874 * the debug run loop.6875 */6876 typedef struct VMXRUNDBGSTATE6877 {6878 /** The RIP we started executing at. This is for detecting that we stepped. */6879 uint64_t uRipStart;6880 /** The CS we started executing with. */6881 uint16_t uCsStart;6882 6883 /** Whether we've actually modified the 1st execution control field. */6884 bool fModifiedProcCtls : 1;6885 /** Whether we've actually modified the 2nd execution control field. */6886 bool fModifiedProcCtls2 : 1;6887 /** Whether we've actually modified the exception bitmap. */6888 bool fModifiedXcptBitmap : 1;6889 6890 /** We desire the modified the CR0 mask to be cleared. */6891 bool fClearCr0Mask : 1;6892 /** We desire the modified the CR4 mask to be cleared. */6893 bool fClearCr4Mask : 1;6894 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC. */6895 uint32_t fCpe1Extra;6896 /** Stuff we do not want in VMX_VMCS32_CTRL_PROC_EXEC. */6897 uint32_t fCpe1Unwanted;6898 /** Stuff we need in VMX_VMCS32_CTRL_PROC_EXEC2. */6899 uint32_t fCpe2Extra;6900 /** Extra stuff we need in VMX_VMCS32_CTRL_EXCEPTION_BITMAP. */6901 uint32_t bmXcptExtra;6902 /** The sequence number of the Dtrace provider settings the state was6903 * configured against. */6904 uint32_t uDtraceSettingsSeqNo;6905 /** VM-exits to check (one bit per VM-exit). */6906 uint32_t bmExitsToCheck[3];6907 6908 /** The initial VMX_VMCS32_CTRL_PROC_EXEC value (helps with restore). */6909 uint32_t fProcCtlsInitial;6910 /** The initial VMX_VMCS32_CTRL_PROC_EXEC2 value (helps with restore). */6911 uint32_t fProcCtls2Initial;6912 /** The initial VMX_VMCS32_CTRL_EXCEPTION_BITMAP value (helps with restore). */6913 uint32_t bmXcptInitial;6914 } VMXRUNDBGSTATE;6915 AssertCompileMemberSize(VMXRUNDBGSTATE, bmExitsToCheck, (VMX_EXIT_MAX + 1 + 31) / 32 * 4);6916 typedef VMXRUNDBGSTATE *PVMXRUNDBGSTATE;6917 6918 6919 /**6920 * Initializes the VMXRUNDBGSTATE structure.6921 *6922 * @param pVCpu The cross context virtual CPU structure of the6923 * calling EMT.6924 * @param pVmxTransient The VMX-transient structure.6925 * @param pDbgState The debug state to initialize.6926 */6927 static void hmR0VmxRunDebugStateInit(PVMCPUCC pVCpu, PCVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)6928 {6929 pDbgState->uRipStart = pVCpu->cpum.GstCtx.rip;6930 pDbgState->uCsStart = pVCpu->cpum.GstCtx.cs.Sel;6931 6932 pDbgState->fModifiedProcCtls = false;6933 pDbgState->fModifiedProcCtls2 = false;6934 pDbgState->fModifiedXcptBitmap = false;6935 pDbgState->fClearCr0Mask = false;6936 pDbgState->fClearCr4Mask = false;6937 pDbgState->fCpe1Extra = 0;6938 pDbgState->fCpe1Unwanted = 0;6939 pDbgState->fCpe2Extra = 0;6940 pDbgState->bmXcptExtra = 0;6941 pDbgState->fProcCtlsInitial = pVmxTransient->pVmcsInfo->u32ProcCtls;6942 pDbgState->fProcCtls2Initial = pVmxTransient->pVmcsInfo->u32ProcCtls2;6943 pDbgState->bmXcptInitial = pVmxTransient->pVmcsInfo->u32XcptBitmap;6944 }6945 6946 6947 /**6948 * Updates the VMSC fields with changes requested by @a pDbgState.6949 *6950 * This is performed after hmR0VmxPreRunGuestDebugStateUpdate as well6951 * immediately before executing guest code, i.e. 
when interrupts are disabled.6952 * We don't check status codes here as we cannot easily assert or return in the6953 * latter case.6954 *6955 * @param pVCpu The cross context virtual CPU structure.6956 * @param pVmxTransient The VMX-transient structure.6957 * @param pDbgState The debug state.6958 */6959 static void hmR0VmxPreRunGuestDebugStateApply(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)6960 {6961 /*6962 * Ensure desired flags in VMCS control fields are set.6963 * (Ignoring write failure here, as we're committed and it's just debug extras.)6964 *6965 * Note! We load the shadow CR0 & CR4 bits when we flag the clearing, so6966 * there should be no stale data in pCtx at this point.6967 */6968 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;6969 if ( (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Extra) != pDbgState->fCpe1Extra6970 || (pVmcsInfo->u32ProcCtls & pDbgState->fCpe1Unwanted))6971 {6972 pVmcsInfo->u32ProcCtls |= pDbgState->fCpe1Extra;6973 pVmcsInfo->u32ProcCtls &= ~pDbgState->fCpe1Unwanted;6974 VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVmcsInfo->u32ProcCtls);6975 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC: %#RX32\n", pVmcsInfo->u32ProcCtls));6976 pDbgState->fModifiedProcCtls = true;6977 }6978 6979 if ((pVmcsInfo->u32ProcCtls2 & pDbgState->fCpe2Extra) != pDbgState->fCpe2Extra)6980 {6981 pVmcsInfo->u32ProcCtls2 |= pDbgState->fCpe2Extra;6982 VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pVmcsInfo->u32ProcCtls2);6983 Log6Func(("VMX_VMCS32_CTRL_PROC_EXEC2: %#RX32\n", pVmcsInfo->u32ProcCtls2));6984 pDbgState->fModifiedProcCtls2 = true;6985 }6986 6987 if ((pVmcsInfo->u32XcptBitmap & pDbgState->bmXcptExtra) != pDbgState->bmXcptExtra)6988 {6989 pVmcsInfo->u32XcptBitmap |= pDbgState->bmXcptExtra;6990 VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVmcsInfo->u32XcptBitmap);6991 Log6Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP: %#RX32\n", pVmcsInfo->u32XcptBitmap));6992 pDbgState->fModifiedXcptBitmap = true;6993 }6994 6995 if (pDbgState->fClearCr0Mask && pVmcsInfo->u64Cr0Mask != 0)6996 {6997 pVmcsInfo->u64Cr0Mask = 0;6998 VMXWriteVmcsNw(VMX_VMCS_CTRL_CR0_MASK, 0);6999 Log6Func(("VMX_VMCS_CTRL_CR0_MASK: 0\n"));7000 }7001 7002 if (pDbgState->fClearCr4Mask && pVmcsInfo->u64Cr4Mask != 0)7003 {7004 pVmcsInfo->u64Cr4Mask = 0;7005 VMXWriteVmcsNw(VMX_VMCS_CTRL_CR4_MASK, 0);7006 Log6Func(("VMX_VMCS_CTRL_CR4_MASK: 0\n"));7007 }7008 7009 NOREF(pVCpu);7010 }7011 7012 7013 /**7014 * Restores VMCS fields that were changed by hmR0VmxPreRunGuestDebugStateApply for7015 * re-entry next time around.7016 *7017 * @returns Strict VBox status code (i.e. informational status codes too).7018 * @param pVCpu The cross context virtual CPU structure.7019 * @param pVmxTransient The VMX-transient structure.7020 * @param pDbgState The debug state.7021 * @param rcStrict The return code from executing the guest using single7022 * stepping.7023 */7024 static VBOXSTRICTRC hmR0VmxRunDebugStateRevert(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState,7025 VBOXSTRICTRC rcStrict)7026 {7027 /*7028 * Restore VM-exit control settings as we may not reenter this function the7029 * next time around.7030 */7031 PVMXVMCSINFO pVmcsInfo = pVmxTransient->pVmcsInfo;7032 7033 /* We reload the initial value, trigger what we can of recalculations the7034 next time around. From the looks of things, that's all that's required atm. 
*/7035 if (pDbgState->fModifiedProcCtls)7036 {7037 if (!(pDbgState->fProcCtlsInitial & VMX_PROC_CTLS_MOV_DR_EXIT) && CPUMIsHyperDebugStateActive(pVCpu))7038 pDbgState->fProcCtlsInitial |= VMX_PROC_CTLS_MOV_DR_EXIT; /* Avoid assertion in hmR0VmxLeave */7039 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pDbgState->fProcCtlsInitial);7040 AssertRC(rc2);7041 pVmcsInfo->u32ProcCtls = pDbgState->fProcCtlsInitial;7042 }7043 7044 /* We're currently the only ones messing with this one, so just restore the7045 cached value and reload the field. */7046 if ( pDbgState->fModifiedProcCtls27047 && pVmcsInfo->u32ProcCtls2 != pDbgState->fProcCtls2Initial)7048 {7049 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, pDbgState->fProcCtls2Initial);7050 AssertRC(rc2);7051 pVmcsInfo->u32ProcCtls2 = pDbgState->fProcCtls2Initial;7052 }7053 7054 /* If we've modified the exception bitmap, we restore it and trigger7055 reloading and partial recalculation the next time around. */7056 if (pDbgState->fModifiedXcptBitmap)7057 pVmcsInfo->u32XcptBitmap = pDbgState->bmXcptInitial;7058 7059 return rcStrict;7060 }7061 7062 7063 /**7064 * Configures VM-exit controls for current DBGF and DTrace settings.7065 *7066 * This updates @a pDbgState and the VMCS execution control fields to reflect7067 * the necessary VM-exits demanded by DBGF and DTrace.7068 *7069 * @param pVCpu The cross context virtual CPU structure.7070 * @param pVmxTransient The VMX-transient structure. May update7071 * fUpdatedTscOffsettingAndPreemptTimer.7072 * @param pDbgState The debug state.7073 */7074 static void hmR0VmxPreRunGuestDebugStateUpdate(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)7075 {7076 /*7077 * Take down the dtrace serial number so we can spot changes.7078 */7079 pDbgState->uDtraceSettingsSeqNo = VBOXVMM_GET_SETTINGS_SEQ_NO();7080 ASMCompilerBarrier();7081 7082 /*7083 * We'll rebuild most of the middle block of data members (holding the7084 * current settings) as we go along here, so start by clearing it all.7085 */7086 pDbgState->bmXcptExtra = 0;7087 pDbgState->fCpe1Extra = 0;7088 pDbgState->fCpe1Unwanted = 0;7089 pDbgState->fCpe2Extra = 0;7090 for (unsigned i = 0; i < RT_ELEMENTS(pDbgState->bmExitsToCheck); i++)7091 pDbgState->bmExitsToCheck[i] = 0;7092 7093 /*7094 * Software interrupts (INT XXh) - no idea how to trigger these...7095 */7096 PVMCC pVM = pVCpu->CTX_SUFF(pVM);7097 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE)7098 || VBOXVMM_INT_SOFTWARE_ENABLED())7099 {7100 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);7101 }7102 7103 /*7104 * INT3 breakpoints - triggered by #BP exceptions.7105 */7106 if (pVM->dbgf.ro.cEnabledInt3Breakpoints > 0)7107 pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);7108 7109 /*7110 * Exception bitmap and XCPT events+probes.7111 */7112 for (int iXcpt = 0; iXcpt < (DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST + 1); iXcpt++)7113 if (DBGF_IS_EVENT_ENABLED(pVM, (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + iXcpt)))7114 pDbgState->bmXcptExtra |= RT_BIT_32(iXcpt);7115 7116 if (VBOXVMM_XCPT_DE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DE);7117 if (VBOXVMM_XCPT_DB_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DB);7118 if (VBOXVMM_XCPT_BP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BP);7119 if (VBOXVMM_XCPT_OF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_OF);7120 if (VBOXVMM_XCPT_BR_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_BR);7121 if (VBOXVMM_XCPT_UD_ENABLED()) pDbgState->bmXcptExtra |= 
RT_BIT_32(X86_XCPT_UD);7122 if (VBOXVMM_XCPT_NM_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NM);7123 if (VBOXVMM_XCPT_DF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_DF);7124 if (VBOXVMM_XCPT_TS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_TS);7125 if (VBOXVMM_XCPT_NP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_NP);7126 if (VBOXVMM_XCPT_SS_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SS);7127 if (VBOXVMM_XCPT_GP_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_GP);7128 if (VBOXVMM_XCPT_PF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_PF);7129 if (VBOXVMM_XCPT_MF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_MF);7130 if (VBOXVMM_XCPT_AC_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_AC);7131 if (VBOXVMM_XCPT_XF_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_XF);7132 if (VBOXVMM_XCPT_VE_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_VE);7133 if (VBOXVMM_XCPT_SX_ENABLED()) pDbgState->bmXcptExtra |= RT_BIT_32(X86_XCPT_SX);7134 7135 if (pDbgState->bmXcptExtra)7136 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_XCPT_OR_NMI);7137 7138 /*7139 * Process events and probes for VM-exits, making sure we get the wanted VM-exits.7140 *7141 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does.7142 * So, when adding/changing/removing please don't forget to update it.7143 *7144 * Some of the macros are picking up local variables to save horizontal space,7145 * (being able to see it in a table is the lesser evil here).7146 */7147 #define IS_EITHER_ENABLED(a_pVM, a_EventSubName) \7148 ( DBGF_IS_EVENT_ENABLED(a_pVM, RT_CONCAT(DBGFEVENT_, a_EventSubName)) \7149 || RT_CONCAT3(VBOXVMM_, a_EventSubName, _ENABLED)() )7150 #define SET_ONLY_XBM_IF_EITHER_EN(a_EventSubName, a_uExit) \7151 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \7152 { AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \7153 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \7154 } else do { } while (0)7155 #define SET_CPE1_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec) \7156 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \7157 { \7158 (pDbgState)->fCpe1Extra |= (a_fCtrlProcExec); \7159 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \7160 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \7161 } else do { } while (0)7162 #define SET_CPEU_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fUnwantedCtrlProcExec) \7163 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \7164 { \7165 (pDbgState)->fCpe1Unwanted |= (a_fUnwantedCtrlProcExec); \7166 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \7167 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \7168 } else do { } while (0)7169 #define SET_CPE2_XBM_IF_EITHER_EN(a_EventSubName, a_uExit, a_fCtrlProcExec2) \7170 if (IS_EITHER_ENABLED(pVM, a_EventSubName)) \7171 { \7172 (pDbgState)->fCpe2Extra |= (a_fCtrlProcExec2); \7173 AssertCompile((unsigned)(a_uExit) < sizeof(pDbgState->bmExitsToCheck) * 8); \7174 ASMBitSet((pDbgState)->bmExitsToCheck, a_uExit); \7175 } else do { } while (0)7176 7177 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */7178 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */7179 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */7180 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here 
*/7181 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */7182 7183 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */7184 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID);7185 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */7186 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC);7187 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */7188 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT);7189 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */7190 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD);7191 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT);7192 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVLPG, VMX_EXIT_INVLPG);7193 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDPMC, VMX_EXIT_RDPMC, VMX_PROC_CTLS_RDPMC_EXIT);7194 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDPMC, VMX_EXIT_RDPMC);7195 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT);7196 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC);7197 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */7198 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM);7199 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */7200 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL);7201 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */7202 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR);7203 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional */7204 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH);7205 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */7206 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD);7207 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */7208 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST);7209 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */7210 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD);7211 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */7212 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME);7213 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */7214 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE);7215 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */7216 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF);7217 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */7218 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON);7219 7220 if ( IS_EITHER_ENABLED(pVM, INSTR_CRX_READ)7221 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))7222 {7223 int rc = hmR0VmxImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR47224 | CPUMCTX_EXTRN_APIC_TPR);7225 AssertRC(rc);7226 7227 #if 0 /** @todo fix me */7228 pDbgState->fClearCr0Mask = true;7229 pDbgState->fClearCr4Mask = true;7230 #endif7231 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_READ))7232 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_STORE_EXIT | VMX_PROC_CTLS_CR8_STORE_EXIT;7233 if (IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE))7234 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_CR3_LOAD_EXIT | VMX_PROC_CTLS_CR8_LOAD_EXIT;7235 pDbgState->fCpe1Unwanted |= VMX_PROC_CTLS_USE_TPR_SHADOW; /* risky? 
*/7236 /* Note! We currently don't use VMX_VMCS32_CTRL_CR3_TARGET_COUNT. It would7237 require clearing here and in the loop if we start using it. */7238 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_CRX);7239 }7240 else7241 {7242 if (pDbgState->fClearCr0Mask)7243 {7244 pDbgState->fClearCr0Mask = false;7245 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0);7246 }7247 if (pDbgState->fClearCr4Mask)7248 {7249 pDbgState->fClearCr4Mask = false;7250 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR4);7251 }7252 }7253 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_READ, VMX_EXIT_MOV_CRX);7254 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CRX_WRITE, VMX_EXIT_MOV_CRX);7255 7256 if ( IS_EITHER_ENABLED(pVM, INSTR_DRX_READ)7257 || IS_EITHER_ENABLED(pVM, INSTR_DRX_WRITE))7258 {7259 /** @todo later, need to fix handler as it assumes this won't usually happen. */7260 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_MOV_DRX);7261 }7262 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_READ, VMX_EXIT_MOV_DRX);7263 SET_ONLY_XBM_IF_EITHER_EN( EXIT_DRX_WRITE, VMX_EXIT_MOV_DRX);7264 7265 SET_CPEU_XBM_IF_EITHER_EN(INSTR_RDMSR, VMX_EXIT_RDMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS); /* risky clearing this? */7266 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDMSR, VMX_EXIT_RDMSR);7267 SET_CPEU_XBM_IF_EITHER_EN(INSTR_WRMSR, VMX_EXIT_WRMSR, VMX_PROC_CTLS_USE_MSR_BITMAPS);7268 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WRMSR, VMX_EXIT_WRMSR);7269 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MWAIT, VMX_EXIT_MWAIT, VMX_PROC_CTLS_MWAIT_EXIT); /* paranoia */7270 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MWAIT, VMX_EXIT_MWAIT);7271 SET_CPE1_XBM_IF_EITHER_EN(INSTR_MONITOR, VMX_EXIT_MONITOR, VMX_PROC_CTLS_MONITOR_EXIT); /* paranoia */7272 SET_ONLY_XBM_IF_EITHER_EN( EXIT_MONITOR, VMX_EXIT_MONITOR);7273 #if 0 /** @todo too slow, fix handler. */7274 SET_CPE1_XBM_IF_EITHER_EN(INSTR_PAUSE, VMX_EXIT_PAUSE, VMX_PROC_CTLS_PAUSE_EXIT);7275 #endif7276 SET_ONLY_XBM_IF_EITHER_EN( EXIT_PAUSE, VMX_EXIT_PAUSE);7277 7278 if ( IS_EITHER_ENABLED(pVM, INSTR_SGDT)7279 || IS_EITHER_ENABLED(pVM, INSTR_SIDT)7280 || IS_EITHER_ENABLED(pVM, INSTR_LGDT)7281 || IS_EITHER_ENABLED(pVM, INSTR_LIDT))7282 {7283 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;7284 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_GDTR_IDTR_ACCESS);7285 }7286 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SGDT, VMX_EXIT_GDTR_IDTR_ACCESS);7287 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SIDT, VMX_EXIT_GDTR_IDTR_ACCESS);7288 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LGDT, VMX_EXIT_GDTR_IDTR_ACCESS);7289 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LIDT, VMX_EXIT_GDTR_IDTR_ACCESS);7290 7291 if ( IS_EITHER_ENABLED(pVM, INSTR_SLDT)7292 || IS_EITHER_ENABLED(pVM, INSTR_STR)7293 || IS_EITHER_ENABLED(pVM, INSTR_LLDT)7294 || IS_EITHER_ENABLED(pVM, INSTR_LTR))7295 {7296 pDbgState->fCpe2Extra |= VMX_PROC_CTLS2_DESC_TABLE_EXIT;7297 ASMBitSet(pDbgState->bmExitsToCheck, VMX_EXIT_LDTR_TR_ACCESS);7298 }7299 SET_ONLY_XBM_IF_EITHER_EN( EXIT_SLDT, VMX_EXIT_LDTR_TR_ACCESS);7300 SET_ONLY_XBM_IF_EITHER_EN( EXIT_STR, VMX_EXIT_LDTR_TR_ACCESS);7301 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LLDT, VMX_EXIT_LDTR_TR_ACCESS);7302 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS);7303 7304 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */7305 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT);7306 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT);7307 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP);7308 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */7309 SET_ONLY_XBM_IF_EITHER_EN( 
EXIT_VMX_INVVPID, VMX_EXIT_INVVPID);7310 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT);7311 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD);7312 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */7313 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV);7314 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT);7315 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDRAND, VMX_EXIT_RDRAND);7316 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT);7317 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID);7318 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */7319 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC);7320 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT);7321 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED);7322 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */7323 SET_ONLY_XBM_IF_EITHER_EN(EXIT_XSAVES, VMX_EXIT_XSAVES);7324 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */7325 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS);7326 7327 #undef IS_EITHER_ENABLED7328 #undef SET_ONLY_XBM_IF_EITHER_EN7329 #undef SET_CPE1_XBM_IF_EITHER_EN7330 #undef SET_CPEU_XBM_IF_EITHER_EN7331 #undef SET_CPE2_XBM_IF_EITHER_EN7332 7333 /*7334 * Sanitize the control stuff.7335 */7336 pDbgState->fCpe2Extra &= g_HmMsrs.u.vmx.ProcCtls2.n.allowed1;7337 if (pDbgState->fCpe2Extra)7338 pDbgState->fCpe1Extra |= VMX_PROC_CTLS_USE_SECONDARY_CTLS;7339 pDbgState->fCpe1Extra &= g_HmMsrs.u.vmx.ProcCtls.n.allowed1;7340 pDbgState->fCpe1Unwanted &= ~g_HmMsrs.u.vmx.ProcCtls.n.allowed0;7341 if (pVCpu->hmr0.s.fDebugWantRdTscExit != RT_BOOL(pDbgState->fCpe1Extra & VMX_PROC_CTLS_RDTSC_EXIT))7342 {7343 pVCpu->hmr0.s.fDebugWantRdTscExit ^= true;7344 pVmxTransient->fUpdatedTscOffsettingAndPreemptTimer = false;7345 }7346 7347 Log6(("HM: debug state: cpe1=%#RX32 cpeu=%#RX32 cpe2=%#RX32%s%s\n",7348 pDbgState->fCpe1Extra, pDbgState->fCpe1Unwanted, pDbgState->fCpe2Extra,7349 pDbgState->fClearCr0Mask ? " clr-cr0" : "",7350 pDbgState->fClearCr4Mask ? " clr-cr4" : ""));7351 }7352 7353 7354 /**7355 * Fires off DBGF events and dtrace probes for a VM-exit, when it's7356 * appropriate.7357 *7358 * The caller has checked the VM-exit against the7359 * VMXRUNDBGSTATE::bmExitsToCheck bitmap. The caller has checked for NMIs7360 * already, so we don't have to do that either.7361 *7362 * @returns Strict VBox status code (i.e. informational status codes too).7363 * @param pVCpu The cross context virtual CPU structure.7364 * @param pVmxTransient The VMX-transient structure.7365 * @param uExitReason The VM-exit reason.7366 *7367 * @remarks The name of this function is displayed by dtrace, so keep it short7368 * and to the point. No longer than 33 chars long, please.7369 */7370 static VBOXSTRICTRC hmR0VmxHandleExitDtraceEvents(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, uint32_t uExitReason)7371 {7372 /*7373 * Translate the event into a DBGF event (enmEvent + uEventArg) and at the7374 * same time check whether any corresponding Dtrace event is enabled (fDtrace).7375 *7376 * Note! This is the reverse operation of what hmR0VmxPreRunGuestDebugStateUpdate7377 * does. Must add/change/remove both places. 
Same ordering, please.7378 *7379 * Added/removed events must also be reflected in the next section7380 * where we dispatch dtrace events.7381 */7382 bool fDtrace1 = false;7383 bool fDtrace2 = false;7384 DBGFEVENTTYPE enmEvent1 = DBGFEVENT_END;7385 DBGFEVENTTYPE enmEvent2 = DBGFEVENT_END;7386 uint32_t uEventArg = 0;7387 #define SET_EXIT(a_EventSubName) \7388 do { \7389 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \7390 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \7391 } while (0)7392 #define SET_BOTH(a_EventSubName) \7393 do { \7394 enmEvent1 = RT_CONCAT(DBGFEVENT_INSTR_, a_EventSubName); \7395 enmEvent2 = RT_CONCAT(DBGFEVENT_EXIT_, a_EventSubName); \7396 fDtrace1 = RT_CONCAT3(VBOXVMM_INSTR_, a_EventSubName, _ENABLED)(); \7397 fDtrace2 = RT_CONCAT3(VBOXVMM_EXIT_, a_EventSubName, _ENABLED)(); \7398 } while (0)7399 switch (uExitReason)7400 {7401 case VMX_EXIT_MTF:7402 return vmxHCExitMtf(pVCpu, pVmxTransient);7403 7404 case VMX_EXIT_XCPT_OR_NMI:7405 {7406 uint8_t const idxVector = VMX_EXIT_INT_INFO_VECTOR(pVmxTransient->uExitIntInfo);7407 switch (VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo))7408 {7409 case VMX_EXIT_INT_INFO_TYPE_HW_XCPT:7410 case VMX_EXIT_INT_INFO_TYPE_SW_XCPT:7411 case VMX_EXIT_INT_INFO_TYPE_PRIV_SW_XCPT:7412 if (idxVector <= (unsigned)(DBGFEVENT_XCPT_LAST - DBGFEVENT_XCPT_FIRST))7413 {7414 if (VMX_EXIT_INT_INFO_IS_ERROR_CODE_VALID(pVmxTransient->uExitIntInfo))7415 {7416 vmxHCReadExitIntErrorCodeVmcs(pVCpu, pVmxTransient);7417 uEventArg = pVmxTransient->uExitIntErrorCode;7418 }7419 enmEvent1 = (DBGFEVENTTYPE)(DBGFEVENT_XCPT_FIRST + idxVector);7420 switch (enmEvent1)7421 {7422 case DBGFEVENT_XCPT_DE: fDtrace1 = VBOXVMM_XCPT_DE_ENABLED(); break;7423 case DBGFEVENT_XCPT_DB: fDtrace1 = VBOXVMM_XCPT_DB_ENABLED(); break;7424 case DBGFEVENT_XCPT_BP: fDtrace1 = VBOXVMM_XCPT_BP_ENABLED(); break;7425 case DBGFEVENT_XCPT_OF: fDtrace1 = VBOXVMM_XCPT_OF_ENABLED(); break;7426 case DBGFEVENT_XCPT_BR: fDtrace1 = VBOXVMM_XCPT_BR_ENABLED(); break;7427 case DBGFEVENT_XCPT_UD: fDtrace1 = VBOXVMM_XCPT_UD_ENABLED(); break;7428 case DBGFEVENT_XCPT_NM: fDtrace1 = VBOXVMM_XCPT_NM_ENABLED(); break;7429 case DBGFEVENT_XCPT_DF: fDtrace1 = VBOXVMM_XCPT_DF_ENABLED(); break;7430 case DBGFEVENT_XCPT_TS: fDtrace1 = VBOXVMM_XCPT_TS_ENABLED(); break;7431 case DBGFEVENT_XCPT_NP: fDtrace1 = VBOXVMM_XCPT_NP_ENABLED(); break;7432 case DBGFEVENT_XCPT_SS: fDtrace1 = VBOXVMM_XCPT_SS_ENABLED(); break;7433 case DBGFEVENT_XCPT_GP: fDtrace1 = VBOXVMM_XCPT_GP_ENABLED(); break;7434 case DBGFEVENT_XCPT_PF: fDtrace1 = VBOXVMM_XCPT_PF_ENABLED(); break;7435 case DBGFEVENT_XCPT_MF: fDtrace1 = VBOXVMM_XCPT_MF_ENABLED(); break;7436 case DBGFEVENT_XCPT_AC: fDtrace1 = VBOXVMM_XCPT_AC_ENABLED(); break;7437 case DBGFEVENT_XCPT_XF: fDtrace1 = VBOXVMM_XCPT_XF_ENABLED(); break;7438 case DBGFEVENT_XCPT_VE: fDtrace1 = VBOXVMM_XCPT_VE_ENABLED(); break;7439 case DBGFEVENT_XCPT_SX: fDtrace1 = VBOXVMM_XCPT_SX_ENABLED(); break;7440 default: break;7441 }7442 }7443 else7444 AssertFailed();7445 break;7446 7447 case VMX_EXIT_INT_INFO_TYPE_SW_INT:7448 uEventArg = idxVector;7449 enmEvent1 = DBGFEVENT_INTERRUPT_SOFTWARE;7450 fDtrace1 = VBOXVMM_INT_SOFTWARE_ENABLED();7451 break;7452 }7453 break;7454 }7455 7456 case VMX_EXIT_TRIPLE_FAULT:7457 enmEvent1 = DBGFEVENT_TRIPLE_FAULT;7458 //fDtrace1 = VBOXVMM_EXIT_TRIPLE_FAULT_ENABLED();7459 break;7460 case VMX_EXIT_TASK_SWITCH: SET_EXIT(TASK_SWITCH); break;7461 case VMX_EXIT_EPT_VIOLATION: SET_EXIT(VMX_EPT_VIOLATION); break;7462 case VMX_EXIT_EPT_MISCONFIG: 
SET_EXIT(VMX_EPT_MISCONFIG); break;7463 case VMX_EXIT_APIC_ACCESS: SET_EXIT(VMX_VAPIC_ACCESS); break;7464 case VMX_EXIT_APIC_WRITE: SET_EXIT(VMX_VAPIC_WRITE); break;7465 7466 /* Instruction specific VM-exits: */7467 case VMX_EXIT_CPUID: SET_BOTH(CPUID); break;7468 case VMX_EXIT_GETSEC: SET_BOTH(GETSEC); break;7469 case VMX_EXIT_HLT: SET_BOTH(HALT); break;7470 case VMX_EXIT_INVD: SET_BOTH(INVD); break;7471 case VMX_EXIT_INVLPG: SET_BOTH(INVLPG); break;7472 case VMX_EXIT_RDPMC: SET_BOTH(RDPMC); break;7473 case VMX_EXIT_RDTSC: SET_BOTH(RDTSC); break;7474 case VMX_EXIT_RSM: SET_BOTH(RSM); break;7475 case VMX_EXIT_VMCALL: SET_BOTH(VMM_CALL); break;7476 case VMX_EXIT_VMCLEAR: SET_BOTH(VMX_VMCLEAR); break;7477 case VMX_EXIT_VMLAUNCH: SET_BOTH(VMX_VMLAUNCH); break;7478 case VMX_EXIT_VMPTRLD: SET_BOTH(VMX_VMPTRLD); break;7479 case VMX_EXIT_VMPTRST: SET_BOTH(VMX_VMPTRST); break;7480 case VMX_EXIT_VMREAD: SET_BOTH(VMX_VMREAD); break;7481 case VMX_EXIT_VMRESUME: SET_BOTH(VMX_VMRESUME); break;7482 case VMX_EXIT_VMWRITE: SET_BOTH(VMX_VMWRITE); break;7483 case VMX_EXIT_VMXOFF: SET_BOTH(VMX_VMXOFF); break;7484 case VMX_EXIT_VMXON: SET_BOTH(VMX_VMXON); break;7485 case VMX_EXIT_MOV_CRX:7486 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);7487 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQual) == VMX_EXIT_QUAL_CRX_ACCESS_READ)7488 SET_BOTH(CRX_READ);7489 else7490 SET_BOTH(CRX_WRITE);7491 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQual);7492 break;7493 case VMX_EXIT_MOV_DRX:7494 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);7495 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQual)7496 == VMX_EXIT_QUAL_DRX_DIRECTION_READ)7497 SET_BOTH(DRX_READ);7498 else7499 SET_BOTH(DRX_WRITE);7500 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQual);7501 break;7502 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break;7503 case VMX_EXIT_WRMSR: SET_BOTH(WRMSR); break;7504 case VMX_EXIT_MWAIT: SET_BOTH(MWAIT); break;7505 case VMX_EXIT_MONITOR: SET_BOTH(MONITOR); break;7506 case VMX_EXIT_PAUSE: SET_BOTH(PAUSE); break;7507 case VMX_EXIT_GDTR_IDTR_ACCESS:7508 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);7509 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_XDTR_INSINFO_INSTR_ID))7510 {7511 case VMX_XDTR_INSINFO_II_SGDT: SET_BOTH(SGDT); break;7512 case VMX_XDTR_INSINFO_II_SIDT: SET_BOTH(SIDT); break;7513 case VMX_XDTR_INSINFO_II_LGDT: SET_BOTH(LGDT); break;7514 case VMX_XDTR_INSINFO_II_LIDT: SET_BOTH(LIDT); break;7515 }7516 break;7517 7518 case VMX_EXIT_LDTR_TR_ACCESS:7519 vmxHCReadExitInstrInfoVmcs(pVCpu, pVmxTransient);7520 switch (RT_BF_GET(pVmxTransient->ExitInstrInfo.u, VMX_BF_YYTR_INSINFO_INSTR_ID))7521 {7522 case VMX_YYTR_INSINFO_II_SLDT: SET_BOTH(SLDT); break;7523 case VMX_YYTR_INSINFO_II_STR: SET_BOTH(STR); break;7524 case VMX_YYTR_INSINFO_II_LLDT: SET_BOTH(LLDT); break;7525 case VMX_YYTR_INSINFO_II_LTR: SET_BOTH(LTR); break;7526 }7527 break;7528 7529 case VMX_EXIT_INVEPT: SET_BOTH(VMX_INVEPT); break;7530 case VMX_EXIT_RDTSCP: SET_BOTH(RDTSCP); break;7531 case VMX_EXIT_INVVPID: SET_BOTH(VMX_INVVPID); break;7532 case VMX_EXIT_WBINVD: SET_BOTH(WBINVD); break;7533 case VMX_EXIT_XSETBV: SET_BOTH(XSETBV); break;7534 case VMX_EXIT_RDRAND: SET_BOTH(RDRAND); break;7535 case VMX_EXIT_INVPCID: SET_BOTH(VMX_INVPCID); break;7536 case VMX_EXIT_VMFUNC: SET_BOTH(VMX_VMFUNC); break;7537 case VMX_EXIT_RDSEED: SET_BOTH(RDSEED); break;7538 case VMX_EXIT_XSAVES: SET_BOTH(XSAVES); break;7539 case VMX_EXIT_XRSTORS: SET_BOTH(XRSTORS); break;7540 7541 /* Events that aren't relevant at this point. 
*/7542 case VMX_EXIT_EXT_INT:7543 case VMX_EXIT_INT_WINDOW:7544 case VMX_EXIT_NMI_WINDOW:7545 case VMX_EXIT_TPR_BELOW_THRESHOLD:7546 case VMX_EXIT_PREEMPT_TIMER:7547 case VMX_EXIT_IO_INSTR:7548 break;7549 7550 /* Errors and unexpected events. */7551 case VMX_EXIT_INIT_SIGNAL:7552 case VMX_EXIT_SIPI:7553 case VMX_EXIT_IO_SMI:7554 case VMX_EXIT_SMI:7555 case VMX_EXIT_ERR_INVALID_GUEST_STATE:7556 case VMX_EXIT_ERR_MSR_LOAD:7557 case VMX_EXIT_ERR_MACHINE_CHECK:7558 case VMX_EXIT_PML_FULL:7559 case VMX_EXIT_VIRTUALIZED_EOI:7560 break;7561 7562 default:7563 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));7564 break;7565 }7566 #undef SET_BOTH7567 #undef SET_EXIT7568 7569 /*7570 * Dtrace tracepoints go first. We do them here at once so we don't7571 * have to copy the guest state saving and stuff a few dozen times.7572 * Down side is that we've got to repeat the switch, though this time7573 * we use enmEvent since the probes are a subset of what DBGF does.7574 */7575 if (fDtrace1 || fDtrace2)7576 {7577 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);7578 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);7579 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;7580 switch (enmEvent1)7581 {7582 /** @todo consider which extra parameters would be helpful for each probe. */7583 case DBGFEVENT_END: break;7584 case DBGFEVENT_XCPT_DE: VBOXVMM_XCPT_DE(pVCpu, pCtx); break;7585 case DBGFEVENT_XCPT_DB: VBOXVMM_XCPT_DB(pVCpu, pCtx, pCtx->dr[6]); break;7586 case DBGFEVENT_XCPT_BP: VBOXVMM_XCPT_BP(pVCpu, pCtx); break;7587 case DBGFEVENT_XCPT_OF: VBOXVMM_XCPT_OF(pVCpu, pCtx); break;7588 case DBGFEVENT_XCPT_BR: VBOXVMM_XCPT_BR(pVCpu, pCtx); break;7589 case DBGFEVENT_XCPT_UD: VBOXVMM_XCPT_UD(pVCpu, pCtx); break;7590 case DBGFEVENT_XCPT_NM: VBOXVMM_XCPT_NM(pVCpu, pCtx); break;7591 case DBGFEVENT_XCPT_DF: VBOXVMM_XCPT_DF(pVCpu, pCtx); break;7592 case DBGFEVENT_XCPT_TS: VBOXVMM_XCPT_TS(pVCpu, pCtx, uEventArg); break;7593 case DBGFEVENT_XCPT_NP: VBOXVMM_XCPT_NP(pVCpu, pCtx, uEventArg); break;7594 case DBGFEVENT_XCPT_SS: VBOXVMM_XCPT_SS(pVCpu, pCtx, uEventArg); break;7595 case DBGFEVENT_XCPT_GP: VBOXVMM_XCPT_GP(pVCpu, pCtx, uEventArg); break;7596 case DBGFEVENT_XCPT_PF: VBOXVMM_XCPT_PF(pVCpu, pCtx, uEventArg, pCtx->cr2); break;7597 case DBGFEVENT_XCPT_MF: VBOXVMM_XCPT_MF(pVCpu, pCtx); break;7598 case DBGFEVENT_XCPT_AC: VBOXVMM_XCPT_AC(pVCpu, pCtx); break;7599 case DBGFEVENT_XCPT_XF: VBOXVMM_XCPT_XF(pVCpu, pCtx); break;7600 case DBGFEVENT_XCPT_VE: VBOXVMM_XCPT_VE(pVCpu, pCtx); break;7601 case DBGFEVENT_XCPT_SX: VBOXVMM_XCPT_SX(pVCpu, pCtx, uEventArg); break;7602 case DBGFEVENT_INTERRUPT_SOFTWARE: VBOXVMM_INT_SOFTWARE(pVCpu, pCtx, (uint8_t)uEventArg); break;7603 case DBGFEVENT_INSTR_CPUID: VBOXVMM_INSTR_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;7604 case DBGFEVENT_INSTR_GETSEC: VBOXVMM_INSTR_GETSEC(pVCpu, pCtx); break;7605 case DBGFEVENT_INSTR_HALT: VBOXVMM_INSTR_HALT(pVCpu, pCtx); break;7606 case DBGFEVENT_INSTR_INVD: VBOXVMM_INSTR_INVD(pVCpu, pCtx); break;7607 case DBGFEVENT_INSTR_INVLPG: VBOXVMM_INSTR_INVLPG(pVCpu, pCtx); break;7608 case DBGFEVENT_INSTR_RDPMC: VBOXVMM_INSTR_RDPMC(pVCpu, pCtx); break;7609 case DBGFEVENT_INSTR_RDTSC: VBOXVMM_INSTR_RDTSC(pVCpu, pCtx); break;7610 case DBGFEVENT_INSTR_RSM: VBOXVMM_INSTR_RSM(pVCpu, pCtx); break;7611 case DBGFEVENT_INSTR_CRX_READ: VBOXVMM_INSTR_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;7612 case DBGFEVENT_INSTR_CRX_WRITE: VBOXVMM_INSTR_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;7613 case DBGFEVENT_INSTR_DRX_READ: 
VBOXVMM_INSTR_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;7614 case DBGFEVENT_INSTR_DRX_WRITE: VBOXVMM_INSTR_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;7615 case DBGFEVENT_INSTR_RDMSR: VBOXVMM_INSTR_RDMSR(pVCpu, pCtx, pCtx->ecx); break;7616 case DBGFEVENT_INSTR_WRMSR: VBOXVMM_INSTR_WRMSR(pVCpu, pCtx, pCtx->ecx,7617 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;7618 case DBGFEVENT_INSTR_MWAIT: VBOXVMM_INSTR_MWAIT(pVCpu, pCtx); break;7619 case DBGFEVENT_INSTR_MONITOR: VBOXVMM_INSTR_MONITOR(pVCpu, pCtx); break;7620 case DBGFEVENT_INSTR_PAUSE: VBOXVMM_INSTR_PAUSE(pVCpu, pCtx); break;7621 case DBGFEVENT_INSTR_SGDT: VBOXVMM_INSTR_SGDT(pVCpu, pCtx); break;7622 case DBGFEVENT_INSTR_SIDT: VBOXVMM_INSTR_SIDT(pVCpu, pCtx); break;7623 case DBGFEVENT_INSTR_LGDT: VBOXVMM_INSTR_LGDT(pVCpu, pCtx); break;7624 case DBGFEVENT_INSTR_LIDT: VBOXVMM_INSTR_LIDT(pVCpu, pCtx); break;7625 case DBGFEVENT_INSTR_SLDT: VBOXVMM_INSTR_SLDT(pVCpu, pCtx); break;7626 case DBGFEVENT_INSTR_STR: VBOXVMM_INSTR_STR(pVCpu, pCtx); break;7627 case DBGFEVENT_INSTR_LLDT: VBOXVMM_INSTR_LLDT(pVCpu, pCtx); break;7628 case DBGFEVENT_INSTR_LTR: VBOXVMM_INSTR_LTR(pVCpu, pCtx); break;7629 case DBGFEVENT_INSTR_RDTSCP: VBOXVMM_INSTR_RDTSCP(pVCpu, pCtx); break;7630 case DBGFEVENT_INSTR_WBINVD: VBOXVMM_INSTR_WBINVD(pVCpu, pCtx); break;7631 case DBGFEVENT_INSTR_XSETBV: VBOXVMM_INSTR_XSETBV(pVCpu, pCtx); break;7632 case DBGFEVENT_INSTR_RDRAND: VBOXVMM_INSTR_RDRAND(pVCpu, pCtx); break;7633 case DBGFEVENT_INSTR_RDSEED: VBOXVMM_INSTR_RDSEED(pVCpu, pCtx); break;7634 case DBGFEVENT_INSTR_XSAVES: VBOXVMM_INSTR_XSAVES(pVCpu, pCtx); break;7635 case DBGFEVENT_INSTR_XRSTORS: VBOXVMM_INSTR_XRSTORS(pVCpu, pCtx); break;7636 case DBGFEVENT_INSTR_VMM_CALL: VBOXVMM_INSTR_VMM_CALL(pVCpu, pCtx); break;7637 case DBGFEVENT_INSTR_VMX_VMCLEAR: VBOXVMM_INSTR_VMX_VMCLEAR(pVCpu, pCtx); break;7638 case DBGFEVENT_INSTR_VMX_VMLAUNCH: VBOXVMM_INSTR_VMX_VMLAUNCH(pVCpu, pCtx); break;7639 case DBGFEVENT_INSTR_VMX_VMPTRLD: VBOXVMM_INSTR_VMX_VMPTRLD(pVCpu, pCtx); break;7640 case DBGFEVENT_INSTR_VMX_VMPTRST: VBOXVMM_INSTR_VMX_VMPTRST(pVCpu, pCtx); break;7641 case DBGFEVENT_INSTR_VMX_VMREAD: VBOXVMM_INSTR_VMX_VMREAD(pVCpu, pCtx); break;7642 case DBGFEVENT_INSTR_VMX_VMRESUME: VBOXVMM_INSTR_VMX_VMRESUME(pVCpu, pCtx); break;7643 case DBGFEVENT_INSTR_VMX_VMWRITE: VBOXVMM_INSTR_VMX_VMWRITE(pVCpu, pCtx); break;7644 case DBGFEVENT_INSTR_VMX_VMXOFF: VBOXVMM_INSTR_VMX_VMXOFF(pVCpu, pCtx); break;7645 case DBGFEVENT_INSTR_VMX_VMXON: VBOXVMM_INSTR_VMX_VMXON(pVCpu, pCtx); break;7646 case DBGFEVENT_INSTR_VMX_INVEPT: VBOXVMM_INSTR_VMX_INVEPT(pVCpu, pCtx); break;7647 case DBGFEVENT_INSTR_VMX_INVVPID: VBOXVMM_INSTR_VMX_INVVPID(pVCpu, pCtx); break;7648 case DBGFEVENT_INSTR_VMX_INVPCID: VBOXVMM_INSTR_VMX_INVPCID(pVCpu, pCtx); break;7649 case DBGFEVENT_INSTR_VMX_VMFUNC: VBOXVMM_INSTR_VMX_VMFUNC(pVCpu, pCtx); break;7650 default: AssertMsgFailed(("enmEvent1=%d uExitReason=%d\n", enmEvent1, uExitReason)); break;7651 }7652 switch (enmEvent2)7653 {7654 /** @todo consider which extra parameters would be helpful for each probe. 
*/7655 case DBGFEVENT_END: break;7656 case DBGFEVENT_EXIT_TASK_SWITCH: VBOXVMM_EXIT_TASK_SWITCH(pVCpu, pCtx); break;7657 case DBGFEVENT_EXIT_CPUID: VBOXVMM_EXIT_CPUID(pVCpu, pCtx, pCtx->eax, pCtx->ecx); break;7658 case DBGFEVENT_EXIT_GETSEC: VBOXVMM_EXIT_GETSEC(pVCpu, pCtx); break;7659 case DBGFEVENT_EXIT_HALT: VBOXVMM_EXIT_HALT(pVCpu, pCtx); break;7660 case DBGFEVENT_EXIT_INVD: VBOXVMM_EXIT_INVD(pVCpu, pCtx); break;7661 case DBGFEVENT_EXIT_INVLPG: VBOXVMM_EXIT_INVLPG(pVCpu, pCtx); break;7662 case DBGFEVENT_EXIT_RDPMC: VBOXVMM_EXIT_RDPMC(pVCpu, pCtx); break;7663 case DBGFEVENT_EXIT_RDTSC: VBOXVMM_EXIT_RDTSC(pVCpu, pCtx); break;7664 case DBGFEVENT_EXIT_RSM: VBOXVMM_EXIT_RSM(pVCpu, pCtx); break;7665 case DBGFEVENT_EXIT_CRX_READ: VBOXVMM_EXIT_CRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;7666 case DBGFEVENT_EXIT_CRX_WRITE: VBOXVMM_EXIT_CRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;7667 case DBGFEVENT_EXIT_DRX_READ: VBOXVMM_EXIT_DRX_READ(pVCpu, pCtx, (uint8_t)uEventArg); break;7668 case DBGFEVENT_EXIT_DRX_WRITE: VBOXVMM_EXIT_DRX_WRITE(pVCpu, pCtx, (uint8_t)uEventArg); break;7669 case DBGFEVENT_EXIT_RDMSR: VBOXVMM_EXIT_RDMSR(pVCpu, pCtx, pCtx->ecx); break;7670 case DBGFEVENT_EXIT_WRMSR: VBOXVMM_EXIT_WRMSR(pVCpu, pCtx, pCtx->ecx,7671 RT_MAKE_U64(pCtx->eax, pCtx->edx)); break;7672 case DBGFEVENT_EXIT_MWAIT: VBOXVMM_EXIT_MWAIT(pVCpu, pCtx); break;7673 case DBGFEVENT_EXIT_MONITOR: VBOXVMM_EXIT_MONITOR(pVCpu, pCtx); break;7674 case DBGFEVENT_EXIT_PAUSE: VBOXVMM_EXIT_PAUSE(pVCpu, pCtx); break;7675 case DBGFEVENT_EXIT_SGDT: VBOXVMM_EXIT_SGDT(pVCpu, pCtx); break;7676 case DBGFEVENT_EXIT_SIDT: VBOXVMM_EXIT_SIDT(pVCpu, pCtx); break;7677 case DBGFEVENT_EXIT_LGDT: VBOXVMM_EXIT_LGDT(pVCpu, pCtx); break;7678 case DBGFEVENT_EXIT_LIDT: VBOXVMM_EXIT_LIDT(pVCpu, pCtx); break;7679 case DBGFEVENT_EXIT_SLDT: VBOXVMM_EXIT_SLDT(pVCpu, pCtx); break;7680 case DBGFEVENT_EXIT_STR: VBOXVMM_EXIT_STR(pVCpu, pCtx); break;7681 case DBGFEVENT_EXIT_LLDT: VBOXVMM_EXIT_LLDT(pVCpu, pCtx); break;7682 case DBGFEVENT_EXIT_LTR: VBOXVMM_EXIT_LTR(pVCpu, pCtx); break;7683 case DBGFEVENT_EXIT_RDTSCP: VBOXVMM_EXIT_RDTSCP(pVCpu, pCtx); break;7684 case DBGFEVENT_EXIT_WBINVD: VBOXVMM_EXIT_WBINVD(pVCpu, pCtx); break;7685 case DBGFEVENT_EXIT_XSETBV: VBOXVMM_EXIT_XSETBV(pVCpu, pCtx); break;7686 case DBGFEVENT_EXIT_RDRAND: VBOXVMM_EXIT_RDRAND(pVCpu, pCtx); break;7687 case DBGFEVENT_EXIT_RDSEED: VBOXVMM_EXIT_RDSEED(pVCpu, pCtx); break;7688 case DBGFEVENT_EXIT_XSAVES: VBOXVMM_EXIT_XSAVES(pVCpu, pCtx); break;7689 case DBGFEVENT_EXIT_XRSTORS: VBOXVMM_EXIT_XRSTORS(pVCpu, pCtx); break;7690 case DBGFEVENT_EXIT_VMM_CALL: VBOXVMM_EXIT_VMM_CALL(pVCpu, pCtx); break;7691 case DBGFEVENT_EXIT_VMX_VMCLEAR: VBOXVMM_EXIT_VMX_VMCLEAR(pVCpu, pCtx); break;7692 case DBGFEVENT_EXIT_VMX_VMLAUNCH: VBOXVMM_EXIT_VMX_VMLAUNCH(pVCpu, pCtx); break;7693 case DBGFEVENT_EXIT_VMX_VMPTRLD: VBOXVMM_EXIT_VMX_VMPTRLD(pVCpu, pCtx); break;7694 case DBGFEVENT_EXIT_VMX_VMPTRST: VBOXVMM_EXIT_VMX_VMPTRST(pVCpu, pCtx); break;7695 case DBGFEVENT_EXIT_VMX_VMREAD: VBOXVMM_EXIT_VMX_VMREAD(pVCpu, pCtx); break;7696 case DBGFEVENT_EXIT_VMX_VMRESUME: VBOXVMM_EXIT_VMX_VMRESUME(pVCpu, pCtx); break;7697 case DBGFEVENT_EXIT_VMX_VMWRITE: VBOXVMM_EXIT_VMX_VMWRITE(pVCpu, pCtx); break;7698 case DBGFEVENT_EXIT_VMX_VMXOFF: VBOXVMM_EXIT_VMX_VMXOFF(pVCpu, pCtx); break;7699 case DBGFEVENT_EXIT_VMX_VMXON: VBOXVMM_EXIT_VMX_VMXON(pVCpu, pCtx); break;7700 case DBGFEVENT_EXIT_VMX_INVEPT: VBOXVMM_EXIT_VMX_INVEPT(pVCpu, pCtx); break;7701 case DBGFEVENT_EXIT_VMX_INVVPID: VBOXVMM_EXIT_VMX_INVVPID(pVCpu, pCtx); 
break;7702 case DBGFEVENT_EXIT_VMX_INVPCID: VBOXVMM_EXIT_VMX_INVPCID(pVCpu, pCtx); break;7703 case DBGFEVENT_EXIT_VMX_VMFUNC: VBOXVMM_EXIT_VMX_VMFUNC(pVCpu, pCtx); break;7704 case DBGFEVENT_EXIT_VMX_EPT_MISCONFIG: VBOXVMM_EXIT_VMX_EPT_MISCONFIG(pVCpu, pCtx); break;7705 case DBGFEVENT_EXIT_VMX_EPT_VIOLATION: VBOXVMM_EXIT_VMX_EPT_VIOLATION(pVCpu, pCtx); break;7706 case DBGFEVENT_EXIT_VMX_VAPIC_ACCESS: VBOXVMM_EXIT_VMX_VAPIC_ACCESS(pVCpu, pCtx); break;7707 case DBGFEVENT_EXIT_VMX_VAPIC_WRITE: VBOXVMM_EXIT_VMX_VAPIC_WRITE(pVCpu, pCtx); break;7708 default: AssertMsgFailed(("enmEvent2=%d uExitReason=%d\n", enmEvent2, uExitReason)); break;7709 }7710 }7711 7712 /*7713 * Fire of the DBGF event, if enabled (our check here is just a quick one,7714 * the DBGF call will do a full check).7715 *7716 * Note! DBGF sets DBGFEVENT_INTERRUPT_SOFTWARE in the bitmap.7717 * Note! If we have to events, we prioritize the first, i.e. the instruction7718 * one, in order to avoid event nesting.7719 */7720 PVMCC pVM = pVCpu->CTX_SUFF(pVM);7721 if ( enmEvent1 != DBGFEVENT_END7722 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent1))7723 {7724 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);7725 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent1, DBGFEVENTCTX_HM, 1, uEventArg);7726 if (rcStrict != VINF_SUCCESS)7727 return rcStrict;7728 }7729 else if ( enmEvent2 != DBGFEVENT_END7730 && DBGF_IS_EVENT_ENABLED(pVM, enmEvent2))7731 {7732 vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);7733 VBOXSTRICTRC rcStrict = DBGFEventGenericWithArgs(pVM, pVCpu, enmEvent2, DBGFEVENTCTX_HM, 1, uEventArg);7734 if (rcStrict != VINF_SUCCESS)7735 return rcStrict;7736 }7737 7738 return VINF_SUCCESS;7739 }7740 7741 7742 /**7743 * Single-stepping VM-exit filtering.7744 *7745 * This is preprocessing the VM-exits and deciding whether we've gotten far7746 * enough to return VINF_EM_DBG_STEPPED already. If not, normal VM-exit7747 * handling is performed.7748 *7749 * @returns Strict VBox status code (i.e. 
informational status codes too).7750 * @param pVCpu The cross context virtual CPU structure of the calling EMT.7751 * @param pVmxTransient The VMX-transient structure.7752 * @param pDbgState The debug state.7753 */7754 DECLINLINE(VBOXSTRICTRC) hmR0VmxRunDebugHandleExit(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState)7755 {7756 /*7757 * Expensive (saves context) generic dtrace VM-exit probe.7758 */7759 uint32_t const uExitReason = pVmxTransient->uExitReason;7760 if (!VBOXVMM_R0_HMVMX_VMEXIT_ENABLED())7761 { /* more likely */ }7762 else7763 {7764 vmxHCReadExitQualVmcs(pVCpu, pVmxTransient);7765 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, HMVMX_CPUMCTX_EXTRN_ALL);7766 AssertRC(rc);7767 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, &pVCpu->cpum.GstCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQual);7768 }7769 7770 /*7771 * Check for host NMI, just to get that out of the way.7772 */7773 if (uExitReason != VMX_EXIT_XCPT_OR_NMI)7774 { /* normally likely */ }7775 else7776 {7777 vmxHCReadExitIntInfoVmcs(pVCpu, pVmxTransient);7778 uint32_t const uIntType = VMX_EXIT_INT_INFO_TYPE(pVmxTransient->uExitIntInfo);7779 if (uIntType == VMX_EXIT_INT_INFO_TYPE_NMI)7780 return hmR0VmxExitHostNmi(pVCpu, pVmxTransient->pVmcsInfo);7781 }7782 7783 /*7784 * Check for single stepping event if we're stepping.7785 */7786 if (pVCpu->hm.s.fSingleInstruction)7787 {7788 switch (uExitReason)7789 {7790 case VMX_EXIT_MTF:7791 return vmxHCExitMtf(pVCpu, pVmxTransient);7792 7793 /* Various events: */7794 case VMX_EXIT_XCPT_OR_NMI:7795 case VMX_EXIT_EXT_INT:7796 case VMX_EXIT_TRIPLE_FAULT:7797 case VMX_EXIT_INT_WINDOW:7798 case VMX_EXIT_NMI_WINDOW:7799 case VMX_EXIT_TASK_SWITCH:7800 case VMX_EXIT_TPR_BELOW_THRESHOLD:7801 case VMX_EXIT_APIC_ACCESS:7802 case VMX_EXIT_EPT_VIOLATION:7803 case VMX_EXIT_EPT_MISCONFIG:7804 case VMX_EXIT_PREEMPT_TIMER:7805 7806 /* Instruction specific VM-exits: */7807 case VMX_EXIT_CPUID:7808 case VMX_EXIT_GETSEC:7809 case VMX_EXIT_HLT:7810 case VMX_EXIT_INVD:7811 case VMX_EXIT_INVLPG:7812 case VMX_EXIT_RDPMC:7813 case VMX_EXIT_RDTSC:7814 case VMX_EXIT_RSM:7815 case VMX_EXIT_VMCALL:7816 case VMX_EXIT_VMCLEAR:7817 case VMX_EXIT_VMLAUNCH:7818 case VMX_EXIT_VMPTRLD:7819 case VMX_EXIT_VMPTRST:7820 case VMX_EXIT_VMREAD:7821 case VMX_EXIT_VMRESUME:7822 case VMX_EXIT_VMWRITE:7823 case VMX_EXIT_VMXOFF:7824 case VMX_EXIT_VMXON:7825 case VMX_EXIT_MOV_CRX:7826 case VMX_EXIT_MOV_DRX:7827 case VMX_EXIT_IO_INSTR:7828 case VMX_EXIT_RDMSR:7829 case VMX_EXIT_WRMSR:7830 case VMX_EXIT_MWAIT:7831 case VMX_EXIT_MONITOR:7832 case VMX_EXIT_PAUSE:7833 case VMX_EXIT_GDTR_IDTR_ACCESS:7834 case VMX_EXIT_LDTR_TR_ACCESS:7835 case VMX_EXIT_INVEPT:7836 case VMX_EXIT_RDTSCP:7837 case VMX_EXIT_INVVPID:7838 case VMX_EXIT_WBINVD:7839 case VMX_EXIT_XSETBV:7840 case VMX_EXIT_RDRAND:7841 case VMX_EXIT_INVPCID:7842 case VMX_EXIT_VMFUNC:7843 case VMX_EXIT_RDSEED:7844 case VMX_EXIT_XSAVES:7845 case VMX_EXIT_XRSTORS:7846 {7847 int rc = vmxHCImportGuestState(pVCpu, pVmxTransient->pVmcsInfo, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP);7848 AssertRCReturn(rc, rc);7849 if ( pVCpu->cpum.GstCtx.rip != pDbgState->uRipStart7850 || pVCpu->cpum.GstCtx.cs.Sel != pDbgState->uCsStart)7851 return VINF_EM_DBG_STEPPED;7852 break;7853 }7854 7855 /* Errors and unexpected events: */7856 case VMX_EXIT_INIT_SIGNAL:7857 case VMX_EXIT_SIPI:7858 case VMX_EXIT_IO_SMI:7859 case VMX_EXIT_SMI:7860 case VMX_EXIT_ERR_INVALID_GUEST_STATE:7861 case VMX_EXIT_ERR_MSR_LOAD:7862 case VMX_EXIT_ERR_MACHINE_CHECK:7863 case VMX_EXIT_PML_FULL:7864 case 
VMX_EXIT_VIRTUALIZED_EOI:7865 case VMX_EXIT_APIC_WRITE: /* Some talk about this being fault like, so I guess we must process it? */7866 break;7867 7868 default:7869 AssertMsgFailed(("Unexpected VM-exit=%#x\n", uExitReason));7870 break;7871 }7872 }7873 7874 /*7875 * Check for debugger event breakpoints and dtrace probes.7876 */7877 if ( uExitReason < RT_ELEMENTS(pDbgState->bmExitsToCheck) * 32U7878 && ASMBitTest(pDbgState->bmExitsToCheck, uExitReason) )7879 {7880 VBOXSTRICTRC rcStrict = hmR0VmxHandleExitDtraceEvents(pVCpu, pVmxTransient, uExitReason);7881 if (rcStrict != VINF_SUCCESS)7882 return rcStrict;7883 }7884 7885 /*7886 * Normal processing.7887 */7888 #ifdef HMVMX_USE_FUNCTION_TABLE7889 return g_aVMExitHandlers[uExitReason].pfn(pVCpu, pVmxTransient);7890 #else7891 return vmxHCHandleExit(pVCpu, pVmxTransient, uExitReason);7892 #endif7893 }7894 7895 7896 /**7897 6875 * Single steps guest code using hardware-assisted VMX. 7898 6876 * … … 7924 6902 /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps. */ 7925 6903 VMXRUNDBGSTATE DbgState; 7926 hmR0VmxRunDebugStateInit(pVCpu, &VmxTransient, &DbgState);7927 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState);6904 vmxHCRunDebugStateInit(pVCpu, &VmxTransient, &DbgState); 6905 vmxHCPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState); 7928 6906 7929 6907 /* … … 7939 6917 7940 6918 /* Set up VM-execution controls the next two can respond to. */ 7941 hmR0VmxPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState);6919 vmxHCPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState); 7942 6920 7943 6921 /* … … 7955 6933 7956 6934 /* Override any obnoxious code in the above two calls. */ 7957 hmR0VmxPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState);6935 vmxHCPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState); 7958 6936 7959 6937 /* … … 7987 6965 * Handle the VM-exit - we quit earlier on certain VM-exits, see hmR0VmxHandleExitDebug(). 7988 6966 */ 7989 rcStrict = hmR0VmxRunDebugHandleExit(pVCpu, &VmxTransient, &DbgState);6967 rcStrict = vmxHCRunDebugHandleExit(pVCpu, &VmxTransient, &DbgState); 7990 6968 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitHandling, x); 7991 6969 if (rcStrict != VINF_SUCCESS) … … 8019 6997 */ 8020 6998 if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo) 8021 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState);6999 vmxHCPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState); 8022 7000 8023 7001 /* Restore all controls applied by hmR0VmxPreRunGuestDebugStateApply above. */ 8024 rcStrict = hmR0VmxRunDebugStateRevert(pVCpu, &VmxTransient, &DbgState, rcStrict);7002 rcStrict = vmxHCRunDebugStateRevert(pVCpu, &VmxTransient, &DbgState, rcStrict); 8025 7003 Assert(rcStrict == VINF_SUCCESS); 8026 7004 } … … 8051 7029 return rcStrict; 8052 7030 } 8053 8054 7031 8055 7032 /** @} */ -
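The debug-loop helpers moved into VMXAllTemplate.cpp.h above are intended to be called in a fixed order by whichever backend reuses them (ring-0 HM here, the Darwin NEM backend below). A minimal sketch of that shared skeleton follows; the vmxHC* calls are the real helpers renamed by this change, while backendExportState() and backendRun() are hypothetical placeholders for the backend-specific steps and are not actual functions:

    /* Sketch only: backendExportState()/backendRun() are hypothetical stand-ins
       for the ring-0 or NEM specific export and execution steps. */
    static VBOXSTRICTRC backendRunGuestCodeDebug(PVMCPUCC pVCpu, PVMXTRANSIENT pVmxTransient)
    {
        VMXRUNDBGSTATE DbgState;
        vmxHCRunDebugStateInit(pVCpu, pVmxTransient, &DbgState);           /* Remember RIP/CS and the initial control values. */
        vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState); /* Derive the wanted VM-exits from DBGF and dtrace. */

        VBOXSTRICTRC rcStrict = VINF_SUCCESS;
        for (;;)
        {
            vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState);    /* Force the extra VMCS controls. */

            rcStrict = backendExportState(pVCpu, pVmxTransient);                 /* Placeholder: export guest state to the VMCS. */
            if (rcStrict != VINF_SUCCESS)
                break;
            vmxHCPreRunGuestDebugStateApply(pVCpu, pVmxTransient, &DbgState);    /* Re-apply in case the export touched the controls. */

            rcStrict = backendRun(pVCpu, pVmxTransient);                         /* Placeholder: VMLAUNCH/VMRESUME or hv_vcpu_run(). */
            if (rcStrict != VINF_SUCCESS)
                break;

            rcStrict = vmxHCRunDebugHandleExit(pVCpu, pVmxTransient, &DbgState); /* Single-step filtering, DBGF events, dtrace probes. */
            if (rcStrict != VINF_SUCCESS)
                break;

            if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo)  /* Dtrace settings changed while we were running? */
                vmxHCPreRunGuestDebugStateUpdate(pVCpu, pVmxTransient, &DbgState);
        }

        /* Undo the VMCS control modifications before leaving the debug loop. */
        return vmxHCRunDebugStateRevert(pVCpu, pVmxTransient, &DbgState, rcStrict);
    }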
trunk/src/VBox/VMM/VMMR3/NEMR3Native-darwin.cpp
r93728 r93748 1842 1842 1843 1843 /** 1844 * Handles an exit from hv_vcpu_run() - debug runloop variant. 1845 * 1846 * @returns VBox strict status code. 1847 * @param pVM The cross context VM structure. 1848 * @param pVCpu The cross context virtual CPU structure of the 1849 * calling EMT. 1850 * @param pVmxTransient The transient VMX structure. 1851 * @param pDbgState The debug state structure. 1852 */ 1853 static VBOXSTRICTRC nemR3DarwinHandleExitDebug(PVM pVM, PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, PVMXRUNDBGSTATE pDbgState) 1854 { 1855 uint32_t uExitReason; 1856 int rc = nemR3DarwinReadVmcs32(pVCpu, VMX_VMCS32_RO_EXIT_REASON, &uExitReason); 1857 AssertRC(rc); 1858 pVmxTransient->fVmcsFieldsRead = 0; 1859 pVmxTransient->fIsNestedGuest = false; 1860 pVmxTransient->uExitReason = VMX_EXIT_REASON_BASIC(uExitReason); 1861 pVmxTransient->fVMEntryFailed = VMX_EXIT_REASON_HAS_ENTRY_FAILED(uExitReason); 1862 1863 if (RT_UNLIKELY(pVmxTransient->fVMEntryFailed)) 1864 AssertLogRelMsgFailedReturn(("Running guest failed for CPU #%u: %#x %u\n", 1865 pVCpu->idCpu, pVmxTransient->uExitReason, vmxHCCheckGuestState(pVCpu, &pVCpu->nem.s.VmcsInfo)), 1866 VERR_NEM_IPE_0); 1867 1868 /** @todo Only copy the state on demand (the R0 VT-x code saves some stuff unconditionally and the VMX template assumes that 1869 * when handling exits). */ 1870 rc = nemR3DarwinCopyStateFromHv(pVM, pVCpu, CPUMCTX_EXTRN_ALL); 1871 AssertRCReturn(rc, rc); 1872 1873 STAM_COUNTER_INC(&pVCpu->nem.s.pVmxStats->aStatExitReason[pVmxTransient->uExitReason & MASK_EXITREASON_STAT]); 1874 STAM_REL_COUNTER_INC(&pVCpu->nem.s.pVmxStats->StatExitAll); 1875 1876 return vmxHCRunDebugHandleExit(pVCpu, pVmxTransient, pDbgState); 1877 } 1878 1879 1880 /** 1844 1881 * Worker for nemR3NativeInit that loads the Hypervisor.framework shared library. 1845 1882 * … … 3125 3162 3126 3163 3127 VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu) 3128 { 3129 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags)); 3130 #ifdef LOG_ENABLED 3131 if (LogIs3Enabled()) 3132 nemR3DarwinLogState(pVM, pVCpu); 3133 #endif 3134 3135 AssertReturn(NEMR3CanExecuteGuest(pVM, pVCpu), VERR_NEM_IPE_9); 3164 /** 3165 * Runs the guest once until an exit occurs. 3166 * 3167 * @returns HV status code. 3168 * @param pVM The cross context VM structure. 3169 * @param pVCpu The cross context virtual CPU structure. 3170 * @param pVmxTransient The transient VMX execution structure. 3171 */ 3172 static hv_return_t nemR3DarwinRunGuest(PVM pVM, PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient) 3173 { 3174 TMNotifyStartOfExecution(pVM, pVCpu); 3175 3176 Assert(!pVCpu->nem.s.fCtxChanged); 3177 hv_return_t hrc; 3178 if (hv_vcpu_run_until) /** @todo Configur the deadline dynamically based on when the next timer triggers. */ 3179 hrc = hv_vcpu_run_until(pVCpu->nem.s.hVCpuId, mach_absolute_time() + 2 * RT_NS_1SEC_64 * pVM->nem.s.cMachTimePerNs); 3180 else 3181 hrc = hv_vcpu_run(pVCpu->nem.s.hVCpuId); 3182 3183 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC()); 3136 3184 3137 3185 /* 3138 * Try switch to NEM runloopstate.3186 * Sync the TPR shadow with our APIC state. 
3139 3187 */ 3140 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED)) 3141 { /* likely */ } 3142 else 3143 { 3144 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED); 3145 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu)); 3146 return VINF_SUCCESS; 3147 } 3148 3188 if ( !pVmxTransient->fIsNestedGuest 3189 && (pVCpu->nem.s.VmcsInfo.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)) 3190 { 3191 uint64_t u64Tpr; 3192 hv_return_t hrc2 = hv_vcpu_read_register(pVCpu->nem.s.hVCpuId, HV_X86_TPR, &u64Tpr); 3193 Assert(hrc2 == HV_SUCCESS); 3194 3195 if (pVmxTransient->u8GuestTpr != (uint8_t)u64Tpr) 3196 { 3197 int rc = APICSetTpr(pVCpu, (uint8_t)u64Tpr); 3198 AssertRC(rc); 3199 ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR); 3200 } 3201 } 3202 3203 return hrc; 3204 } 3205 3206 3207 /** 3208 * The normal runloop (no debugging features enabled). 3209 * 3210 * @returns Strict VBox status code. 3211 * @param pVM The cross context VM structure. 3212 * @param pVCpu The cross context virtual CPU structure. 3213 */ 3214 static VBOXSTRICTRC nemR3DarwinRunGuestNormal(PVM pVM, PVMCPU pVCpu) 3215 { 3149 3216 /* 3150 3217 * The run loop. … … 3153 3220 * everything every time. This will be optimized later. 3154 3221 */ 3155 3156 3222 VMXTRANSIENT VmxTransient; 3157 3223 RT_ZERO(VmxTransient); … … 3232 3298 pVCpu->nem.s.Event.fPending = false; 3233 3299 3234 TMNotifyStartOfExecution(pVM, pVCpu); 3235 3236 Assert(!pVCpu->nem.s.fCtxChanged); 3237 hv_return_t hrc; 3238 if (hv_vcpu_run_until) /** @todo Configur the deadline dynamically based on when the next timer triggers. */ 3239 hrc = hv_vcpu_run_until(pVCpu->nem.s.hVCpuId, mach_absolute_time() + 2 * RT_NS_1SEC_64 * pVM->nem.s.cMachTimePerNs); 3240 else 3241 hrc = hv_vcpu_run(pVCpu->nem.s.hVCpuId); 3242 3243 TMNotifyEndOfExecution(pVM, pVCpu, ASMReadTSC()); 3244 3245 /* 3246 * Sync the TPR shadow with our APIC state. 3247 */ 3248 if ( !VmxTransient.fIsNestedGuest 3249 && (pVCpu->nem.s.VmcsInfo.u32ProcCtls & VMX_PROC_CTLS_USE_TPR_SHADOW)) 3250 { 3251 uint64_t u64Tpr; 3252 hrc = hv_vcpu_read_register(pVCpu->nem.s.hVCpuId, HV_X86_TPR, &u64Tpr); 3253 Assert(hrc == HV_SUCCESS); 3254 3255 if (VmxTransient.u8GuestTpr != (uint8_t)u64Tpr) 3256 { 3257 rc = APICSetTpr(pVCpu, (uint8_t)u64Tpr); 3258 AssertRC(rc); 3259 ASMAtomicUoOrU64(&pVCpu->nem.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR); 3260 } 3261 } 3262 3300 hv_return_t hrc = nemR3DarwinRunGuest(pVM, pVCpu, &VmxTransient); 3263 3301 if (hrc == HV_SUCCESS) 3264 3302 { … … 3285 3323 } /* the run loop */ 3286 3324 3325 return rcStrict; 3326 } 3327 3328 3329 /** 3330 * The debug runloop. 3331 * 3332 * @returns Strict VBox status code. 3333 * @param pVM The cross context VM structure. 3334 * @param pVCpu The cross context virtual CPU structure. 3335 */ 3336 static VBOXSTRICTRC nemR3DarwinRunGuestDebug(PVM pVM, PVMCPU pVCpu) 3337 { 3338 /* 3339 * The run loop. 3340 * 3341 * Current approach to state updating to use the sledgehammer and sync 3342 * everything every time. This will be optimized later. 3343 */ 3344 VMXTRANSIENT VmxTransient; 3345 RT_ZERO(VmxTransient); 3346 VmxTransient.pVmcsInfo = &pVCpu->nem.s.VmcsInfo; 3347 3348 /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps. 
*/ 3349 VMXRUNDBGSTATE DbgState; 3350 vmxHCRunDebugStateInit(pVCpu, &VmxTransient, &DbgState); 3351 vmxHCPreRunGuestDebugStateUpdate(pVCpu, &VmxTransient, &DbgState); 3352 3353 /* 3354 * Poll timers and run for a bit. 3355 */ 3356 /** @todo See if we cannot optimize this TMTimerPollGIP by only redoing 3357 * the whole polling job when timers have changed... */ 3358 uint64_t offDeltaIgnored; 3359 uint64_t const nsNextTimerEvt = TMTimerPollGIP(pVM, pVCpu, &offDeltaIgnored); NOREF(nsNextTimerEvt); 3360 3361 const bool fSingleStepping = DBGFIsStepping(pVCpu); 3362 VBOXSTRICTRC rcStrict = VINF_SUCCESS; 3363 for (unsigned iLoop = 0;; iLoop++) 3364 { 3365 /* Set up VM-execution controls the next two can respond to. */ 3366 vmxHCPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState); 3367 3368 /* 3369 * Check and process force flag actions, some of which might require us to go back to ring-3. 3370 */ 3371 rcStrict = vmxHCCheckForceFlags(pVCpu, false /*fIsNestedGuest*/, fSingleStepping); 3372 if (rcStrict == VINF_SUCCESS) 3373 { /*likely */ } 3374 else 3375 { 3376 if (rcStrict == VINF_EM_RAW_TO_R3) 3377 rcStrict = VINF_SUCCESS; 3378 break; 3379 } 3380 3381 /* 3382 * Do not execute in HV if the A20 isn't enabled. 3383 */ 3384 if (PGMPhysIsA20Enabled(pVCpu)) 3385 { /* likely */ } 3386 else 3387 { 3388 rcStrict = VINF_EM_RESCHEDULE_REM; 3389 LogFlow(("NEM/%u: breaking: A20 disabled\n", pVCpu->idCpu)); 3390 break; 3391 } 3392 3393 /* 3394 * Evaluate events to be injected into the guest. 3395 * 3396 * Events in TRPM can be injected without inspecting the guest state. 3397 * If any new events (interrupts/NMI) are pending currently, we try to set up the 3398 * guest to cause a VM-exit the next time they are ready to receive the event. 3399 */ 3400 if (TRPMHasTrap(pVCpu)) 3401 vmxHCTrpmTrapToPendingEvent(pVCpu); 3402 3403 uint32_t fIntrState; 3404 rcStrict = vmxHCEvaluatePendingEvent(pVCpu, &pVCpu->nem.s.VmcsInfo, false /*fIsNestedGuest*/, &fIntrState); 3405 3406 /* 3407 * Event injection may take locks (currently the PGM lock for real-on-v86 case) and thus 3408 * needs to be done with longjmps or interrupts + preemption enabled. Event injection might 3409 * also result in triple-faulting the VM. 3410 * 3411 * With nested-guests, the above does not apply since unrestricted guest execution is a 3412 * requirement. Regardless, we do this here to avoid duplicating code elsewhere. 3413 */ 3414 rcStrict = vmxHCInjectPendingEvent(pVCpu, &pVCpu->nem.s.VmcsInfo, false /*fIsNestedGuest*/, fIntrState, fSingleStepping); 3415 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 3416 { /* likely */ } 3417 else 3418 { 3419 AssertMsg(rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fSingleStepping), 3420 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 3421 break; 3422 } 3423 3424 int rc = nemR3DarwinExportGuestState(pVM, pVCpu, &VmxTransient); 3425 AssertRCReturn(rc, rc); 3426 3427 LogFlowFunc(("Running vCPU\n")); 3428 pVCpu->nem.s.Event.fPending = false; 3429 3430 /* Override any obnoxious code in the above two calls. */ 3431 vmxHCPreRunGuestDebugStateApply(pVCpu, &VmxTransient, &DbgState); 3432 3433 hv_return_t hrc = nemR3DarwinRunGuest(pVM, pVCpu, &VmxTransient); 3434 if (hrc == HV_SUCCESS) 3435 { 3436 /* 3437 * Deal with the message. 
3438 */ 3439 rcStrict = nemR3DarwinHandleExitDebug(pVM, pVCpu, &VmxTransient, &DbgState); 3440 if (rcStrict == VINF_SUCCESS) 3441 { /* hopefully likely */ } 3442 else 3443 { 3444 LogFlow(("NEM/%u: breaking: nemR3DarwinHandleExitDebug -> %Rrc\n", pVCpu->idCpu, VBOXSTRICTRC_VAL(rcStrict) )); 3445 STAM_REL_COUNTER_INC(&pVCpu->nem.s.StatBreakOnStatus); 3446 break; 3447 } 3448 //Assert(!pVCpu->cpum.GstCtx.fExtrn); 3449 } 3450 else 3451 { 3452 AssertLogRelMsgFailedReturn(("hv_vcpu_run()) failed for CPU #%u: %#x %u\n", 3453 pVCpu->idCpu, hrc, vmxHCCheckGuestState(pVCpu, &pVCpu->nem.s.VmcsInfo)), 3454 VERR_NEM_IPE_0); 3455 } 3456 } /* the run loop */ 3457 3458 /* Restore all controls applied by vmxHCPreRunGuestDebugStateApply above. */ 3459 return vmxHCRunDebugStateRevert(pVCpu, &VmxTransient, &DbgState, rcStrict); 3460 } 3461 3462 3463 VBOXSTRICTRC nemR3NativeRunGC(PVM pVM, PVMCPU pVCpu) 3464 { 3465 LogFlow(("NEM/%u: %04x:%08RX64 efl=%#08RX64 <=\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rflags)); 3466 #ifdef LOG_ENABLED 3467 if (LogIs3Enabled()) 3468 nemR3DarwinLogState(pVM, pVCpu); 3469 #endif 3470 3471 AssertReturn(NEMR3CanExecuteGuest(pVM, pVCpu), VERR_NEM_IPE_9); 3472 3473 /* 3474 * Try switch to NEM runloop state. 3475 */ 3476 if (VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED)) 3477 { /* likely */ } 3478 else 3479 { 3480 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC_NEM, VMCPUSTATE_STARTED_EXEC_NEM_CANCELED); 3481 LogFlow(("NEM/%u: returning immediately because canceled\n", pVCpu->idCpu)); 3482 return VINF_SUCCESS; 3483 } 3484 3485 VBOXSTRICTRC rcStrict; 3486 if ( !pVCpu->nem.s.fUseDebugLoop 3487 /** @todo dtrace && (!VBOXVMM_ANY_PROBES_ENABLED() || !hmR0VmxAnyExpensiveProbesEnabled()) */ 3488 && !DBGFIsStepping(pVCpu) 3489 && !pVCpu->CTX_SUFF(pVM)->dbgf.ro.cEnabledInt3Breakpoints) 3490 rcStrict = nemR3DarwinRunGuestNormal(pVM, pVCpu); 3491 else 3492 rcStrict = nemR3DarwinRunGuestDebug(pVM, pVCpu); 3287 3493 3288 3494 /*