Changeset 46664 in vbox
- Timestamp: Jun 19, 2013 3:12:48 PM (12 years ago)
- svn:sync-xref-src-repo-rev: 86555
- Location: trunk/src/VBox/VMM
- Files: 2 edited
trunk/src/VBox/VMM/Makefile.kmk
r46560 → r46664:

@@ -56 +56 @@
 ifdef VBOX_WITH_OLD_VTX_CODE
  VMM_COMMON_DEFS += VBOX_WITH_OLD_VTX_CODE
 endif
+ifdef VBOX_WITH_OLD_AMDV_CODE
+ VMM_COMMON_DEFS += VBOX_WITH_OLD_AMDV_CODE
+endif
 ifdef VBOX_WITH_SAFE_STR
@@ -529 +532 @@
 	VMMR0/HMR0.cpp \
 	VMMR0/HMR0A.asm \
-	VMMR0/HWSVMR0.cpp \
 	VMMR0/PDMR0Device.cpp \
 	VMMR0/PDMR0Driver.cpp \
@@ -595 +597 @@
  VMMR0_SOURCES += VMMR0/HMVMXR0.cpp
 endif
+ifdef VBOX_WITH_OLD_AMDV_CODE
+ VMMR0_SOURCES += VMMR0/HWSVMR0.cpp
+else
+ VMMR0_SOURCES += VMMR0/HMSVMR0.cpp
+endif
 VMMR0_SOURCES.amd64 = \
 	VMMR0/VMMR0JmpA-amd64.asm
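The Makefile change above gates the two AMD-V backends behind a kBuild define: with VBOX_WITH_OLD_AMDV_CODE set, the legacy HWSVMR0.cpp is built, otherwise the new HMSVMR0.cpp is used. As a purely illustrative sketch of that kind of compile-time switch (the function names below are placeholders and not the actual HMR0.cpp wiring), under the stated assumptions:

    #include <stdio.h>

    /* Stub backends standing in for the two AMD-V implementations; illustrative only. */
    static int runGuestOldSvm(void) { return 0; }   /* stands in for the HWSVMR0.cpp path  */
    static int runGuestNewSvm(void) { return 0; }   /* stands in for the HMSVMR0.cpp path  */

    /* Pick the backend at compile time, mirroring the VBOX_WITH_OLD_AMDV_CODE define. */
    static int runGuest(void)
    {
    #ifdef VBOX_WITH_OLD_AMDV_CODE
        return runGuestOldSvm();
    #else
        return runGuestNewSvm();
    #endif
    }

    int main(void)
    {
        printf("AMD-V backend returned %d\n", runGuest());
        return 0;
    }

Building with -DVBOX_WITH_OLD_AMDV_CODE (or setting the define in the build configuration) selects the old path; leaving it unset selects the new one.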
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r46659 → r46664:

@@ -19 +19 @@
 *   Header Files                                                              *
 *******************************************************************************/
+#define LOG_GROUP LOG_GROUP_HM
+#include <iprt/asm-amd64-x86.h>
+#include <iprt/thread.h>
+
+#include "HMInternal.h"
+#include <VBox/vmm/vm.h>
+#include "HWSVMR0.h"
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/vmm/iom.h>
+#include <VBox/vmm/tm.h>
 
 #ifdef DEBUG_ramshankar
@@ -75 +86 @@
     do \
     { \
-        pCtx->reg.Sel       = pVmcb->guest.REG.u16Sel; \
-        pCtx->reg.ValidSel  = pVmcb->guest.REG.u16Sel; \
-        pCtx->reg.fFlags    = CPUMSELREG_FLAGS_VALID; \
-        pCtx->reg.u32Limit  = pVmcb->guest.REG.u32Limit; \
-        pCtx->reg.u64Base   = pVmcb->guest.REG.u64Base; \
-        pCtx->reg.Attr.u    = HMSVM_VMCB_2_CPU_SEG_ATTR(pVmcb->guest.REG.u16Attr); \
+        pMixedCtx->reg.Sel       = pVmcb->guest.REG.u16Sel; \
+        pMixedCtx->reg.ValidSel  = pVmcb->guest.REG.u16Sel; \
+        pMixedCtx->reg.fFlags    = CPUMSELREG_FLAGS_VALID; \
+        pMixedCtx->reg.u32Limit  = pVmcb->guest.REG.u32Limit; \
+        pMixedCtx->reg.u64Base   = pVmcb->guest.REG.u64Base; \
+        pMixedCtx->reg.Attr.u    = HMSVM_VMCB_2_CPU_SEG_ATTR(pVmcb->guest.REG.u16Attr); \
     } while (0)
 /** @} */
@@ -145 +156 @@
 #define HMSVM_VMCB_CLEAN_AVIC            RT_BIT(11)
 /** Mask of all valid VMCB Clean bits. */
-#define HMSVM_VMCB_CLEAN_ALL             (  HMSVM_VMCB_CLEAN_INTERCEPTS
-                                          | HMSVM_VMCB_CLEAN_IOPM_MSRPM
-                                          | HMSVM_VMCB_CLEAN_ASID
-                                          | HMSVM_VMCB_CLEAN_TPR
-                                          | HMSVM_VMCB_CLEAN_NP
-                                          | HMSVM_VMCB_CLEAN_CRX
-                                          | HMSVM_VMCB_CLEAN_DRX
-                                          | HMSVM_VMCB_CLEAN_DT
-                                          | HMSVM_VMCB_CLEAN_SEG
-                                          | HMSVM_VMCB_CLEAN_CR2
-                                          | HMSVM_VMCB_CLEAN_LBR
+#define HMSVM_VMCB_CLEAN_ALL             (  HMSVM_VMCB_CLEAN_INTERCEPTS  \
+                                          | HMSVM_VMCB_CLEAN_IOPM_MSRPM  \
+                                          | HMSVM_VMCB_CLEAN_ASID        \
+                                          | HMSVM_VMCB_CLEAN_TPR         \
+                                          | HMSVM_VMCB_CLEAN_NP          \
+                                          | HMSVM_VMCB_CLEAN_CRX_EFER    \
+                                          | HMSVM_VMCB_CLEAN_DRX         \
+                                          | HMSVM_VMCB_CLEAN_DT          \
+                                          | HMSVM_VMCB_CLEAN_SEG         \
+                                          | HMSVM_VMCB_CLEAN_CR2         \
+                                          | HMSVM_VMCB_CLEAN_LBR         \
                                           | HMSVM_VMCB_CLEAN_AVIC)
 /** @} */
@@ -212 +223 @@
 *******************************************************************************/
 static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite);
+static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu);
+
+HMSVM_EXIT_DECL hmR0SvmExitIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitWbinvd(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitInvd(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitCpuid(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitRdtsc(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitRdtscp(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitRdpmc(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitInvlpg(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitHlt(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitMonitor(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitMwait(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitShutdown(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitReadCRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitWriteCRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitMsr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitReadDRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitWriteDRx(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitIOInstr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitNestedPF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitVIntr(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitVmmCall(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitXcptPF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitXcptNM(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitXcptMF(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
+HMSVM_EXIT_DECL hmR0SvmExitXcptDB(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient);
 
 DECLINLINE(int) hmR0SvmHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient);
@@ -331 +371 @@
 
     /* Set all bits to intercept all IO accesses. */
-    ASMMemFill32(pVM->hm.s.svm.pvIOBitmap, 3 << PAGE_SHIFT, UINT32_C(0xffffffff));
+    ASMMemFill32(g_pvIOBitmap, 3 << PAGE_SHIFT, UINT32_C(0xffffffff));
+    return VINF_SUCCESS;
 }
@@ -342 +383 @@
     if (g_hMemObjIOBitmap != NIL_RTR0MEMOBJ)
     {
-        RTR0MemObjFree(pVM->hm.s.svm.hMemObjIOBitmap, false /* fFreeMappings */);
+        RTR0MemObjFree(g_hMemObjIOBitmap, false /* fFreeMappings */);
         g_pvIOBitmap     = NULL;
         g_HCPhysIOBitmap = 0;
@@ -424 +465 @@
     for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
+        PVMCPU pVCpu = &pVM->aCpus[i];
+
         /*
          * Allocate one page for the host-context VM control block (VMCB). This is used for additional host-state (such as
@@ -455 +498 @@
         rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMsrBitmap, 2 << PAGE_SHIFT, false /* fExecutable */);
         if (RT_FAILURE(rc))
-            failure_cleanup;
+            goto failure_cleanup;
 
         pVCpu->hm.s.svm.pvMsrBitmap = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjMsrBitmap);
@@ -466 +509 @@
 
 failure_cleanup:
-    hmR0SvmFreeVMStructs(pVM);
+    hmR0SvmFreeStructs(pVM);
     return rc;
 }
@@ -479 +522 @@
 VMMR0DECL(int) SVMR0TermVM(PVM pVM)
 {
-    hmR0SvmFreeVMStructs(pVM);
+    hmR0SvmFreeStructs(pVM);
     return VINF_SUCCESS;
 }
@@ -492 +535 @@
  * @param   enmWrite    MSR write permissions.
  */
-static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite)
+static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, unsigned uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite)
 {
     unsigned ulBit;
@@ -541 +584 @@
         ASMBitClear(pbMsrBitmap, ulBit + 1);
 
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
 }
@@ -821 +865 @@
     }
     else
-        Assert(pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE)
+        Assert(pVmcb->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_ENTIRE);
 #endif
 }
@@ -900 +944 @@
 
 
-DECLINLINE(void) hmR0SvmAddXcptIntercept(uint32_t u32Xcpt)
-{
-    if (!(pVmcb->ctrl.u32InterceptException & RT_BIT(u32Xcpt))
+/**
+ * Adds an exception to the intercept exception bitmap in the VMCB and updates
+ * the corresponding VMCB Clean Bit.
+ *
+ * @param   pVmcb       Pointer to the VMCB.
+ * @param   u32Xcpt     The value of the exception (X86_XCPT_*).
+ */
+DECLINLINE(void) hmR0SvmAddXcptIntercept(PSVMVMCB pVmcb, uint32_t u32Xcpt)
+{
+    if (!(pVmcb->ctrl.u32InterceptException & RT_BIT(u32Xcpt)))
     {
         pVmcb->ctrl.u32InterceptException |= RT_BIT(u32Xcpt);
@@ -909 +960 @@
 }
 
-DECLINLINE(void) hmR0SvmRemoveXcptIntercept(uint32_t u32Xcpt)
+
+/**
+ * Removes an exception from the intercept-exception bitmap in the VMCB and
+ * updates the corresponding VMCB Clean Bit.
+ *
+ * @param   pVmcb       Pointer to the VMCB.
+ * @param   u32Xcpt     The value of the exception (X86_XCPT_*).
+ */
+DECLINLINE(void) hmR0SvmRemoveXcptIntercept(PSVMVMCB pVmcb, uint32_t u32Xcpt)
 {
 #ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS
@@ -936 +995 @@
      * Guest CR0.
      */
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
     {
@@ -971 +1031 @@
         {
             fInterceptNM = true;        /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
-            u32GuestCR0 |= X86_CR0_TS   /* Guest can task switch quickly and do lazy FPU syncing. */
+            u64GuestCR0 |= X86_CR0_TS   /* Guest can task switch quickly and do lazy FPU syncing. */
                         |  X86_CR0_MP;  /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
         }
@@ -979 +1039 @@
          */
         if (fInterceptNM)
-            hmR0SvmAddXcptIntercept(X86_XCPT_NM);
+            hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_NM);
         else
-            hmR0SvmRemoveXcptIntercept(X86_XCPT_NM);
+            hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_NM);
 
         if (fInterceptMF)
-            hmR0SvmAddXcptIntercept(X86_XCPT_MF);
+            hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_MF);
         else
-            hmR0SvmRemoveXcptIntercept(X86_XCPT_MF);
+            hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_MF);
 
         pVmcb->guest.u64CR0 = u64GuestCR0;
@@ -1160 +1220 @@
      * See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks".
      */
-    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_SVM_GUEST_EFER_MSR
+    if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_SVM_GUEST_EFER_MSR)
     {
         pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
@@ -1198 +1258 @@
  *
  * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pVmcb       Pointer to the VMCB.
  * @param   pCtx        Pointer to the guest-CPU context.
  *
@@ -1203 +1264 @@
  * @remarks Requires EFLAGS to be up-to-date in the VMCB!
  */
-DECLINLINE(void) hmR0SvmLoadGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
+DECLINLINE(void) hmR0SvmLoadGuestDebugRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
 {
     if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
@@ -1230 +1291 @@
     }
 
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     if (CPUMGetHyperDR7(pVCpu) & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
     {
         if (!CPUMIsHyperDebugStateActive(pVCpu))
         {
-            rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
+            int rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
             AssertRC(rc);
@@ -1249 +1311 @@
         if (!CPUMIsGuestDebugStateActive(pVCpu))
         {
-            rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
+            int rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
             AssertRC(rc);
             STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
@@ -1263 +1325 @@
 
     if (fInterceptDB)
-        hmR0SvmAddXcptIntercept(X86_XCPT_DB);
+        hmR0SvmAddXcptIntercept(pVmcb, X86_XCPT_DB);
     else
-        hmR0SvmRemoveXcptIntercept(X86_XCPT_DB);
+        hmR0SvmRemoveXcptIntercept(pVmcb, X86_XCPT_DB);
 
     if (fInterceptMovDRx)
@@ -1318 +1380 @@
         if (pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive)
         {
-            pCtx->msrLSTAR = u8LastTPR;
+            pCtx->msrLSTAR = u8Tpr;
 
             /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
@@ -1337 +1399 @@
 
             /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */
-            if (fPending)
+            if (fPendingIntr)
                 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
             else
@@ -1570 +1632 @@
     /** @todo Verify this. */
     if (   !pMixedCtx->cs.Attr.n.u1Granularity
-        &&
-        &&
+        && pMixedCtx->cs.Attr.n.u1Present
+        && pMixedCtx->cs.u32Limit > UINT32_C(0xfffff))
     {
         Assert((pMixedCtx->cs.u32Limit & 0xfff) == 0xfff);
@@ -1577 +1639 @@
     }
 #ifdef VBOX_STRICT
-# define HMSVM_ASSERT_SEL_GRANULARITY(reg) \
+# define HMSVM_ASSERT_SEG_GRANULARITY(reg) \
     AssertMsg(   !pMixedCtx->reg.Attr.n.u1Present \
               || (   pMixedCtx->reg.Attr.n.u1Granularity \
                   ?  (pMixedCtx->reg.u32Limit & 0xfff) == 0xfff \
                   :  pMixedCtx->reg.u32Limit <= UINT32_C(0xfffff)), \
-              ("Invalid Segment Attributes %#x %#x %#llx\n", pMixedCtx->reg.u32Limit,
+              ("Invalid Segment Attributes %#x %#x %#llx\n", pMixedCtx->reg.u32Limit, \
               pMixedCtx->reg.Attr.u, pMixedCtx->reg.u64Base))
@@ -1625 +1687 @@
      * This is done as the very last step of syncing the guest state, as PGMUpdateCR3() may cause longjmp's to ring-3.
      */
-    if (   pVM->hm.s.fNestedPaging
+    if (   pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging
         && pMixedCtx->cr3 != pVmcb->guest.u64CR3)
     {
@@ -1669 +1731 @@
         CPUMR0LoadHostDebugState(pVM, pVCpu);
         Assert(!CPUMIsHyperDebugStateActive(pVCpu));
+#ifdef VBOX_STRICT
+        PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
         Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
         Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
+#endif
     }
@@ -1854 +1919 @@
 
 
-/**
- * Converts any TRPM trap into a pending SVM event. This is typically used when
+
+/**
+ * Converts any TRPM trap into a pending HM event. This is typically used when
  * entering from ring-3 (not longjmp returns).
  *
@@ -1874 +1940 @@
     AssertRC(rc);
 
-    PSVMEVENT pEvent = &pVCpu->hm.s.Event;
-    pEvent->u = 0;
-    pEvent->n.u1Valid = 1;
+    SVMEVENT Event;
+    Event.u = 0;
+    Event.n.u1Valid = 1;
 
     /* Refer AMD spec. 15.20 "Event Injection" for the format. */
     if (enmTrpmEvent == TRPM_TRAP)
     {
-        pEvent->n.u3Type = SVM_EVENT_EXCEPTION;
+        Event.n.u3Type = SVM_EVENT_EXCEPTION;
         switch (uVector)
         {
@@ -1892 +1958 @@
             case X86_XCPT_AC:
             {
-                pEvent->n.u32ErrorCode     = uErrCode;
-                pEvent->n.u1ErrorCodeValid = 1;
+                Event.n.u32ErrorCode     = uErrCode;
+                Event.n.u1ErrorCodeValid = 1;
                 break;
             }
@@ -1901 +1967 @@
     {
         if (uVector == X86_XCPT_NMI)
-            pEvent->n.u3Type = SVM_EVENT_NMI;
+            Event.n.u3Type = SVM_EVENT_NMI;
         else
-            pEvent->n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
+            Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
     }
     else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
-        pEvent->n.u3Type = SVM_EVENT_SOFTWARE_INT;
+        Event.n.u3Type = SVM_EVENT_SOFTWARE_INT;
     else
         AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
@@ -1913 +1979 @@
     AssertRC(rc);
 
-    Log4(("TRPM->HM event: u=%#RX64 u8Vector=%#x uErrorCodeValid=%#x uErrorCode=%#RX32\n", pEvent->u, pEvent->n.u8Vector,
-          pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
+    Log4(("TRPM->HM event: u=%#RX64 u8Vector=%#x uErrorCodeValid=%RTbool uErrorCode=%#RX32\n", Event.u, Event.n.u8Vector,
+          !!Event.n.u1ErrorCodeValid, Event.n.u32ErrorCode));
+    hmR0SvmSetPendingEvent(pVCpu, &Event, GCPtrFaultAddress);
 }
@@ -1929 +1996 @@
     Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
 
-    PSVMEVENT pEvent      = &pVCpu->hm.s.Event;
-    uint8_t   uVector     = pEvent->n.u8Vector;
-    uint8_t   uVectorType = pEvent->n.u3Type;
+    SVMEVENT Event;
+    Event.u = pVCpu->hm.s.Event.u64IntrInfo;
+
+    uint8_t uVector     = Event.n.u8Vector;
+    uint8_t uVectorType = Event.n.u3Type;
 
     TRPMEVENT enmTrapType;
     switch (uVectorType)
     {
-        case SVM_EVENT_EXTERNAL_IRQ
+        case SVM_EVENT_EXTERNAL_IRQ:
         case SVM_EVENT_NMI:
            enmTrapType = TRPM_HARDWARE_INT;
@@ -1957 +2026 @@
     AssertRC(rc);
 
-    if (pEvent->n.u1ErrorCodeValid)
-        TRPMSetErrorCode(pVCpu, pEvent->n.u32ErrorCode);
+    if (Event.n.u1ErrorCodeValid)
+        TRPMSetErrorCode(pVCpu, Event.n.u32ErrorCode);
 
     if (   uVectorType == SVM_EVENT_EXCEPTION
@@ -2061 +2130 @@
         {
             pVCpu->hm.s.Event.fPending = false;
-            hmR0SvmInjectEvent(pVCpu, pVmcb, pCtx, &Event);
+            hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
         }
         else
@@ -2076 +2145 @@
         Event.n.u3Type  = SVM_EVENT_NMI;
 
-        hmR0SvmInjectEvent(pVCpu, pVmcb, pCtx, &Event);
+        hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
         VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
     }
@@ -2090 +2159 @@
         {
             uint8_t u8Interrupt;
-            rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
+            int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
             if (RT_SUCCESS(rc))
             {
@@ -2099 +2168 @@
                 Event.n.u3Type  = SVM_EVENT_EXTERNAL_IRQ;
 
-                hmR0SvmInjectEvent(pVCpu, pVmcb, pCtx, &Event);
+                hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
                 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
             }
@@ -2289 +2358 @@
     if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
     {
-        rc = PGMUpdateCR3(pVCpu, pCtx->cr3);
+        int rc = PGMUpdateCR3(pVCpu, pCtx->cr3);
         Assert(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3);
         Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
@@ -2297 +2366 @@
     if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
     {
-        rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+        int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
         if (rc != VINF_SUCCESS)
        {
-            AssertRC(rc);
             Log4(("hmR0SvmCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
             return rc;
@@ -2312 +2380 @@
     {
         STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
-        rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
+        int rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
         Log4(("hmR0SvmCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
         return rc;
@@ -2340 +2408 @@
     }
 
-    /* Paranoia. */
-    Assert(rc != VERR_EM_INTERPRETER);
     return VINF_SUCCESS;
@@ -2362 +2428 @@
  * @retval VINF_*   scheduling changes, we have to go back to ring-3.
  *
+ * @param   pVM             Pointer to the VM.
  * @param   pVCpu           Pointer to the VMCPU.
  * @param   pCtx            Pointer to the guest-CPU context.
  * @param   pSvmTransient   Pointer to the SVM transient structure.
  */
-DECLINE(int) hmR0SvmPreRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+DECLINLINE(int) hmR0SvmPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
 {
     /* Check force flag actions that might require us to go back to ring-3. */
@@ -2430 +2497 @@
      *        should be done wrt to the VMCB Clean Bit, but we'll find out the
      *        hard way. */
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     pVmcb->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;
@@ -2469 +2537 @@
         pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
         uint64_t u64GuestTscAux = 0;
-        rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAux);
+        int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAux);
        AssertRC(rc2);
        ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
@@ -2518 +2586 @@
  *          unconditionally when it is safe to do so.
  */
-DECLINLINE(void) hmR0SvmPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient, rcVMRun)
+DECLINLINE(void) hmR0SvmPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PSVMTRANSIENT pSvmTransient, int rcVMRun)
 {
     Assert(!VMMRZCallRing3IsEnabled(pVCpu));
@@ -2565 +2633 @@
     /* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */
     if (   pVM->hm.s.fTPRPatchingActive
-        && (pCtx->msrLSTAR & 0xff) != pSvmTransient->u8GuestTpr)
+        && (pMixedCtx->msrLSTAR & 0xff) != pSvmTransient->u8GuestTpr)
     {
-        int rc = PDMApicSetTPR(pVCpu, pCtx->msrLSTAR & 0xff);
+        int rc = PDMApicSetTPR(pVCpu, (pMixedCtx->msrLSTAR & 0xff));
         AssertRC(rc);
     }
@@ -2630 +2698 @@
                    || SvmTransient.u64ExitCode == (uint64_t)SVM_EXIT_INVALID))  /* Check for invalid guest-state errors. */
         {
-            if (rc == VINF_SUCCESS) ;
+            if (rc == VINF_SUCCESS)
                 rc = VERR_SVM_INVALID_GUEST_STATE;
-            hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &SvmTransient);
+            hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
             return rc;
         }
@@ -2749 +2817 @@
         default:
         {
-            case SVM_EXIT_READ_DR0:     case SVM_EXIT_READ_DR1:     case SVM_EXIT_READ_DR2:     case SVM_EXIT_READ_DR3:
-            case SVM_EXIT_READ_DR6:     case SVM_EXIT_READ_DR7:     case SVM_EXIT_READ_DR8:     case SVM_EXIT_READ_DR9:
-            case SVM_EXIT_READ_DR10:    case SVM_EXIT_READ_DR11:    case SVM_EXIT_READ_DR12:    case SVM_EXIT_READ_DR13:
-            case SVM_EXIT_READ_DR14:    case SVM_EXIT_READ_DR15:
-                return hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient);
-
-            case SVM_EXIT_WRITE_DR0:    case SVM_EXIT_WRITE_DR1:    case SVM_EXIT_WRITE_DR2:    case SVM_EXIT_WRITE_DR3:
-            case SVM_EXIT_WRITE_DR6:    case SVM_EXIT_WRITE_DR7:    case SVM_EXIT_WRITE_DR8:    case SVM_EXIT_WRITE_DR9:
-            case SVM_EXIT_WRITE_DR10:   case SVM_EXIT_WRITE_DR11:   case SVM_EXIT_WRITE_DR12:   case SVM_EXIT_WRITE_DR13:
-            case SVM_EXIT_WRITE_DR14:   case SVM_EXIT_WRITE_DR15:
-                return hmR0SvmExitWriteDRx(pVCpu, pCtx, pSvmTransient);
-
-            case SVM_EXIT_TASK_SWITCH:
-                return hmR0SvmExitTaskSwitch(pVCpu, pCtx, pSvmTransient);
-
-            case SVM_EXIT_VMMCALL:
-                return hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient);
-
-            case SVM_EXIT_INVLPGA:
-            case SVM_EXIT_RSM:
-            case SVM_EXIT_VMRUN:
-            case SVM_EXIT_VMLOAD:
-            case SVM_EXIT_VMSAVE:
-            case SVM_EXIT_STGI:
-            case SVM_EXIT_CLGI:
-            case SVM_EXIT_SKINIT:
-                return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient);
+            switch (pSvmTransient->u64ExitCode)
+            {
+                case SVM_EXIT_READ_DR0:     case SVM_EXIT_READ_DR1:     case SVM_EXIT_READ_DR2:     case SVM_EXIT_READ_DR3:
+                case SVM_EXIT_READ_DR6:     case SVM_EXIT_READ_DR7:     case SVM_EXIT_READ_DR8:     case SVM_EXIT_READ_DR9:
+                case SVM_EXIT_READ_DR10:    case SVM_EXIT_READ_DR11:    case SVM_EXIT_READ_DR12:    case SVM_EXIT_READ_DR13:
+                case SVM_EXIT_READ_DR14:    case SVM_EXIT_READ_DR15:
+                    return hmR0SvmExitReadDRx(pVCpu, pCtx, pSvmTransient);
+
+                case SVM_EXIT_WRITE_DR0:    case SVM_EXIT_WRITE_DR1:    case SVM_EXIT_WRITE_DR2:    case SVM_EXIT_WRITE_DR3:
+                case SVM_EXIT_WRITE_DR6:    case SVM_EXIT_WRITE_DR7:    case SVM_EXIT_WRITE_DR8:    case SVM_EXIT_WRITE_DR9:
+                case SVM_EXIT_WRITE_DR10:   case SVM_EXIT_WRITE_DR11:   case SVM_EXIT_WRITE_DR12:   case SVM_EXIT_WRITE_DR13:
+                case SVM_EXIT_WRITE_DR14:   case SVM_EXIT_WRITE_DR15:
+                    return hmR0SvmExitWriteDRx(pVCpu, pCtx, pSvmTransient);
+
+                case SVM_EXIT_TASK_SWITCH:
+                    return hmR0SvmExitTaskSwitch(pVCpu, pCtx, pSvmTransient);
+
+                case SVM_EXIT_VMMCALL:
+                    return hmR0SvmExitVmmCall(pVCpu, pCtx, pSvmTransient);
+
+                case SVM_EXIT_INVLPGA:
+                case SVM_EXIT_RSM:
+                case SVM_EXIT_VMRUN:
+                case SVM_EXIT_VMLOAD:
+                case SVM_EXIT_VMSAVE:
+                case SVM_EXIT_STGI:
+                case SVM_EXIT_CLGI:
+                case SVM_EXIT_SKINIT:
+                    return hmR0SvmExitSetPendingXcptUD(pVCpu, pCtx, pSvmTransient);
 
 #ifdef HMSVM_ALWAYS_TRAP_ALL_XCPTS
-            case SVM_EXIT_EXCEPTION_0:             /* X86_XCPT_DE */
-            case SVM_EXIT_EXCEPTION_3:             /* X86_XCPT_BP */
-            case SVM_EXIT_EXCEPTION_6:             /* X86_XCPT_UD */
-            case SVM_EXIT_EXCEPTION_B:             /* X86_XCPT_NP */
-            case SVM_EXIT_EXCEPTION_C:             /* X86_XCPT_SS */
-            case SVM_EXIT_EXCEPTION_D:             /* X86_XCPT_GP */
-            {
-                SVMEVENT Event;
-                Event.u          = 0;
-                Event.n.u1Valid  = 1;
-                Event.n.u3Type   = SVM_EVENT_EXCEPTION;
-                Event.n.u8Vector = pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0;
-
-                switch (Event.n.u8Vector)
-                {
-                    case X86_XCPT_GP:
-                        Event.n.u1ErrorCodeValid = 1;
-                        Event.n.u32ErrorCode     = pVmcb->ctrl.u64ExitInfo1;
-                        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
-                        break;
-                    case X86_XCPT_BP:
-                        /** Saves the wrong EIP on the stack (pointing to the int3) instead of the
-                         *  next instruction. */
-                        /** @todo Investigate this later. */
-                        break;
-                    case X86_XCPT_DE:
-                        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
-                        break;
-                    case X86_XCPT_UD:
-                        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
-                        break;
-                    case X86_XCPT_SS:
-                        Event.n.u1ErrorCodeValid = 1;
-                        Event.n.u32ErrorCode     = pVmcb->ctrl.u64ExitInfo1;
-                        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
-                        break;
-                    case X86_XCPT_NP:
-                        Event.n.u1ErrorCodeValid = 1;
-                        Event.n.u32ErrorCode     = pVmcb->ctrl.u64ExitInfo1;
-                        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
-                        break;
-                }
-                Log4(("#Xcpt: Vector=%#x at CS:RIP=%04x:%RGv\n", Event.n.u8Vector, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
-                hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
-                return VINF_SUCCESS;
-            }
-#endif
-
-            default:
-            {
-                AssertMsgFailed(("hmR0SvmHandleExit: Unexpected exit code %#x\n", u32ExitCode));
-                return VERR_SVM_UNEXPECTED_EXIT;
+                case SVM_EXIT_EXCEPTION_0:             /* X86_XCPT_DE */
+                case SVM_EXIT_EXCEPTION_3:             /* X86_XCPT_BP */
+                case SVM_EXIT_EXCEPTION_6:             /* X86_XCPT_UD */
+                case SVM_EXIT_EXCEPTION_B:             /* X86_XCPT_NP */
+                case SVM_EXIT_EXCEPTION_C:             /* X86_XCPT_SS */
+                case SVM_EXIT_EXCEPTION_D:             /* X86_XCPT_GP */
+                {
+                    SVMEVENT Event;
+                    Event.u          = 0;
+                    Event.n.u1Valid  = 1;
+                    Event.n.u3Type   = SVM_EVENT_EXCEPTION;
+                    Event.n.u8Vector = pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0;
+
+                    switch (Event.n.u8Vector)
+                    {
+                        case X86_XCPT_GP:
+                            Event.n.u1ErrorCodeValid = 1;
+                            Event.n.u32ErrorCode     = pVmcb->ctrl.u64ExitInfo1;
+                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
+                            break;
+                        case X86_XCPT_BP:
+                            /** Saves the wrong EIP on the stack (pointing to the int3) instead of the
+                             *  next instruction. */
+                            /** @todo Investigate this later. */
+                            break;
+                        case X86_XCPT_DE:
+                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
+                            break;
+                        case X86_XCPT_UD:
+                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
+                            break;
+                        case X86_XCPT_SS:
+                            Event.n.u1ErrorCodeValid = 1;
+                            Event.n.u32ErrorCode     = pVmcb->ctrl.u64ExitInfo1;
+                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
+                            break;
+                        case X86_XCPT_NP:
+                            Event.n.u1ErrorCodeValid = 1;
+                            Event.n.u32ErrorCode     = pVmcb->ctrl.u64ExitInfo1;
+                            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
+                            break;
+                    }
+                    Log4(("#Xcpt: Vector=%#x at CS:RIP=%04x:%RGv\n", Event.n.u8Vector, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
+                    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
+                    return VINF_SUCCESS;
+                }
+#endif  /* HMSVM_ALWAYS_TRAP_ALL_XCPTS */
+
+                default:
+                {
+                    AssertMsgFailed(("hmR0SvmHandleExit: Unexpected exit code %#x\n", u32ExitCode));
+                    return VERR_SVM_UNEXPECTED_EXIT;
+                }
             }
         }
     }
@@ -2861 +2932 @@
         if (VMMR0IsLogFlushDisabled(pVCpu)) \
             HMSVM_ASSERT_PREEMPT_CPUID(); \
-        HMSVM_STOP_EXIT_DISPATCH_PROF(); \
     } while (0)
 #else   /* Release builds */
@@ -2892 +2962 @@
 
         GCPtrPage = Param1.val.val64;
-        rc = EMInterpretInvlpg(pVCpu->CTX_SUFF(pVM), pVCpu, pRegFrame, GCPtrPage);
+        VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVCpu->CTX_SUFF(pVM), pVCpu, pRegFrame, GCPtrPage);
+        rc = VBOXSTRICTRC_VAL(rc2);
     }
     else
@@ -2951 +3022 @@
     Event.n.u3Type   = SVM_EVENT_EXCEPTION;
     Event.n.u8Vector = X86_XCPT_UD;
-    hmR0SvmSetPendingEvent(pVCpu, &Event);
+    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
 }
@@ -2967 +3038 @@
     Event.n.u3Type   = SVM_EVENT_EXCEPTION;
     Event.n.u8Vector = X86_XCPT_DB;
-    hmR0SvmSetPendingEvent(pVCpu, &Event);
+    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
 }
@@ -2994 +3065 @@
     pCtx->cr2 = uFaultAddress;
 
-    hmR0SvmSetPendingEvent(pVCpu, &Event);
+    hmR0SvmSetPendingEvent(pVCpu, &Event, uFaultAddress);
 }
@@ -3011 +3082 @@
     Event.n.u3Type   = SVM_EVENT_EXCEPTION;
     Event.n.u8Vector = X86_XCPT_NM;
-    hmR0SvmSetPendingEvent(pVCpu, &Event);
+    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
 }
@@ -3027 +3098 @@
     Event.n.u3Type   = SVM_EVENT_EXCEPTION;
     Event.n.u8Vector = X86_XCPT_MF;
-    hmR0SvmSetPendingEvent(pVCpu, &Event);
+    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
 }
@@ -3045 +3116 @@
     Event.n.u1ErrorCodeValid = 1;
     Event.n.u32ErrorCode     = 0;
-    hmR0SvmSetPendingEvent(pVCpu, &Event);
+    hmR0SvmSetPendingEvent(pVCpu, &Event, 0 /* GCPtrFaultAddress */);
 }
@@ -3165 +3236 @@
     if (pVmcb->ctrl.ExitIntInfo.n.u1Valid)
     {
+        uint8_t uIdtVector  = pVmcb->ctrl.ExitIntInfo.n.u8Vector;
+        uint8_t uExitVector = UINT8_MAX;    /* Start off with an invalid vector, updated when it's valid. See below. */
+
+        typedef enum
+        {
+            SVMREFLECTXCPT_XCPT,    /* Reflect the exception to the guest or for further evaluation by VMM. */
+            SVMREFLECTXCPT_DF,      /* Reflect the exception as a double-fault to the guest. */
+            SVMREFLECTXCPT_TF,      /* Indicate a triple faulted state to the VMM. */
+            SVMREFLECTXCPT_NONE     /* Nothing to reflect. */
+        } SVMREFLECTXCPT;
+
+        SVMREFLECTXCPT enmReflect = SVMREFLECTXCPT_NONE;
         if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_EXCEPTION)
         {
-            typedef enum
-            {
-                SVMREFLECTXCPT_XCPT,    /* Reflect the exception to the guest or for further evaluation by VMM. */
-                SVMREFLECTXCPT_DF,      /* Reflect the exception as a double-fault to the guest. */
-                SVMREFLECTXCPT_TF,      /* Indicate a triple faulted state to the VMM. */
-                SVMREFLECTXCPT_NONE     /* Nothing to reflect. */
-            } SVMREFLECTXCPT;
-
-            SVMREFLECTXCPT enmReflect = SVMREFLECTXCPT_NONE;
-
             if (pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0 <= SVM_EXIT_EXCEPTION_1F)
             {
-                uint8_t uExitVector = (uint8_t)(pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0);
-                uint8_t uIdtVector  = pVmcb->ctrl.ExitIntInfo.n.u8Vector;
-
+                uExitVector = (uint8_t)(pSvmTransient->u64ExitCode - SVM_EXIT_EXCEPTION_0);
                 if (   uExitVector == X86_XCPT_PF
                     && uIdtVector  == X86_XCPT_PF)
@@ -3194 +3265 @@
                 {
                     enmReflect = SVMREFLECTXCPT_DF;
-                }
+                    Log4(("IDT: Pending vectoring #DF %#RX64 uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntrInfo, uExitVector));
+                }
                 else if (uIdtVector == X86_XCPT_DF)
                     enmReflect = SVMREFLECTXCPT_TF;
@@ -3328 +3400 @@
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     PVM pVM = pVCpu->CTX_SUFF(pVM);
-    rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
+    int rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
     if (RT_LIKELY(rc == VINF_SUCCESS))
         pCtx->rip += 2;     /* Hardcoded opcode, AMD-V doesn't give us this information. */
@@ -3347 +3419 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-    int rc = EMInterpretRdtscp(pVM, pVCpu, pCtx);
+    int rc = EMInterpretRdtscp(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
     if (RT_LIKELY(rc == VINF_SUCCESS))
         pCtx->rip += 3;     /* Hardcoded opcode, AMD-V doesn't give us this information. */
@@ -3366 +3438 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-    int rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
+    int rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
     if (RT_LIKELY(rc == VINF_SUCCESS))
         pCtx->rip += 2;     /* Hardcoded opcode, AMD-V doesn't give us this information. */
@@ -3385 +3457 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     Assert(!pVM->hm.s.fNestedPaging);
@@ -3414 +3487 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-    int rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pCtx));
+    int rc = EMInterpretMonitor(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
     if (RT_LIKELY(rc == VINF_SUCCESS))
         pCtx->rip += 3;     /* Hardcoded opcode, AMD-V doesn't give us this information. */
@@ -3433 +3506 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-    int rc = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pCtx));
+    VBOXSTRICTRC rc2 = EMInterpretMWait(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
+    int rc = VBOXSTRICTRC_VAL(rc2);
     if (   rc == VINF_EM_HALT
         || rc == VINF_SUCCESS)
@@ -3475 +3549 @@
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     /** @todo Decode Assist. */
-    int rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
+    VBOXSTRICTRC rc2 = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
+    int rc = VBOXSTRICTRC_VAL(rc2);
     Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
     Assert((pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0) <= 15);
@@ -3490 +3565 @@
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     /** @todo Decode Assist. */
-    int rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
-    if (rc == VINF_SUCCCES)
+    VBOXSTRICTRC rc2 = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
+    int rc = VBOXSTRICTRC_VAL(rc2);
+    if (rc == VINF_SUCCESS)
     {
         /* RIP has been updated by EMInterpretInstruction(). */
@@ -3502 +3578 @@
 
             case 3:     /* CR3. */
-                Assert(!pVM->hm.s.fNestedPaging);
+                Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
                 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
                 break;
@@ -3515 +3591 @@
 
             default:
-                AsserMsgFailed(("hmR0SvmExitWriteCRx: Invalid/Unexpected Write-CRx exit. u64ExitCode=%#RX64 %#x CRx=%#RX64\n",
+                AssertMsgFailed(("hmR0SvmExitWriteCRx: Invalid/Unexpected Write-CRx exit. u64ExitCode=%#RX64 %#x CRx=%#RX64\n",
                                 pSvmTransient->u64ExitCode, pSvmTransient->u64ExitCode - SVM_EXIT_WRITE_CR0));
                 break;
@@ -3533 +3609 @@
 {
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
-    return hmR0SvmSetPendingXcptUD(pVCpu);
+    hmR0SvmSetPendingXcptUD(pVCpu);
+    return VINF_SUCCESS;
 }
@@ -3544 +3621 @@
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
 
     int rc;
@@ -3564 +3642 @@
         }
 
-        rc = EMInterpretWrmsr(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
+        rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx));
         AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: EMInterpretWrmsr failed rc=%Rrc\n", rc));
@@ -3574 +3652 @@
         /* MSR Read access. */
         STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
-        int rc = EMInterpretRdmsr(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx));
+        rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx));
         AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0SvmExitMsr: EMInterpretRdmsr failed rc=%Rrc\n", rc));
@@ -3609 +3687 @@
         /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
         PVM pVM = pVCpu->CTX_SUFF(pVM);
-        rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
+        int rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
         AssertRC(rc);
         Assert(CPUMIsGuestDebugStateActive(pVCpu));
 
         STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
-        return VINF_SUCCESS;
+        return rc;
     }
 
     /** @todo Decode assist. */
-    int rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
+    VBOXSTRICTRC rc2 = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */);
+    int rc = VBOXSTRICTRC_VAL(rc2);
     if (RT_LIKELY(rc == VINF_SUCCESS))
@@ -3625 +3704 @@
     }
     else
-        Assert(c == VERR_EM_INTERPRETER);
+        Assert(rc == VERR_EM_INTERPRETER);
     return rc;
@@ -3656 +3735 @@
           the result (in AL/AX/EAX). */
 
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+    PVM      pVM   = pVCpu->CTX_SUFF(pVM);
+
     /* Refer AMD spec. 15.10.2 "IN and OUT Behaviour" and Figure 15-2. "EXITINFO1 for IOIO Intercept" for the format. */
     SVMIOIOEXIT IoExitInfo;
@@ -3683 +3765 @@
         if (IoExitInfo.n.u1Type == 0)   /* OUT */
         {
-            rc = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
-                                    (DISCPUMODE)pDis->uAddrMode, uIOSize);
+            VBOXSTRICTRC rc2 = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
+                                                  (DISCPUMODE)pDis->uAddrMode, uIOSize);
+            rc = VBOXSTRICTRC_VAL(rc2);
             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
         }
         else
        {
-            rc = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
-                                   (DISCPUMODE)pDis->uAddrMode, uIOSize);
+            VBOXSTRICTRC rc2 = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix,
+                                                 (DISCPUMODE)pDis->uAddrMode, uIOSize);
+            rc = VBOXSTRICTRC_VAL(rc2);
             STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
         }
@@ -3704 +3788 @@
         if (IoExitInfo.n.u1Type == 0)   /* OUT */
         {
-            rc = IOMIOPortWrite(pVM, pVCpu, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize);
+            VBOXSTRICTRC rc2 = IOMIOPortWrite(pVM, pVCpu, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize);
+            rc = VBOXSTRICTRC_VAL(rc2);
             if (rc == VINF_IOM_R3_IOPORT_WRITE)
                 HMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pVmcb->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, uAndVal, uIOSize);
@@ -3714 +3799 @@
             uint32_t u32Val = 0;
 
-            rc = IOMIOPortRead(pVM, pVCpu, IoExitInfo.n.u16Port, &u32Val, uIOSize);
+            VBOXSTRICTRC rc2 = IOMIOPortRead(pVM, pVCpu, IoExitInfo.n.u16Port, &u32Val, uIOSize);
+            rc = VBOXSTRICTRC_VAL(rc2);
             if (IOM_SUCCESS(rc))
             {
@@ -3838 +3924 @@
             PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
             if (!pPatch)
-            {
-                rc = VINF_EM_HM_PATCH_TPR_INSTR;
-                return rc;
-            }
+                return VINF_EM_HM_PATCH_TPR_INSTR;
         }
@@ -3864 +3947 @@
     if ((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) == (X86_TRAP_PF_RSVD | X86_TRAP_PF_P))
     {
-        rc = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, enmNestedPagingMode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr, u32ErrCode);
+        VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, enmNestedPagingMode, CPUMCTX2CORE(pCtx), GCPhysFaultAddr,
+                                                         u32ErrCode);
+        rc = VBOXSTRICTRC_VAL(rc2);
 
         /*
@@ -3913 +3998 @@
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
 
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     pVmcb->ctrl.IntCtrl.n.u1VIrqValid  = 0;  /* No virtual interrupts pending, we'll inject the current one before reentry. */
     pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0;
@@ -3920 +4006 @@
     pVmcb->ctrl.u64VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_TPR);
 
-    /* Deliver the pending interrupt via hmR0SvmPreRunGuest()->hmR0SvmInjectEvent() and resume guest execution. */
+    /* Deliver the pending interrupt via hmR0SvmPreRunGuest()->hmR0SvmInjectEventVmcb() and resume guest execution. */
     STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
     return VINF_SUCCESS;
@@ -3968 +4054 @@
     HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
 
-    int rc = hmR0SvmEmulateMovTpr(pVM, pVCpu, pCtx);
+    int rc = hmR0SvmEmulateMovTpr(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
     if (RT_UNLIKELY(rc != VINF_SUCCESS))
         hmR0SvmSetPendingXcptUD(pVCpu);
@@ -4011 +4097 @@
 #endif
 
+    PVM pVM = pVCpu->CTX_SUFF(pVM);
     Assert(!pVM->hm.s.fNestedPaging);
@@ -4028 +4115 @@
         /* Check if the page at the fault-address is the APIC base. */
         RTGCPHYS GCPhysPage;
-        rc = PGMGstGetPage(pVCpu, (RTGCPTR)uFaultAddress, NULL /* pfFlags */, &GCPhysPage);
-        if (   rc == VINF_SUCCESS
-            && GCPhys == GCPhysApicBase)
+        int rc2 = PGMGstGetPage(pVCpu, (RTGCPTR)uFaultAddress, NULL /* pfFlags */, &GCPhysPage);
+        if (   rc2 == VINF_SUCCESS
+            && GCPhysPage == GCPhysApicBase)
         {
             /* Only attempt to patch the instruction once. */
@@ -4044 +4131 @@
 
     TRPMAssertXcptPF(pVCpu, uFaultAddress, u32ErrCode);
-    rc = PGMTrap0eHandler(pVCpu, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
+    int rc = PGMTrap0eHandler(pVCpu, u32ErrCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
 
     Log2(("#PF rc=%Rrc\n", rc));
@@ -4098 +4185 @@
 
     /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
-    PVM pVM = pVCpu->CTX_SUFF(pVM);
-    rc = CPUMR0LoadGuestFPU(pVM, pVCpu, pCtx);
+    int rc = CPUMR0LoadGuestFPU(pVCpu->CTX_SUFF(pVM), pVCpu, pCtx);
     if (rc == VINF_SUCCESS)
     {
@@ -4125 +4211 @@
     HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
 
+    int rc;
     if (!(pCtx->cr0 & X86_CR0_NE))
     {
@@ -4140 +4227 @@
 }
 
+
+/**
+ * #VMEXIT handler for debug exception (SVM_EXIT_DB). Conditional #VMEXIT.
+ */
+HMSVM_EXIT_DECL hmR0SvmExitXcptDB(PVMCPU pVCpu, PCPUMCTX pCtx, PSVMTRANSIENT pSvmTransient)
+{
+    HMSVM_VALIDATE_EXIT_HANDLER_PARAMS();
+
+    HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY();
+
+    /* -XXX- todo!!*/
+    return VERR_NOT_IMPLEMENTED;
+}