- Timestamp: May 21, 2015, 5:04:14 PM
- svn:sync-xref-src-repo-rev: 100526
- Location: trunk/src/VBox/VMM
- Files: 17 edited
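Editorial summary: across these 17 files, the ring-3-only virtual access handler callbacks (FNPGMR3VIRTHANDLER, returning int) become all-context handlers (FNPGMVIRTHANDLER, declared with PGM_ALL_CB2_DECL and returning VBOXSTRICTRC), and the raw-mode \#PF handlers likewise switch their return type from int to VBOXSTRICTRC. The sketch below is a minimal, self-contained model of that callback contract; the TOY* names are invented for illustration and are not part of the VirtualBox API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef int32_t TOYSTRICTRC;                /* models VBOXSTRICTRC (an int32_t in C builds) */
#define TOY_VINF_SUCCESS            0
#define TOY_VINF_HANDLER_DO_DEFAULT 1       /* models VINF_PGM_HANDLER_DO_DEFAULT */

/* Models FNPGMVIRTHANDLER: invoked on a monitored guest write; the strict
   status tells the caller whether it still has to carry out the write. */
typedef TOYSTRICTRC FNTOYVIRTHANDLER(void *pvUser, uintptr_t GCPtr, const void *pvBuf, size_t cbBuf);

static TOYSTRICTRC toyGdtWriteHandler(void *pvUser, uintptr_t GCPtr, const void *pvBuf, size_t cbBuf)
{
    (void)pvUser; (void)pvBuf;
    printf("monitored write to %#lx (%zu bytes), deferring to default action\n",
           (unsigned long)GCPtr, cbBuf);
    return TOY_VINF_HANDLER_DO_DEFAULT;     /* informational code: caller performs the write */
}

int main(void)
{
    FNTOYVIRTHANDLER *pfnHandler = toyGdtWriteHandler;
    uint8_t abDesc[8] = { 0 };
    TOYSTRICTRC rcStrict = pfnHandler(NULL, 0x1000, abDesc, sizeof(abDesc));
    return rcStrict == TOY_VINF_HANDLER_DO_DEFAULT ? 0 : 1;
}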
trunk/src/VBox/VMM/VMMAll/CSAMAll.cpp
r55937 → r56013:

  * @param   pvUser          User argument.
  */
-PGM_ALL_CB2_DECL(int) csamCodePageWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
-                                               PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
+PGM_ALL_CB2_DECL(VBOXSTRICTRC)
+csamCodePageWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
+                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
 {
     RTGCPTR const GCPtrMonitored = (uintptr_t)pvUser | (GCPtr & PAGE_OFFSET_MASK);
trunk/src/VBox/VMM/VMMAll/PATMAll.cpp
r55937 → r56013:

  * @param   pvUser          The address of the guest page we're monitoring.
  */
-PGM_ALL_CB2_DECL(int) patmVirtPageHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
-                                          PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
+PGM_ALL_CB2_DECL(VBOXSTRICTRC) patmVirtPageHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
+                                                   PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
 {
     Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r55988 → r56013:

         && uErr & X86_TRAP_PF_RW)
     {
+        VBOXSTRICTRC rcStrict;
 # ifdef IN_RC
         STAM_PROFILE_START(&pCur->Stat, h);
…
         void *pvUser = pCur->CTX_SUFF(pvUser);
         pgmUnlock(pVM);
-        rc = pCurType->CTX_SUFF(pfnPfHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault, pCur->Core.Key,
-                                              pvFault - pCur->Core.Key, pvUser);
+        rcStrict = pCurType->CTX_SUFF(pfnPfHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault, pCur->Core.Key,
+                                                    pvFault - pCur->Core.Key, pvUser);
         pgmLock(pVM);
         STAM_PROFILE_STOP(&pCur->Stat, h);
 # else
         AssertFailed();
-        rc = VINF_EM_RAW_EMULATE_INSTR; /* can't happen with VMX */
+        rcStrict = VINF_EM_RAW_EMULATE_INSTR; /* can't happen with VMX */
 # endif
         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eHandlersMapping);
         STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2Mapping; });
-        return rc;
+        return VBOXSTRICTRC_TODO(rcStrict);
     }
…
             || pCurType->enmKind != PGMVIRTHANDLERKIND_WRITE ) )
         {
+            VBOXSTRICTRC rcStrict;
 # ifdef IN_RC
             STAM_PROFILE_START(&pCur->Stat, h);
             void *pvUser = pCur->CTX_SUFF(pvUser);
             pgmUnlock(pVM);
-            rc = pCurType->CTX_SUFF(pfnPfHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault, pCur->Core.Key,
-                                                  pvFault - pCur->Core.Key, pvUser);
+            rcStrict = pCurType->CTX_SUFF(pfnPfHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault, pCur->Core.Key,
+                                                        pvFault - pCur->Core.Key, pvUser);
             pgmLock(pVM);
             STAM_PROFILE_STOP(&pCur->Stat, h);
 # else
-            rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
+            rcStrict = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
 # endif
             STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZTrap0eTime2HndVirt; });
-            return rc;
+            return VBOXSTRICTRC_TODO(rcStrict);
         }
     }
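The PGMAllBth.h hunks above keep the handler's status in a dedicated rcStrict variable and flatten it back to a plain int only at the function boundary, via VBOXSTRICTRC_TODO. A rough standalone model of that boundary discipline follows; the toy names are illustrative, not the real VBox macros.

#include <assert.h>
#include <stdint.h>

typedef int32_t TOYSTRICTRC;
#define TOY_VINF_SUCCESS        0
#define TOY_VINF_EMULATE_INSTR  2            /* informational: the caller must act on it */

/* Models VBOXSTRICTRC_TODO(): a marked conversion back to plain int at call
   sites that have not been converted to strict status handling yet. */
static int toyStrictToTodo(TOYSTRICTRC rcStrict)
{
    return (int)rcStrict;
}

static TOYSTRICTRC toyPfHandler(void)
{
    return TOY_VINF_EMULATE_INSTR;
}

int main(void)
{
    TOYSTRICTRC rcStrict = toyPfHandler();   /* keep the strict code as long as possible */
    int rc = toyStrictToTodo(rcStrict);      /* flatten only where a legacy int is required */
    assert(rc == TOY_VINF_EMULATE_INSTR);
    return 0;
}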
trunk/src/VBox/VMM/VMMAll/SELMAll.cpp
r55899 → r56013:

 #endif
 
+
+#ifndef IN_RING0
+
+# ifdef SELM_TRACK_GUEST_GDT_CHANGES
+/**
+ * @callback_method_impl{FNPGMVIRTHANDLER}
+ */
+PGM_ALL_CB2_DECL(VBOXSTRICTRC)
+selmGuestGDTWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
+                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
+{
+    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
+    Log(("selmGuestGDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf)); NOREF(GCPtr); NOREF(cbBuf);
+    NOREF(pvPtr); NOREF(pvBuf); NOREF(enmOrigin); NOREF(pvUser);
+
+# ifdef IN_RING3
+    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
+    return VINF_PGM_HANDLER_DO_DEFAULT;
+
+# else  /* IN_RC: */
+    /*
+     * Execute the write, doing necessary pre and post shadow GDT checks.
+     */
+    PCPUMCTX pCtx        = CPUMQueryGuestCtxPtr(pVCpu);
+    uint32_t offGuestGdt = pCtx->gdtr.pGdt - GCPtr;
+    selmRCGuestGdtPreWriteCheck(pVM, pVCpu, offGuestGdt, cbBuf, pCtx);
+    memcpy(pvBuf, pvPtr, cbBuf);
+    VBOXSTRICTRC rcStrict = selmRCGuestGdtPostWriteCheck(pVM, pVCpu, offGuestGdt, cbBuf, pCtx);
+    if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
+        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
+    else
+        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
+    return rcStrict;
+# endif
+}
+# endif
+
+
+# ifdef SELM_TRACK_GUEST_LDT_CHANGES
+/**
+ * @callback_method_impl{FNPGMVIRTHANDLER}
+ */
+PGM_ALL_CB2_DECL(VBOXSTRICTRC)
+selmGuestLDTWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
+                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
+{
+    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
+    Log(("selmGuestLDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf)); NOREF(GCPtr); NOREF(cbBuf);
+    NOREF(pvPtr); NOREF(pvBuf); NOREF(enmOrigin); NOREF(pvUser);
+
+    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
+# ifdef IN_RING3
+    return VINF_PGM_HANDLER_DO_DEFAULT;
+# else
+    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestLDT);
+    return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT;
+# endif
+}
+# endif
+
+
+# ifdef SELM_TRACK_GUEST_TSS_CHANGES
+/**
+ * @callback_method_impl{FNPGMVIRTHANDLER}
+ */
+PGM_ALL_CB2_DECL(VBOXSTRICTRC)
+selmGuestTSSWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
+                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
+{
+    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
+    Log(("selmGuestTSSWriteHandler: write %.*Rhxs to %RGv size %d\n", RT_MIN(8, cbBuf), pvBuf, GCPtr, cbBuf));
+    NOREF(pvBuf); NOREF(GCPtr); NOREF(cbBuf); NOREF(enmOrigin); NOREF(pvUser); NOREF(pvPtr);
+
+# ifdef IN_RING3
+    /** @todo This can be optimized by checking for the ESP0 offset and tracking TR
+     *        reloads in REM (setting VM_FF_SELM_SYNC_TSS if TR is reloaded).  We
+     *        should probably also deregister the virtual handler if TR.base/size
+     *        changes while we're in REM.  May also share
+     *        selmRCGuestTssPostWriteCheck code. */
+    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
+    return VINF_PGM_HANDLER_DO_DEFAULT;
+
+# else  /* IN_RC */
+    /*
+     * Do the write and check if anything relevant changed.
+     */
+    Assert(pVM->selm.s.GCPtrGuestTss != (uintptr_t)RTRCPTR_MAX);
+    memcpy(pvPtr, pvBuf, cbBuf);
+    return selmRCGuestTssPostWriteCheck(pVM, pVCpu, GCPtr - pVM->selm.s.GCPtrGuestTss, cbBuf);
+# endif
+}
+# endif
+
+#endif /* IN_RING0 */
 
 
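The new selmGuest*WriteHandler bodies above are compiled once per execution context from the same source file, branching on IN_RING3 versus IN_RC. A minimal standalone model of that pattern follows (illustrative only, not VBox code): build it once with -DIN_RING3 and once without to get the two behaviours.

#include <stdio.h>
#include <string.h>

#define TOY_DO_DEFAULT 1
#define TOY_SUCCESS    0

static unsigned s_fSyncPending;   /* models the VMCPU_FF_SELM_SYNC_GDT force flag */

static int toyWriteHandler(void *pvPtr, const void *pvBuf, size_t cbBuf)
{
#ifdef IN_RING3
    /* Ring-3: flag a full resync and let the caller do the write. */
    s_fSyncPending = 1;
    (void)pvPtr; (void)pvBuf; (void)cbBuf;
    return TOY_DO_DEFAULT;
#else
    /* Raw-mode: perform the write here, then validate the result. */
    memcpy(pvPtr, pvBuf, cbBuf);
    return TOY_SUCCESS;
#endif
}

int main(void)
{
    char abShadow[8];
    const char abNew[8] = "GDTENT";
    int rc = toyWriteHandler(abShadow, abNew, sizeof(abNew));
    printf("rc=%d syncPending=%u\n", rc, s_fSyncPending);
    return 0;
}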
trunk/src/VBox/VMM/VMMAll/TRPMAll.cpp
r55001 → r56013:

 #include "internal/pgm.h"
 
+
+
+#if defined(TRPM_TRACK_GUEST_IDT_CHANGES) && !defined(IN_RING0)
+/**
+ * \#PF Handler callback for virtual access handler ranges.
+ *
+ * Important to realize that a physical page in a range can have aliases, and
+ * for ALL and WRITE handlers these will also trigger.
+ *
+ * @returns VINF_SUCCESS if the handler have carried out the operation.
+ * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
+ * @param   pVM             Pointer to the VM.
+ * @param   pVCpu           Pointer to the cross context CPU context for the
+ *                          calling EMT.
+ * @param   GCPtr           The virtual address the guest is writing to. (not correct if it's an alias!)
+ * @param   pvPtr           The HC mapping of that address.
+ * @param   pvBuf           What the guest is reading/writing.
+ * @param   cbBuf           How much it's reading/writing.
+ * @param   enmAccessType   The access type.
+ * @param   enmOrigin       The origin of this call.
+ * @param   pvUser          User argument.
+ */
+PGM_ALL_CB2_DECL(VBOXSTRICTRC)
+trpmGuestIDTWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
+                         PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
+{
+    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
+    Log(("trpmGuestIDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf)); NOREF(GCPtr); NOREF(cbBuf);
+    NOREF(pvPtr); NOREF(pvUser); NOREF(pvBuf); NOREF(enmOrigin); NOREF(pvUser);
+    Assert(!HMIsEnabled(pVM));
+
+    /** @todo Check which IDT entry and keep the update cost low in TRPMR3SyncIDT() and CSAMCheckGates(). */
+    VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
+# ifdef IN_RC
+    STAM_COUNTER_INC(&pVM->trpm.s.StatRCWriteGuestIDTFault);
+# endif
+    return VINF_PGM_HANDLER_DO_DEFAULT;
+}
+#endif /* TRPM_TRACK_GUEST_IDT_CHANGES && !IN_RING0 */
 
 
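Note how the IDT write handler above never parses the write: it sets VMCPU_FF_TRPM_SYNC_IDT and lets the default write proceed, so the expensive resync happens at most once later. A minimal sketch of that deferred-sync idea, with invented names:

#include <stdbool.h>
#include <stdio.h>

static bool g_fIdtDirty;

static void toyOnGuestIdtWrite(void)
{
    g_fIdtDirty = true;            /* models VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT) */
}

static void toySyncIdtIfNeeded(void)
{
    if (g_fIdtDirty)               /* models the check before reentering the guest */
    {
        printf("resyncing shadow IDT\n");
        g_fIdtDirty = false;
    }
}

int main(void)
{
    toyOnGuestIdtWrite();
    toyOnGuestIdtWrite();          /* many writes, a single resync */
    toySyncIdtIfNeeded();
    return 0;
}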
trunk/src/VBox/VMM/VMMR3/PGMHandler.cpp
r55966 → r56013:

 VMMR3_INT_DECL(int) PGMR3HandlerVirtualTypeRegisterEx(PVM pVM, PGMVIRTHANDLERKIND enmKind, bool fRelocUserRC,
                                                       PFNPGMR3VIRTINVALIDATE pfnInvalidateR3,
-                                                      PFNPGMR3VIRTHANDLER pfnHandlerR3,
+                                                      PFNPGMVIRTHANDLER pfnHandlerR3,
                                                       RCPTRTYPE(FNPGMRCVIRTPFHANDLER) pfnPfHandlerRC,
                                                       const char *pszDesc, PPGMVIRTHANDLERTYPE phType)
…
 VMMR3_INT_DECL(int) PGMR3HandlerVirtualTypeRegister(PVM pVM, PGMVIRTHANDLERKIND enmKind, bool fRelocUserRC,
                                                     PFNPGMR3VIRTINVALIDATE pfnInvalidateR3,
-                                                    PFNPGMR3VIRTHANDLER pfnHandlerR3,
+                                                    PFNPGMVIRTHANDLER pfnHandlerR3,
                                                     const char *pszPfHandlerRC, const char *pszDesc,
                                                     PPGMVIRTHANDLERTYPE phType)
trunk/src/VBox/VMM/VMMR3/SELM.cpp
r55900 → r56013:

 static DECLCALLBACK(int)  selmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
 static DECLCALLBACK(int)  selmR3LoadDone(PVM pVM, PSSMHANDLE pSSM);
-static FNPGMR3VIRTHANDLER selmR3GuestGDTWriteHandler;
-static FNPGMR3VIRTHANDLER selmR3GuestLDTWriteHandler;
-static FNPGMR3VIRTHANDLER selmR3GuestTSSWriteHandler;
 static DECLCALLBACK(void) selmR3InfoGdt(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
 static DECLCALLBACK(void) selmR3InfoGdtGuest(PVM pVM, PCDBGFINFOHLP pHlp, const char *pszArgs);
…
 # endif
     rc = PGMR3HandlerVirtualTypeRegister(pVM, PGMVIRTHANDLERKIND_WRITE, false /*fRelocUserRC*/,
-                                         NULL /*pfnInvalidateR3*/, selmR3GuestGDTWriteHandler,
+                                         NULL /*pfnInvalidateR3*/, selmGuestGDTWriteHandler,
                                          "selmRCGuestGDTWritePfHandler",
                                          "Guest GDT write access handler", &pVM->selm.s.hGuestGdtWriteHandlerType);
     AssertRCReturn(rc, rc);
     rc = PGMR3HandlerVirtualTypeRegister(pVM, PGMVIRTHANDLERKIND_WRITE, false /*fRelocUserRC*/,
-                                         NULL /*pfnInvalidateR3*/, selmR3GuestLDTWriteHandler,
+                                         NULL /*pfnInvalidateR3*/, selmGuestLDTWriteHandler,
                                          "selmRCGuestLDTWritePfHandler",
                                          "Guest LDT write access handler", &pVM->selm.s.hGuestLdtWriteHandlerType);
     AssertRCReturn(rc, rc);
     rc = PGMR3HandlerVirtualTypeRegister(pVM, PGMVIRTHANDLERKIND_WRITE, false /*fRelocUserRC*/,
-                                         NULL /*pfnInvalidateR3*/, selmR3GuestTSSWriteHandler,
+                                         NULL /*pfnInvalidateR3*/, selmGuestTSSWriteHandler,
                                          "selmRCGuestTSSWritePfHandler",
                                          "Guest TSS write access handler", &pVM->selm.s.hGuestTssWriteHandlerType);
…
 }
 
 
-#endif /*VBOX_WITH_RAW_MODE*/
-
-#ifdef SELM_TRACK_GUEST_GDT_CHANGES
-/**
- * \#PF Handler callback for virtual access handler ranges.
- *
- * Important to realize that a physical page in a range can have aliases, and
- * for ALL and WRITE handlers these will also trigger.
- *
- * @returns VINF_SUCCESS if the handler have carried out the operation.
- * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
- * @param   pVM             Pointer to the VM.
- * @param   pVCpu           Pointer to the cross context CPU context for the
- *                          calling EMT.
- * @param   GCPtr           The virtual address the guest is writing to.
- *                          (not correct if it's an alias!)
- * @param   pvPtr           The HC mapping of that address.
- * @param   pvBuf           What the guest is reading/writing.
- * @param   cbBuf           How much it's reading/writing.
- * @param   enmAccessType   The access type.
- * @param   enmOrigin       Who is making this write.
- * @param   pvUser          Unused.
- */
-static DECLCALLBACK(int) selmR3GuestGDTWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
-                                                    PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
-{
-    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
-    Log(("selmR3GuestGDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf)); NOREF(GCPtr); NOREF(cbBuf);
-    NOREF(pvPtr); NOREF(pvBuf); NOREF(enmOrigin); NOREF(pvUser);
-
-    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
-    return VINF_PGM_HANDLER_DO_DEFAULT;
-}
-#endif
-
-#ifdef SELM_TRACK_GUEST_LDT_CHANGES
-/**
- * \#PF Handler callback for virtual access handler ranges.
- *
- * Important to realize that a physical page in a range can have aliases, and
- * for ALL and WRITE handlers these will also trigger.
- *
- * @returns VINF_SUCCESS if the handler have carried out the operation.
- * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
- * @param   pVM             Pointer to the VM.
- * @param   pVCpu           Pointer to the cross context CPU context for the
- *                          calling EMT.
- * @param   GCPtr           The virtual address the guest is writing to. (not correct if it's an alias!)
- * @param   pvPtr           The HC mapping of that address.
- * @param   pvBuf           What the guest is reading/writing.
- * @param   cbBuf           How much it's reading/writing.
- * @param   enmAccessType   The access type.
- * @param   enmOrigin       Who is making this write.
- * @param   pvUser          Unused.
- */
-static DECLCALLBACK(int) selmR3GuestLDTWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
-                                                    PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
-{
-    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
-    Log(("selmR3GuestLDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf)); NOREF(GCPtr); NOREF(cbBuf);
-    NOREF(pvPtr); NOREF(pvBuf); NOREF(enmOrigin); NOREF(pvUser);
-
-    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
-    return VINF_PGM_HANDLER_DO_DEFAULT;
-}
-#endif
-
-
-#ifdef SELM_TRACK_GUEST_TSS_CHANGES
-/**
- * \#PF Handler callback for virtual access handler ranges.
- *
- * Important to realize that a physical page in a range can have aliases, and
- * for ALL and WRITE handlers these will also trigger.
- *
- * @returns VINF_SUCCESS if the handler have carried out the operation.
- * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
- * @param   pVM             Pointer to the VM.
- * @param   pVCpu           Pointer to the cross context CPU context for the
- *                          calling EMT.
- * @param   GCPtr           The virtual address the guest is writing to.
- *                          (not correct if it's an alias!)
- * @param   pvPtr           The HC mapping of that address.
- * @param   pvBuf           What the guest is reading/writing.
- * @param   cbBuf           How much it's reading/writing.
- * @param   enmAccessType   The access type.
- * @param   enmOrigin       Who is making this write.
- * @param   pvUser          Unused.
- */
-static DECLCALLBACK(int) selmR3GuestTSSWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
-                                                    PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
-{
-    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
-    Log(("selmR3GuestTSSWriteHandler: write %.*Rhxs to %RGv size %d\n", RT_MIN(8, cbBuf), pvBuf, GCPtr, cbBuf));
-    NOREF(pvBuf); NOREF(GCPtr); NOREF(cbBuf); NOREF(enmOrigin); NOREF(pvUser); NOREF(pvPtr);
-
-    /** @todo This can be optimized by checking for the ESP0 offset and tracking TR
-     *        reloads in REM (setting VM_FF_SELM_SYNC_TSS if TR is reloaded). We
-     *        should probably also deregister the virtual handler if TR.base/size
-     *        changes while we're in REM. */
-
-    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
-    return VINF_PGM_HANDLER_DO_DEFAULT;
-}
-#endif
-
-#ifdef VBOX_WITH_RAW_MODE
 
 /**
…
 VMMR3DECL(int) SELMR3DebugCheck(PVM pVM)
 {
-#ifdef VBOX_STRICT
+# ifdef VBOX_STRICT
     PVMCPU pVCpu = VMMGetCpu(pVM);
     AssertReturn(!HMIsEnabled(pVM), VERR_SELM_HM_IPE);
…
     }
 
-#else  /* !VBOX_STRICT */
+# else  /* !VBOX_STRICT */
     NOREF(pVM);
-#endif /* !VBOX_STRICT */
+# endif /* !VBOX_STRICT */
 
     return VINF_SUCCESS;
…
 VMMR3DECL(bool) SELMR3CheckTSS(PVM pVM)
 {
-#if defined(VBOX_STRICT) && defined(SELM_TRACK_GUEST_TSS_CHANGES)
+# if defined(VBOX_STRICT) && defined(SELM_TRACK_GUEST_TSS_CHANGES)
     PVMCPU pVCpu = VMMGetCpu(pVM);
…
     return true;
 
-#else  /* !VBOX_STRICT */
+# else  /* !VBOX_STRICT */
     NOREF(pVM);
     return true;
-#endif /* !VBOX_STRICT */
+# endif /* !VBOX_STRICT */
 }
trunk/src/VBox/VMM/VMMR3/TRPM.cpp
r55900 → r56013:

 
 
-#ifdef VBOX_WITH_RAW_MODE
-/** Enable or disable tracking of Guest's IDT. */
-# define TRPM_TRACK_GUEST_IDT_CHANGES
-/** Enable or disable tracking of Shadow IDT. */
-# define TRPM_TRACK_SHADOW_IDT_CHANGES
-#endif
-
 /** TRPM saved state version. */
 #define TRPM_SAVED_STATE_VERSION        9
…
 static DECLCALLBACK(int) trpmR3Save(PVM pVM, PSSMHANDLE pSSM);
 static DECLCALLBACK(int) trpmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
-#ifdef TRPM_TRACK_GUEST_IDT_CHANGES
-static FNPGMR3VIRTHANDLER trpmR3GuestIDTWriteHandler;
-#endif
 
…
 # endif
     rc = PGMR3HandlerVirtualTypeRegister(pVM, PGMVIRTHANDLERKIND_WRITE, false /*fRelocUserRC*/,
-                                         NULL /*pfnInvalidateR3*/, trpmR3GuestIDTWriteHandler,
+                                         NULL /*pfnInvalidateR3*/, trpmGuestIDTWriteHandler,
                                          "trpmRCGuestIDTWritePfHandler",
                                          "Guest IDT write access handler", &pVM->trpm.s.hGuestIdtWriteHandlerType);
…
     return VINF_SUCCESS;
 }
-
-
-# ifdef TRPM_TRACK_GUEST_IDT_CHANGES
-/**
- * \#PF Handler callback for virtual access handler ranges.
- *
- * Important to realize that a physical page in a range can have aliases, and
- * for ALL and WRITE handlers these will also trigger.
- *
- * @returns VINF_SUCCESS if the handler have carried out the operation.
- * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
- * @param   pVM             Pointer to the VM.
- * @param   pVCpu           Pointer to the cross context CPU context for the
- *                          calling EMT.
- * @param   GCPtr           The virtual address the guest is writing to. (not correct if it's an alias!)
- * @param   pvPtr           The HC mapping of that address.
- * @param   pvBuf           What the guest is reading/writing.
- * @param   cbBuf           How much it's reading/writing.
- * @param   enmAccessType   The access type.
- * @param   enmOrigin       The origin of this call.
- * @param   pvUser          User argument.
- */
-static DECLCALLBACK(int) trpmR3GuestIDTWriteHandler(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
-                                                    PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser)
-{
-    Assert(enmAccessType == PGMACCESSTYPE_WRITE); NOREF(enmAccessType);
-    Log(("trpmR3GuestIDTWriteHandler: write to %RGv size %d\n", GCPtr, cbBuf)); NOREF(GCPtr); NOREF(cbBuf);
-    NOREF(pvPtr); NOREF(pvUser); NOREF(pvBuf); NOREF(enmOrigin); NOREF(pvUser);
-    Assert(!HMIsEnabled(pVM));
-
-    VMCPU_FF_SET(pVCpu, VMCPU_FF_TRPM_SYNC_IDT);
-    return VINF_PGM_HANDLER_DO_DEFAULT;
-}
-# endif /* TRPM_TRACK_GUEST_IDT_CHANGES */
 
 
trunk/src/VBox/VMM/VMMRC/CSAMRC.cpp
r55966 → r56013:

  * @param   pvUser      Ignored.
  */
-DECLEXPORT(int) csamRCCodePageWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
-                                             RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
+DECLEXPORT(VBOXSTRICTRC) csamRCCodePageWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
+                                                      RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
 {
     PPATMGCSTATE pPATMGCState;
trunk/src/VBox/VMM/VMMRC/PATMRC.cpp
r55966 → r56013:

  * @param   pvUser      The physical address of the guest page being monitored.
  */
-DECLEXPORT(int) patmRCVirtPagePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
-                                        RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
+DECLEXPORT(VBOXSTRICTRC) patmRCVirtPagePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
+                                                 RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
 {
     NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange);
trunk/src/VBox/VMM/VMMRC/SELMRC.cpp
r55966 → r56013:

 
 #ifdef SELM_TRACK_GUEST_GDT_CHANGES
+
 /**
  * Synchronizes one GDT entry (guest -> shadow).
…
  * @returns VBox strict status code (appropriate for trap handling and GC
  *          return).
+ * @retval  VINF_SUCCESS
  * @retval  VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
  * @retval  VINF_SELM_SYNC_GDT
- * @retval  VINF_EM_RESCHEDULE_REM
  *
  * @param   pVM         Pointer to the VM.
  * @param   pVCpu       The current virtual CPU.
- * @param   pRegFrame   Trap register frame.
+ * @param   pCtx        CPU context for the current CPU.
  * @param   iGDTEntry   The GDT entry to sync.
  *
  * @remarks Caller checks that this isn't the LDT entry!
  */
-static VBOXSTRICTRC selmRCSyncGDTEntry(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry)
+static VBOXSTRICTRC selmRCSyncGDTEntry(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, unsigned iGDTEntry)
 {
     Log2(("GDT %04X LDTR=%04X\n", iGDTEntry, CPUMGetGuestLDTR(pVCpu)));
…
             VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
             VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3); /* paranoia */
-            return VINF_EM_RESCHEDULE_REM;
+            /* return VINF_EM_RESCHEDULE_REM; - bad idea if we're in a patch. */
+            return VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
         }
     }
…
      */
     VBOXSTRICTRC rcStrict = VINF_SUCCESS;
-    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); Assert(CPUMCTX2CORE(pCtx) == pRegFrame);
     PCPUMSELREG paSReg = CPUMCTX_FIRST_SREG(pCtx);
     for (unsigned iSReg = 0; iSReg <= X86_SREG_COUNT; iSReg++)
…
  * @param   pVM         Pointer to the VM.
  * @param   pVCpu       The current virtual CPU.
- * @param   pRegFrame   Trap register frame.
+ * @param   pCtx        The CPU context.
  * @param   iGDTEntry   The GDT entry to sync.
  */
-static void selmRCSyncGDTSegRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry)
+void selmRCSyncGdtSegRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, unsigned iGDTEntry)
 {
     /*
…
     PCX86DESC   pDesc = &pVM->selm.s.CTX_SUFF(paGdt)[iGDTEntry];
     uint32_t    uCpl  = CPUMGetGuestCPL(pVCpu);
-    PCPUMCTX    pCtx  = CPUMQueryGuestCtxPtr(pVCpu); Assert(CPUMCTX2CORE(pCtx) == pRegFrame);
     PCPUMSELREG paSReg = CPUMCTX_FIRST_SREG(pCtx);
     for (unsigned iSReg = 0; iSReg <= X86_SREG_COUNT; iSReg++)
…
         }
     }
 }
+
+
+/**
+ * Syncs hidden selector register parts before emulating a GDT change.
+ *
+ * This is shared between the selmRCGuestGDTWritePfHandler and
+ * selmGuestGDTWriteHandler.
+ *
+ * @param   pVM         Pointer to the cross context VM structure.
+ * @param   pVCpu       Pointer to the cross context virtual CPU structure.
+ * @param   offGuestTss The offset into the TSS of the write that was made.
+ * @param   cbWrite     The number of bytes written.
+ * @param   pCtx        The current CPU context.
+ */
+void selmRCGuestGdtPreWriteCheck(PVM pVM, PVMCPU pVCpu, uint32_t offGuestGdt, uint32_t cbWrite, PCPUMCTX pCtx)
+{
+    uint32_t       iGdt     = offGuestGdt >> X86_SEL_SHIFT;
+    uint32_t const iGdtLast = (offGuestGdt + cbWrite - 1) >> X86_SEL_SHIFT;
+    do
+    {
+        selmRCSyncGdtSegRegs(pVM, pVCpu, pCtx, iGdt);
+        iGdt++;
+    } while (iGdt <= iGdtLast);
+}
+
+
+/**
+ * Checks the guest GDT for changes after a write has been emulated.
+ *
+ *
+ * This is shared between the selmRCGuestGDTWritePfHandler and
+ * selmGuestGDTWriteHandler.
+ *
+ * @retval  VINF_SUCCESS
+ * @retval  VINF_SELM_SYNC_GDT
+ * @retval  VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
+ *
+ * @param   pVM         Pointer to the cross context VM structure.
+ * @param   pVCpu       Pointer to the cross context virtual CPU structure.
+ * @param   offGuestTss The offset into the TSS of the write that was made.
+ * @param   cbWrite     The number of bytes written.
+ * @param   pCtx        The current CPU context.
+ */
+VBOXSTRICTRC selmRCGuestGdtPostWriteCheck(PVM pVM, PVMCPU pVCpu, uint32_t offGuestGdt, uint32_t cbWrite, PCPUMCTX pCtx)
+{
+    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
+
+    /* Check if the LDT was in any way affected.  Do not sync the
+       shadow GDT if that's the case or we might have trouble in
+       the world switcher (or so they say). */
+    uint32_t const iGdtFirst = offGuestGdt >> X86_SEL_SHIFT;
+    uint32_t const iGdtLast  = (offGuestGdt + cbWrite - 1) >> X86_SEL_SHIFT;
+    uint32_t const iLdt      = CPUMGetGuestLDTR(pVCpu) >> X86_SEL_SHIFT;
+    if (iGdtFirst <= iLdt && iGdtLast >= iLdt)
+    {
+        Log(("LDTR selector change -> fall back to HC!!\n"));
+        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
+        rcStrict = VINF_SELM_SYNC_GDT;
+        /** @todo Implement correct stale LDT handling. */
+    }
+    else
+    {
+        /* Sync the shadow GDT and continue provided the update didn't
+           cause any segment registers to go stale in any way. */
+        uint32_t iGdt = iGdtFirst;
+        do
+        {
+            VBOXSTRICTRC rcStrict2 = selmRCSyncGDTEntry(pVM, pVCpu, pCtx, iGdt);
+            Assert(rcStrict2 == VINF_SUCCESS || rcStrict2 == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT || rcStrict2 == VINF_SELM_SYNC_GDT);
+            if (rcStrict == VINF_SUCCESS)
+                rcStrict = rcStrict2;
+            iGdt++;
+        } while (   iGdt <= iGdtLast
+                 && (rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT));
+        if (rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT)
+            STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
+    }
+    return rcStrict;
+}
 
 
 /**
- * \#PF Virtual Handler callback for Guest write access to the Guest's own GDT.
- *
- * @returns VBox status code (appropriate for trap handling and GC return).
- * @param   pVM         Pointer to the VM.
- * @param   pVCpu       Pointer to the cross context CPU context for the
- *                      calling EMT.
- * @param   uErrorCode  CPU Error code.
- * @param   pRegFrame   Trap register frame.
- * @param   pvFault     The fault address (cr2).
- * @param   pvRange     The base address of the handled virtual range.
- * @param   offRange    The offset of the access into this range.
- *                      (If it's a EIP range this is the EIP, if not it's pvFault.)
- */
-DECLEXPORT(int) selmRCGuestGDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
-                                             RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
+ * @callback_method_impl{FNPGMVIRTHANDLER, Guest GDT write access \#PF handler}
+ */
+DECLEXPORT(VBOXSTRICTRC) selmRCGuestGDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
+                                                      RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
 {
     LogFlow(("selmRCGuestGDTWritePfHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));
…
      * Check if any selectors might be affected.
      */
-    unsigned const iGDTE1 = offRange >> X86_SEL_SHIFT;
-    selmRCSyncGDTSegRegs(pVM, pVCpu, pRegFrame, iGDTE1);
-    if (((offRange + 8) >> X86_SEL_SHIFT) != iGDTE1)
-        selmRCSyncGDTSegRegs(pVM, pVCpu, pRegFrame, iGDTE1 + 1);
+    selmRCGuestGdtPreWriteCheck(pVM, pVCpu, offRange, 8 /*cbWrite*/, CPUMCTX_FROM_CORE(pRegFrame));
 
     /*
…
     VBOXSTRICTRC rcStrict = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
     if (RT_SUCCESS(rcStrict) && cb)
-    {
-        /* Check if the LDT was in any way affected.  Do not sync the
-           shadow GDT if that's the case or we might have trouble in
-           the world switcher (or so they say). */
-        unsigned const iLdt   = CPUMGetGuestLDTR(pVCpu) >> X86_SEL_SHIFT;
-        unsigned const iGDTE2 = (offRange + cb - 1) >> X86_SEL_SHIFT;
-        if (   iGDTE1 == iLdt
-            || iGDTE2 == iLdt)
-        {
-            Log(("LDTR selector change -> fall back to HC!!\n"));
-            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
-            rcStrict = VINF_SELM_SYNC_GDT;
-            /** @todo Implement correct stale LDT handling. */
-        }
-        else
-        {
-            /* Sync the shadow GDT and continue provided the update didn't
-               cause any segment registers to go stale in any way. */
-            VBOXSTRICTRC rcStrict2 = selmRCSyncGDTEntry(pVM, pVCpu, pRegFrame, iGDTE1);
-            if (rcStrict2 == VINF_SUCCESS || rcStrict2 == VINF_EM_RESCHEDULE_REM)
-            {
-                if (rcStrict == VINF_SUCCESS)
-                    rcStrict = rcStrict2;
-
-                if (iGDTE1 != iGDTE2)
-                {
-                    rcStrict2 = selmRCSyncGDTEntry(pVM, pVCpu, pRegFrame, iGDTE2);
-                    if (rcStrict == VINF_SUCCESS)
-                        rcStrict = rcStrict2;
-                }
-
-                if (rcStrict2 == VINF_SUCCESS || rcStrict2 == VINF_EM_RESCHEDULE_REM)
-                {
-                    /* VINF_EM_RESCHEDULE_REM - bad idea if we're in a patch. */
-                    if (rcStrict2 == VINF_EM_RESCHEDULE_REM)
-                        rcStrict = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
-                    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
-                    return VBOXSTRICTRC_TODO(rcStrict);
-                }
-            }
-
-            /* sync failed, return to ring-3 and resync the GDT. */
-            if (rcStrict == VINF_SUCCESS || RT_FAILURE(rcStrict2))
-                rcStrict = rcStrict2;
-        }
-    }
+        rcStrict = selmRCGuestGdtPostWriteCheck(pVM, pVCpu, offRange, cb, CPUMCTX_FROM_CORE(pRegFrame));
     else
     {
…
     }
 
-    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
-    return VBOXSTRICTRC_TODO(rcStrict);
-}
+    if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
+        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
+    else
+        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
+    return rcStrict;
+}
+
 #endif /* SELM_TRACK_GUEST_GDT_CHANGES */
 
-
 #ifdef SELM_TRACK_GUEST_LDT_CHANGES
 /**
- * \#PF Virtual Handler callback for Guest write access to the Guest's own LDT.
- *
- * @returns VBox status code (appropriate for trap handling and GC return).
- * @param   pVM         Pointer to the VM.
- * @param   pVCpu       Pointer to the cross context CPU context for the
- *                      calling EMT.
- * @param   uErrorCode  CPU Error code.
- * @param   pRegFrame   Trap register frame.
- * @param   pvFault     The fault address (cr2).
- * @param   pvRange     The base address of the handled virtual range.
- * @param   offRange    The offset of the access into this range.
- *                      (If it's a EIP range this is the EIP, if not it's pvFault.)
- * @param   pvUser      Unused.
- */
-DECLEXPORT(int) selmRCGuestLDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
-                                             RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
-{
-    /** @todo To be implemented. */
+ * @callback_method_impl{FNPGMVIRTHANDLER, Guest LDT write access \#PF handler}
+ */
+DECLEXPORT(VBOXSTRICTRC) selmRCGuestLDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
+                                                      RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
+{
+    /** @todo To be implemented... or not.
+     */
     ////LogCom(("selmRCGuestLDTWriteHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
     NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
 
     VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
     STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestLDT);
     return VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT;
 }
 #endif
 
 
 #ifdef SELM_TRACK_GUEST_TSS_CHANGES
+
 /**
  * Read wrapper used by selmRCGuestTSSWriteHandler.
  * @returns VBox status code (appropriate for trap handling and GC return).
  * @param   pVM     Pointer to the VM.
  * @param   pvDst   Where to put the bits we read.
  * @param   pvSrc   Guest address to read from.
  * @param   cb      The number of bytes to read.
  */
-DECLINLINE(int) selmRCReadTssBits(PVM pVM, void *pvDst, void const *pvSrc, size_t cb)
-{
-    PVMCPU pVCpu = VMMGetCpu0(pVM);
-
+DECLINLINE(int) selmRCReadTssBits(PVM pVM, PVMCPU pVCpu, void *pvDst, void const *pvSrc, size_t cb)
+{
     int rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
     if (RT_SUCCESS(rc))
         return VINF_SUCCESS;
 
     /** @todo use different fallback? */
     rc = PGMPrefetchPage(pVCpu, (uintptr_t)pvSrc);
     AssertMsg(rc == VINF_SUCCESS, ("PGMPrefetchPage %p failed with %Rrc\n", &pvSrc, rc));
     if (rc == VINF_SUCCESS)
     {
         rc = MMGCRamRead(pVM, pvDst, (void *)pvSrc, cb);
         AssertMsg(rc == VINF_SUCCESS, ("MMGCRamRead %p failed with %Rrc\n", &pvSrc, rc));
     }
     return rc;
 }
 
+
+/**
+ * Checks the guest TSS for changes after a write has been emulated.
+ *
+ * This is shared between the
+ *
+ * @returns Strict VBox status code appropriate for raw-mode returns.
+ * @param   pVM         Pointer to the cross context VM structure.
+ * @param   pVCpu       Pointer to the cross context virtual CPU structure.
+ * @param   offGuestTss The offset into the TSS of the write that was made.
+ * @param   cbWrite     The number of bytes written.
+ */
+VBOXSTRICTRC selmRCGuestTssPostWriteCheck(PVM pVM, PVMCPU pVCpu, uint32_t offGuestTss, uint32_t cbWrite)
+{
+    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
+
+    /*
+     * If it's on the same page as the esp0 and ss0 fields or actually one of them,
+     * then check if any of these has changed.
+     */
+    /** @todo just read the darn fields and put them on the stack. */
+    PCVBOXTSS pGuestTss = (PVBOXTSS)(uintptr_t)pVM->selm.s.GCPtrGuestTss;
+    if (   PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS(&pGuestTss->padding_ss0)
+        && PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS((uint8_t *)pGuestTss + offGuestTss)
+        && (   pGuestTss->esp0 != pVM->selm.s.Tss.esp1
+            || pGuestTss->ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
+       )
+    {
+        Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
+             (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)pGuestTss->ss0, (RTGCPTR)pGuestTss->esp0));
+        pVM->selm.s.Tss.esp1 = pGuestTss->esp0;
+        pVM->selm.s.Tss.ss1  = pGuestTss->ss0 | 1;
+        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
+    }
+# ifdef VBOX_WITH_RAW_RING1
+    else if (   EMIsRawRing1Enabled(pVM)
+             && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS(&pGuestTss->padding_ss1)
+             && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS((uint8_t *)pGuestTss + offGuestTss)
+             && (   pGuestTss->esp1 != pVM->selm.s.Tss.esp2
+                 || pGuestTss->ss1  != ((pVM->selm.s.Tss.ss2 & ~2) | 1)) /* undo raw-r1 */
+            )
+    {
+        Log(("selmRCGuestTSSWritePfHandler: R1 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
+             (RTSEL)((pVM->selm.s.Tss.ss2 & ~2) | 1), (RTGCPTR)pVM->selm.s.Tss.esp2, (RTSEL)pGuestTss->ss1, (RTGCPTR)pGuestTss->esp1));
+        pVM->selm.s.Tss.esp2 = pGuestTss->esp1;
+        pVM->selm.s.Tss.ss2  = (pGuestTss->ss1 & ~1) | 2;
+        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
+    }
+# endif
+    /* Handle misaligned TSS in a safe manner (just in case). */
+    else if (   offGuestTss >= RT_UOFFSETOF(VBOXTSS, esp0)
+             && offGuestTss <  RT_UOFFSETOF(VBOXTSS, padding_ss0))
+    {
+        struct
+        {
+            uint32_t esp0;
+            uint16_t ss0;
+            uint16_t padding_ss0;
+        } s;
+        AssertCompileSize(s, 8);
+        rcStrict = selmRCReadTssBits(pVM, pVCpu, &s, &pGuestTss->esp0, sizeof(s));
+        if (    rcStrict == VINF_SUCCESS
+            && (   s.esp0 != pVM->selm.s.Tss.esp1
+                || s.ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
+           )
+        {
+            Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv [x-page]\n",
+                 (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)s.ss0, (RTGCPTR)s.esp0));
+            pVM->selm.s.Tss.esp1 = s.esp0;
+            pVM->selm.s.Tss.ss1  = s.ss0 | 1;
+            STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
+        }
+    }
+
+    /*
+     * If VME is enabled we need to check if the interrupt redirection bitmap
+     * needs updating.
+     */
+    if (   offGuestTss >= RT_UOFFSETOF(VBOXTSS, offIoBitmap)
+        && (CPUMGetGuestCR4(pVCpu) & X86_CR4_VME))
+    {
+        if (offGuestTss - RT_UOFFSETOF(VBOXTSS, offIoBitmap) < sizeof(pGuestTss->offIoBitmap))
+        {
+            uint16_t offIoBitmap = pGuestTss->offIoBitmap;
+            if (offIoBitmap != pVM->selm.s.offGuestIoBitmap)
+            {
+                Log(("TSS offIoBitmap changed: old=%#x new=%#x -> resync in ring-3\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
+                VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
+                VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
+            }
+            else
+                Log(("TSS offIoBitmap: old=%#x new=%#x [unchanged]\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
+        }
+        else
+        {
+            /** @todo not sure how the partial case is handled; probably not allowed */
+            uint32_t offIntRedirBitmap = pVM->selm.s.offGuestIoBitmap - sizeof(pVM->selm.s.Tss.IntRedirBitmap);
+            if (   offIntRedirBitmap <= offGuestTss
+                && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) >= offGuestTss + cbWrite
+                && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) <= pVM->selm.s.cbGuestTss)
+            {
+                Log(("TSS IntRedirBitmap Changed: offIoBitmap=%x offIntRedirBitmap=%x cbTSS=%x offGuestTss=%x cbWrite=%x\n",
+                     pVM->selm.s.offGuestIoBitmap, offIntRedirBitmap, pVM->selm.s.cbGuestTss, offGuestTss, cbWrite));
+
+                /** @todo only update the changed part. */
+                for (uint32_t i = 0; rcStrict == VINF_SUCCESS && i < sizeof(pVM->selm.s.Tss.IntRedirBitmap) / 8; i++)
+                    rcStrict = selmRCReadTssBits(pVM, pVCpu, &pVM->selm.s.Tss.IntRedirBitmap[i * 8],
+                                                 (uint8_t *)pGuestTss + offIntRedirBitmap + i * 8, 8);
+                STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSRedir);
+            }
+        }
+    }
+
+    /*
+     * Return to ring-3 for a full resync if any of the above fails... (?)
+     */
+    if (rcStrict != VINF_SUCCESS)
+    {
+        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
+        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
+        if (RT_SUCCESS(rcStrict))
+            rcStrict = VINF_SUCCESS;
+    }
+
+    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandled);
+    return rcStrict;
+}
+
+
 /**
- * \#PF Virtual Handler callback for Guest write access to the Guest's own current TSS.
- *
- * @returns VBox status code (appropriate for trap handling and GC return).
- * @param   pVM         Pointer to the VM.
- * @param   pVCpu       Pointer to the cross context CPU context for the
- *                      calling EMT.
- * @param   uErrorCode  CPU Error code.
- * @param   pRegFrame   Trap register frame.
- * @param   pvFault     The fault address (cr2).
- * @param   pvRange     The base address of the handled virtual range.
- * @param   offRange    The offset of the access into this range.
- *                      (If it's a EIP range this is the EIP, if not it's pvFault.)
- * @param   pvUser      Unused.
- */
-DECLEXPORT(int) selmRCGuestTSSWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
-                                             RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
+ * @callback_method_impl{FNPGMVIRTHANDLER, Guest TSS write access \#PF handler}
+ */
+DECLEXPORT(VBOXSTRICTRC) selmRCGuestTSSWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
+                                                      RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
 {
     LogFlow(("selmRCGuestTSSWritePfHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));
     NOREF(pvRange); NOREF(pvUser);
 
     /*
      * Try emulate the access.
      */
     uint32_t cb;
     VBOXSTRICTRC rcStrict = EMInterpretInstructionEx(pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
     if (   RT_SUCCESS(rcStrict)
         && cb)
-    {
-        rcStrict = VINF_SUCCESS;
-
-        /*
-         * If it's on the same page as the esp0 and ss0 fields or actually one of them,
-         * then check if any of these has changed.
-         */
-        PCVBOXTSS pGuestTss = (PVBOXTSS)(uintptr_t)pVM->selm.s.GCPtrGuestTss;
-        if (   PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS(&pGuestTss->padding_ss0)
-            && PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS((uint8_t *)pGuestTss + offRange)
-            && (   pGuestTss->esp0 != pVM->selm.s.Tss.esp1
-                || pGuestTss->ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
-           )
-        {
-            Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
-                 (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)pGuestTss->ss0, (RTGCPTR)pGuestTss->esp0));
-            pVM->selm.s.Tss.esp1 = pGuestTss->esp0;
-            pVM->selm.s.Tss.ss1  = pGuestTss->ss0 | 1;
-            STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
-        }
-#ifdef VBOX_WITH_RAW_RING1
-        else if (   EMIsRawRing1Enabled(pVM)
-                 && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS(&pGuestTss->padding_ss1)
-                 && PAGE_ADDRESS(&pGuestTss->esp1) == PAGE_ADDRESS((uint8_t *)pGuestTss + offRange)
-                 && (   pGuestTss->esp1 != pVM->selm.s.Tss.esp2
-                     || pGuestTss->ss1  != ((pVM->selm.s.Tss.ss2 & ~2) | 1)) /* undo raw-r1 */
-                )
-        {
-            Log(("selmRCGuestTSSWritePfHandler: R1 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
-                 (RTSEL)((pVM->selm.s.Tss.ss2 & ~2) | 1), (RTGCPTR)pVM->selm.s.Tss.esp2, (RTSEL)pGuestTss->ss1, (RTGCPTR)pGuestTss->esp1));
-            pVM->selm.s.Tss.esp2 = pGuestTss->esp1;
-            pVM->selm.s.Tss.ss2  = (pGuestTss->ss1 & ~1) | 2;
-            STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
-        }
-#endif
-        /* Handle misaligned TSS in a safe manner (just in case). */
-        else if (   offRange >= RT_UOFFSETOF(VBOXTSS, esp0)
-                 && offRange <  RT_UOFFSETOF(VBOXTSS, padding_ss0))
-        {
-            struct
-            {
-                uint32_t esp0;
-                uint16_t ss0;
-                uint16_t padding_ss0;
-            } s;
-            AssertCompileSize(s, 8);
-            rcStrict = selmRCReadTssBits(pVM, &s, &pGuestTss->esp0, sizeof(s));
-            if (    rcStrict == VINF_SUCCESS
-                && (   s.esp0 != pVM->selm.s.Tss.esp1
-                    || s.ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
-               )
-            {
-                Log(("selmRCGuestTSSWritePfHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv [x-page]\n",
-                     (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)s.ss0, (RTGCPTR)s.esp0));
-                pVM->selm.s.Tss.esp1 = s.esp0;
-                pVM->selm.s.Tss.ss1  = s.ss0 | 1;
-                STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
-            }
-        }
-
-        /*
-         * If VME is enabled we need to check if the interrupt redirection bitmap
-         * needs updating.
-         */
-        if (   offRange >= RT_UOFFSETOF(VBOXTSS, offIoBitmap)
-            && (CPUMGetGuestCR4(pVCpu) & X86_CR4_VME))
-        {
-            if (offRange - RT_UOFFSETOF(VBOXTSS, offIoBitmap) < sizeof(pGuestTss->offIoBitmap))
-            {
-                uint16_t offIoBitmap = pGuestTss->offIoBitmap;
-                if (offIoBitmap != pVM->selm.s.offGuestIoBitmap)
-                {
-                    Log(("TSS offIoBitmap changed: old=%#x new=%#x -> resync in ring-3\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
-                    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
-                    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
-                }
-                else
-                    Log(("TSS offIoBitmap: old=%#x new=%#x [unchanged]\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
-            }
-            else
-            {
-                /** @todo not sure how the partial case is handled; probably not allowed */
-                uint32_t offIntRedirBitmap = pVM->selm.s.offGuestIoBitmap - sizeof(pVM->selm.s.Tss.IntRedirBitmap);
-                if (   offIntRedirBitmap <= offRange
-                    && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) >= offRange + cb
-                    && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) <= pVM->selm.s.cbGuestTss)
-                {
-                    Log(("TSS IntRedirBitmap Changed: offIoBitmap=%x offIntRedirBitmap=%x cbTSS=%x offRange=%x cb=%x\n",
-                         pVM->selm.s.offGuestIoBitmap, offIntRedirBitmap, pVM->selm.s.cbGuestTss, offRange, cb));
-
-                    /** @todo only update the changed part. */
-                    for (uint32_t i = 0; i < sizeof(pVM->selm.s.Tss.IntRedirBitmap) / 8; i++)
-                    {
-                        rcStrict = selmRCReadTssBits(pVM, &pVM->selm.s.Tss.IntRedirBitmap[i * 8],
-                                                     (uint8_t *)pGuestTss + offIntRedirBitmap + i * 8, 8);
-                        if (rcStrict != VINF_SUCCESS)
-                            break;
-                    }
-                    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSRedir);
-                }
-            }
-        }
-
-        /* Return to ring-3 for a full resync if any of the above fails... (?) */
-        if (rcStrict != VINF_SUCCESS)
-        {
-            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
-            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
-            if (RT_SUCCESS(rcStrict))
-                rcStrict = VINF_SUCCESS;
-        }
-
-        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandled);
-    }
+        rcStrict = selmRCGuestTssPostWriteCheck(pVM, pVCpu, offRange, cb);
     else
     {
         AssertMsg(RT_FAILURE(rcStrict), ("cb=%u rcStrict=%#x\n", cb, VBOXSTRICTRC_VAL(rcStrict)));
         VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
         STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSUnhandled);
         if (rcStrict == VERR_EM_INTERPRETER)
             rcStrict = VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT;
     }
-    return VBOXSTRICTRC_TODO(rcStrict);
-}
+    return rcStrict;
+}
+
 #endif /* SELM_TRACK_GUEST_TSS_CHANGES */
 
 #ifdef SELM_TRACK_SHADOW_GDT_CHANGES
 /**
  * \#PF Virtual Handler callback for Guest write access to the VBox shadow GDT.
  *
  * @returns VBox status code (appropriate for trap handling and GC return).
…
  * @param   pvUser      Unused.
  */
-DECLEXPORT(int) selmRCShadowGDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
-                                              RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
+DECLEXPORT(VBOXSTRICTRC) selmRCShadowGDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
+                                                       RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
 {
     LogRel(("FATAL ERROR: selmRCShadowGDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
     NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
     return VERR_SELM_SHADOW_GDT_WRITE;
 }
 #endif
 
 
 #ifdef SELM_TRACK_SHADOW_LDT_CHANGES
 /**
  * \#PF Virtual Handler callback for Guest write access to the VBox shadow LDT.
  *
  * @returns VBox status code (appropriate for trap handling and GC return).
…
  * @param   pvUser      Unused.
  */
-DECLEXPORT(int) selmRCShadowLDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
-                                              RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
+DECLEXPORT(VBOXSTRICTRC) selmRCShadowLDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
+                                                       RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
 {
     LogRel(("FATAL ERROR: selmRCShadowLDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
     Assert(pvFault - (uintptr_t)pVM->selm.s.pvLdtRC < (unsigned)(65536U + PAGE_SIZE));
     NOREF(pVM); NOREF(pVCpu); NOREF(uErrorCode); NOREF(pRegFrame); NOREF(pvFault); NOREF(pvRange); NOREF(offRange); NOREF(pvUser);
     return VERR_SELM_SHADOW_LDT_WRITE;
 }
 #endif
 
 
 #ifdef SELM_TRACK_SHADOW_TSS_CHANGES
 /**
  * \#PF Virtual Handler callback for Guest write access to the VBox shadow TSS.
  *
  * @returns VBox status code (appropriate for trap handling and GC return).
…
  * @param   pvUser      Unused.
  */
-DECLEXPORT(int) selmRCShadowTSSWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
-                                              RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
+DECLEXPORT(VBOXSTRICTRC) selmRCShadowTSSWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
+                                                       RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
 {
     LogRel(("FATAL ERROR: selmRCShadowTSSWritePfHandler: eip=%08X pvFault=%RGv pvRange=%RGv\r\n", pRegFrame->eip, pvFault, pvRange));
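The key refactoring in this file is the split of the GDT write path into a pre-write check (sync hidden segment register parts while the old descriptors are still visible), the emulated write itself, and a post-write check (revalidate and resync the shadow copies). A toy standalone sketch of that shape follows; the names are illustrative, not the VBox API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t g_GuestGdt[4];               /* stand-in for the guest's GDT */
static uint64_t g_ShadowGdt[4];              /* stand-in for the shadow GDT */

static void toyPreWriteCheck(unsigned iFirst, unsigned iLast)
{
    /* Models selmRCGuestGdtPreWriteCheck: runs before the write is emulated. */
    printf("pre:  sync seg regs for entries %u..%u\n", iFirst, iLast);
}

static int toyPostWriteCheck(unsigned iFirst, unsigned iLast)
{
    /* Models selmRCGuestGdtPostWriteCheck: resync the touched shadow entries. */
    for (unsigned i = iFirst; i <= iLast; i++)
        g_ShadowGdt[i] = g_GuestGdt[i];
    printf("post: shadow entries %u..%u updated\n", iFirst, iLast);
    return 0;
}

static int toyEmulateGdtWrite(uint32_t off, const void *pvBuf, uint32_t cb)
{
    unsigned iFirst = off / sizeof(uint64_t);
    unsigned iLast  = (off + cb - 1) / sizeof(uint64_t);
    toyPreWriteCheck(iFirst, iLast);
    memcpy((uint8_t *)g_GuestGdt + off, pvBuf, cb);   /* the emulated write */
    return toyPostWriteCheck(iFirst, iLast);
}

int main(void)
{
    uint64_t uDesc = UINT64_C(0x00CF9A000000FFFF);    /* a flat 32-bit code descriptor */
    return toyEmulateGdtWrite(8, &uDesc, sizeof(uDesc));
}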
trunk/src/VBox/VMM/VMMRC/TRPMRC.cpp
r55900 → r56013:

  * @param   pvUser      Unused.
  */
-DECLEXPORT(int) trpmRCGuestIDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
-                                             RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
+DECLEXPORT(VBOXSTRICTRC) trpmRCGuestIDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
+                                                      RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
 {
     uint16_t cbIDT;
…
  * @param   pvUser      Unused.
  */
-DECLEXPORT(int) trpmRCShadowIDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
-                                              RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
+DECLEXPORT(VBOXSTRICTRC) trpmRCShadowIDTWritePfHandler(PVM pVM, PVMCPU pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,
+                                                       RTGCPTR pvFault, RTGCPTR pvRange, uintptr_t offRange, void *pvUser)
 {
     LogRel(("FATAL ERROR: trpmRCShadowIDTWritePfHandler: eip=%08X pvFault=%RGv pvRange=%08RGv\r\n", pRegFrame->eip, pvFault, pvRange));
trunk/src/VBox/VMM/include/CSAMInternal.h
r55937 → r56013:

 }
 
+PGM_ALL_CB2_DECL(FNPGMVIRTHANDLER)  csamCodePageWriteHandler;
 RT_C_DECLS_BEGIN
 DECLEXPORT(FNPGMRCVIRTPFHANDLER)    csamRCCodePageWritePfHandler;
 RT_C_DECLS_END
-PGM_ALL_CB2_DECL(FNPGMR3VIRTHANDLER) csamCodePageWriteHandler;
 
 #endif
trunk/src/VBox/VMM/include/PATMInternal.h
r55966 → r56013:

 void            patmR3DbgAddPatch(PVM pVM, PPATMPATCHREC pPatchRec);
 
-PGM_ALL_CB2_DECL(FNPGMR3VIRTHANDLER) patmVirtPageHandler;
+PGM_ALL_CB2_DECL(FNPGMVIRTHANDLER)   patmVirtPageHandler;
 
 #endif
trunk/src/VBox/VMM/include/PGMInternal.h
r55966 → r56013:

     R3PTRTYPE(PFNPGMR3VIRTINVALIDATE)   pfnInvalidateR3;
     /** Pointer to R3 callback function. */
-    R3PTRTYPE(PFNPGMR3VIRTHANDLER)      pfnHandlerR3;
+    R3PTRTYPE(PFNPGMVIRTHANDLER)        pfnHandlerR3;
     /** Description / Name. For easing debugging. */
     R3PTRTYPE(const char *)             pszDesc;
trunk/src/VBox/VMM/include/SELMInternal.h
r55895 → r56013:

 RT_C_DECLS_BEGIN
 
-DECLEXPORT(FNPGMRCVIRTPFHANDLER)    selmRCGuestGDTWritePfHandler;
-DECLEXPORT(FNPGMRCVIRTPFHANDLER)    selmRCGuestLDTWritePfHandler;
-DECLEXPORT(FNPGMRCVIRTPFHANDLER)    selmRCGuestTSSWritePfHandler;
-DECLEXPORT(FNPGMRCVIRTPFHANDLER)    selmRCShadowGDTWritePfHandler;
-DECLEXPORT(FNPGMRCVIRTPFHANDLER)    selmRCShadowLDTWritePfHandler;
-DECLEXPORT(FNPGMRCVIRTPFHANDLER)    selmRCShadowTSSWritePfHandler;
-
-void            selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp);
+PGM_ALL_CB2_DECL(FNPGMVIRTHANDLER)  selmGuestGDTWriteHandler;
+DECLEXPORT(FNPGMRCVIRTPFHANDLER)    selmRCGuestGDTWritePfHandler;
+PGM_ALL_CB2_DECL(FNPGMVIRTHANDLER)  selmGuestLDTWriteHandler;
+DECLEXPORT(FNPGMRCVIRTPFHANDLER)    selmRCGuestLDTWritePfHandler;
+PGM_ALL_CB2_DECL(FNPGMVIRTHANDLER)  selmGuestTSSWriteHandler;
+DECLEXPORT(FNPGMRCVIRTPFHANDLER)    selmRCGuestTSSWritePfHandler;
+DECLEXPORT(FNPGMRCVIRTPFHANDLER)    selmRCShadowGDTWritePfHandler;
+DECLEXPORT(FNPGMRCVIRTPFHANDLER)    selmRCShadowLDTWritePfHandler;
+DECLEXPORT(FNPGMRCVIRTPFHANDLER)    selmRCShadowTSSWritePfHandler;
+
+void         selmRCSyncGdtSegRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, unsigned iGDTEntry);
+void         selmRCGuestGdtPreWriteCheck(PVM pVM, PVMCPU pVCpu, uint32_t offGuestGdt, uint32_t cbWrite, PCPUMCTX pCtx);
+VBOXSTRICTRC selmRCGuestGdtPostWriteCheck(PVM pVM, PVMCPU pVCpu, uint32_t offGuestTss, uint32_t cbWrite, PCPUMCTX pCtx);
+VBOXSTRICTRC selmRCGuestTssPostWriteCheck(PVM pVM, PVMCPU pVCpu, uint32_t offGuestTss, uint32_t cbWrite);
+void         selmSetRing1Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp);
 #ifdef VBOX_WITH_RAW_RING1
-void            selmSetRing2Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp);
+void         selmSetRing2Stack(PVM pVM, uint32_t ss, RTGCPTR32 esp);
 #endif
trunk/src/VBox/VMM/include/TRPMInternal.h
r55895 → r56013:

 #include <VBox/vmm/pgm.h>
 
-
-
-/** Enable to allow trap forwarding in GC. */
-#ifdef VBOX_WITH_RAW_MODE
-# define TRPM_FORWARD_TRAPS_IN_GC
-#endif
-
-/** First interrupt handler. Used for validating input. */
-#define TRPM_HANDLER_INT_BASE  0x20
-
 RT_C_DECLS_BEGIN
…
  * @{
  */
+
+
+#ifdef VBOX_WITH_RAW_MODE
+/** Enable or disable tracking of Guest's IDT. */
+# define TRPM_TRACK_GUEST_IDT_CHANGES
+/** Enable or disable tracking of Shadow IDT. */
+# define TRPM_TRACK_SHADOW_IDT_CHANGES
+#endif
+
+
+/** Enable to allow trap forwarding in GC. */
+#ifdef VBOX_WITH_RAW_MODE
+# define TRPM_FORWARD_TRAPS_IN_GC
+#endif
+
+/** First interrupt handler. Used for validating input. */
+#define TRPM_HANDLER_INT_BASE  0x20
+
 
 /** @name TRPMGCTrapIn* flags.
…
-DECLEXPORT(FNPGMRCVIRTPFHANDLER) trpmRCGuestIDTWritePfHandler;
-DECLEXPORT(FNPGMRCVIRTPFHANDLER) trpmRCShadowIDTWritePfHandler;
+PGM_ALL_CB2_DECL(FNPGMVIRTHANDLER) trpmGuestIDTWriteHandler;
+DECLEXPORT(FNPGMRCVIRTPFHANDLER)   trpmRCGuestIDTWritePfHandler;
+DECLEXPORT(FNPGMRCVIRTPFHANDLER)   trpmRCShadowIDTWritePfHandler;
 
 /**