Changeset 80161 in vbox for trunk/src/VBox/VMM/VMMAll

Timestamp: Aug 6, 2019 6:10:51 PM (6 years ago)
svn:sync-xref-src-repo-rev: 132602
Location: trunk/src/VBox/VMM/VMMAll
Files: 13 edited
Legend:
- unchanged context lines are prefixed with a space
- removed lines are prefixed with '-'
- added lines are prefixed with '+'
- hunk headers give the old and new starting lines: @@ -old +new @@
trunk/src/VBox/VMM/VMMAll/AllPdbTypeHack.cpp
r76553 → r80161

@@ -53 +53 @@
 #include "../include/NEMInternal.h"
 #include "../include/REMInternal.h"
-#ifndef IN_RC
-# include "../VMMR0/GMMR0Internal.h"
-# include "../VMMR0/GVMMR0Internal.h"
-#endif
-#ifdef VBOX_WITH_RAW_MODE
-# include "../include/CSAMInternal.h"
-# include "../include/PATMInternal.h"
-#endif
+#include "../VMMR0/GMMR0Internal.h"
+#include "../VMMR0/GVMMR0Internal.h"
 #include <VBox/vmm/vm.h>
 #ifdef IN_RING3
 # include <VBox/vmm/uvm.h>
 #endif
-#ifndef IN_RC
-# include <VBox/vmm/gvm.h>
-#endif
+#include <VBox/vmm/gvm.h>
 
 
trunk/src/VBox/VMM/VMMAll/EMAll.cpp
r80055 → r80161

@@ -425 +425 @@
 }
 
-#ifndef IN_RC
 
 /**
@@ -740 +739 @@
 }
 
-#endif /* !IN_RC */
 
 /**
@@ -769 +767 @@
     pHistEntry->idxSlot = UINT32_MAX;
 
-#ifndef IN_RC
     /*
      * If common exit type, we will insert/update the exit into the exit record hash table.
      */
     if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
-# ifdef IN_RING0
+#ifdef IN_RING0
        && pVCpu->em.s.fExitOptimizationEnabledR0
        && (   !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
-# else
+#else
        && pVCpu->em.s.fExitOptimizationEnabled
-# endif
+#endif
        && uFlatPC != UINT64_MAX
       )
        return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
-#endif
    return NULL;
 }
-
-
-#ifdef IN_RC
-/**
- * Special raw-mode interface for adding an exit to the history.
- *
- * Currently this is only for recording, not optimizing, so no return value. If
- * we start seriously caring about raw-mode again, we may extend it.
- *
- * @param   pVCpu           The cross context virtual CPU structure.
- * @param   uFlagsAndType   Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
- * @param   uCs             The CS.
- * @param   uEip            The EIP.
- * @param   uTimestamp      The TSC value for the exit, 0 if not available.
- * @thread  EMT(0)
- */
-VMMRC_INT_DECL(void) EMRCHistoryAddExitCsEip(PVMCPU pVCpu, uint32_t uFlagsAndType, uint16_t uCs, uint32_t uEip, uint64_t uTimestamp)
-{
-    AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
-    PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)(pVCpu->em.s.iNextExit++) & 0xff];
-    pHistEntry->uFlatPC       = ((uint64_t)uCs << 32) | uEip;
-    pHistEntry->uTimestamp    = uTimestamp;
-    pHistEntry->uFlagsAndType = uFlagsAndType | EMEXIT_F_CS_EIP;
-    pHistEntry->idxSlot       = UINT32_MAX;
-}
-#endif
 
 
@@ -858 +828 @@
    pHistEntry->uFlagsAndType = uFlagsAndType | (pHistEntry->uFlagsAndType & (EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC));
 
-#ifndef IN_RC
    /*
     * If common exit type, we will insert/update the exit into the exit record hash table.
     */
    if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
-# ifdef IN_RING0
+#ifdef IN_RING0
        && pVCpu->em.s.fExitOptimizationEnabledR0
        && (   !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
-# else
+#else
        && pVCpu->em.s.fExitOptimizationEnabled
-# endif
+#endif
        && pHistEntry->uFlatPC != UINT64_MAX
       )
        return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, pHistEntry->uFlatPC, pHistEntry, uExitNo);
-#endif
    return NULL;
 }
@@ -903 +871 @@
    pHistEntry->uFlatPC = uFlatPC;
 
-#ifndef IN_RC
    /*
     * If common exit type, we will insert/update the exit into the exit record hash table.
     */
    if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
-# ifdef IN_RING0
+#ifdef IN_RING0
        && pVCpu->em.s.fExitOptimizationEnabledR0
        && (   !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
-# else
+#else
        && pVCpu->em.s.fExitOptimizationEnabled
-# endif
+#endif
       )
        return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
-#endif
    return NULL;
 }
@@ -1027 +993 @@
    if (RT_FAILURE(rc))
    {
-#ifndef IN_RC
        /*
         * If we fail to find the page via the guest's page tables
@@ -1039 +1004 @@
            HMInvalidatePage(pVCpu, uSrcAddr + cbToRead - 1);
        }
-#endif
    }
 }
 
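All three EMAll.cpp history hunks repeat one pattern: the outer #ifndef IN_RC wrapper goes away, and the remaining gate only distinguishes ring-0 from ring-3 builds at preprocessing time. A minimal compilable sketch of that two-context gate follows; the EMCPUSKETCH type and emSketchIsOptimizationEnabled are illustrative stand-ins, not the real EM internals:

    #include <stdbool.h>

    /* Illustrative stand-in for the per-CPU EM state; the real fields
       live in pVCpu->em.s. */
    typedef struct EMCPUSKETCH
    {
        bool fExitOptimizationEnabled;                  /* ring-3 switch */
        bool fExitOptimizationEnabledR0;                /* ring-0 switch */
        bool fExitOptimizationEnabledR0PreemptDisabled; /* ring-0 with preemption disabled */
    } EMCPUSKETCH;

    /* Each build context compiles exactly one branch; after preprocessing
       there is no runtime "which context am I in" test left. */
    static bool emSketchIsOptimizationEnabled(EMCPUSKETCH const *pEmCpu, bool fIsHmExit)
    {
    #ifdef IN_RING0
        return pEmCpu->fExitOptimizationEnabledR0
            && (!fIsHmExit || pEmCpu->fExitOptimizationEnabledR0PreemptDisabled);
    #else
        (void)fIsHmExit;
        return pEmCpu->fExitOptimizationEnabled;
    #endif
    }

The same translation unit is built once per context, so dropping the raw-mode case also lets the nested "# ifdef" directives lose their extra nesting space, which is what the modified line pairs in the hunks above show.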
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
r79118 → r80161

@@ -31 +31 @@
 
 
-#ifndef IN_RC
 
 /**
@@ -120 +119 @@
 }
 
-# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
 /**
  * Notification callback for when a \#VMEXIT happens outside SVM R0 code (e.g.
@@ -171 +170 @@
    ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
 }
-# endif
+#endif
 
 /**
@@ -268 +267 @@
 }
 
-#endif /* !IN_RC */
 
 
trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp
r80034 → r80161

@@ -880 +880 @@
 VMM_INT_DECL(void) HMDumpHwvirtVmxState(PVMCPU pVCpu)
 {
-#ifndef IN_RC
    /* The string width of -4 used in the macros below to cover 'LDTR', 'GDTR', 'IDTR. */
-# define HMVMX_DUMP_HOST_XDTR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
+#define HMVMX_DUMP_HOST_XDTR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
    do { \
        LogRel(("  %s%-4s = {base=%016RX64}\n", \
                (a_pszPrefix), (a_SegName), (a_pVmcs)->u64Host##a_Seg##Base.u)); \
    } while (0)
-# define HMVMX_DUMP_HOST_FS_GS_TR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
+#define HMVMX_DUMP_HOST_FS_GS_TR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
    do { \
        LogRel(("  %s%-4s = {%04x base=%016RX64}\n", \
                (a_pszPrefix), (a_SegName), (a_pVmcs)->Host##a_Seg, (a_pVmcs)->u64Host##a_Seg##Base.u)); \
    } while (0)
-# define HMVMX_DUMP_GUEST_SEGREG(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
+#define HMVMX_DUMP_GUEST_SEGREG(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
    do { \
        LogRel(("  %s%-4s = {%04x base=%016RX64 limit=%08x flags=%04x}\n", \
@@ -898 +897 @@
                (a_pVmcs)->u32Guest##a_Seg##Limit, (a_pVmcs)->u32Guest##a_Seg##Attr)); \
    } while (0)
-# define HMVMX_DUMP_GUEST_XDTR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
+#define HMVMX_DUMP_GUEST_XDTR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
    do { \
        LogRel(("  %s%-4s = {base=%016RX64 limit=%08x}\n", \
@@ -1144 +1143 @@
 }
 
-# undef HMVMX_DUMP_HOST_XDTR
-# undef HMVMX_DUMP_HOST_FS_GS_TR
-# undef HMVMX_DUMP_GUEST_SEGREG
-# undef HMVMX_DUMP_GUEST_XDTR
-#else
-    NOREF(pVCpu);
-#endif /* !IN_RC */
+#undef HMVMX_DUMP_HOST_XDTR
+#undef HMVMX_DUMP_HOST_FS_GS_TR
+#undef HMVMX_DUMP_GUEST_SEGREG
+#undef HMVMX_DUMP_GUEST_XDTR
 }
 
@@ -1290 +1286 @@
 
 
-#ifndef IN_RC
-# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
 /**
  * Notification callback for when a VM-exit happens outside VMX R0 code (e.g. in
@@ -1364 +1359 @@
 }
 
-# endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
-#endif /* IN_RC */
+#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
 
trunk/src/VBox/VMM/VMMAll/MMAll.cpp
r76553 → r80161

@@ -210 +210 @@
 DECLINLINE(PMMLOOKUPHYPER) mmHyperLookupCC(PVM pVM, void *pv, uint32_t *poff)
 {
-#ifdef IN_RC
-    return mmHyperLookupRC(pVM, (RTRCPTR)pv, poff);
-#elif defined(IN_RING0)
+#ifdef IN_RING0
    return mmHyperLookupR0(pVM, pv, poff);
+#elif defined(IN_RING3)
+    return mmHyperLookupR3(pVM, pv, poff);
 #else
-    return mmHyperLookupR3(pVM, pv, poff);
+# error "Neither IN_RING0 nor IN_RING3!"
 #endif
 }
@@ -301 +301 @@
 DECLINLINE(void *) mmHyperLookupCalcCC(PVM pVM, PMMLOOKUPHYPER pLookup, uint32_t off)
 {
-#ifdef IN_RC
-    return (void *)mmHyperLookupCalcRC(pVM, pLookup, off);
-#elif defined(IN_RING0)
+#ifdef IN_RING0
    return mmHyperLookupCalcR0(pVM, pLookup, off);
-#else
+#elif defined(IN_RING3)
    NOREF(pVM);
    return mmHyperLookupCalcR3(pLookup, off);
+#else
+# error "Neither IN_RING0 nor IN_RING3!"
 #endif
 }
@@ -469 +469 @@
 }
 
-#ifndef IN_RC
+
 /**
  * Converts a raw-mode context address in the Hypervisor memory region to a current context address.
@@ -487 +487 @@
    return NULL;
 }
-#endif
+
 
 #ifndef IN_RING3
@@ -530 +530 @@
 
 
-#ifndef IN_RC
 /**
  * Converts a current context address in the Hypervisor memory region to a raw-mode context address.
@@ -548 +547 @@
    return NIL_RTRCPTR;
 }
-#endif
 
 
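The mmHyperLookupCC/mmHyperLookupCalcCC hunks are worth a second look: the old dispatch fell through to the ring-3 path for any context that was neither raw-mode nor ring-0, while the new form enumerates both supported contexts and turns the #else into a hard compile-time error. A small self-contained sketch of that defensive dispatch, with hypothetical worker names rather than the real MM ones:

    /* Hypothetical per-context workers; the real file dispatches to
       mmHyperLookupR0/mmHyperLookupR3. */
    static void *sketchLookupR0(void *pvBase, unsigned off) { return (char *)pvBase + off; }
    static void *sketchLookupR3(void *pvBase, unsigned off) { return (char *)pvBase + off; }

    static void *sketchLookupCC(void *pvBase, unsigned off)
    {
    #ifdef IN_RING0
        return sketchLookupR0(pvBase, off);
    #elif defined(IN_RING3)
        return sketchLookupR3(pvBase, off);
    #else
        /* A build context that is neither ring-0 nor ring-3 now fails
           loudly instead of silently taking the ring-3 path. */
    # error "Neither IN_RING0 nor IN_RING3!"
    #endif
    }

Compiling the sketch without -DIN_RING0 or -DIN_RING3 trips the #error, which is exactly the safety net the changeset adds while retiring the IN_RC case.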
trunk/src/VBox/VMM/VMMAll/MMAllHyper.cpp
r76553 → r80161

@@ -167 +167 @@
 #endif
    int rc = PDMCritSectEnter(&pHeap->Lock, VERR_SEM_BUSY);
-#if defined(IN_RC) || defined(IN_RING0)
+#ifdef IN_RING0
    if (rc == VERR_SEM_BUSY)
        rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_MMHYPER_LOCK, 0);
trunk/src/VBox/VMM/VMMAll/MMAllPagePool.cpp
r76553 → r80161

@@ -40 +40 @@
 
 
-#if !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) && !defined(IN_RC)
+#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
 
 
@@ -54 +54 @@
 void *mmPagePoolPhys2Ptr(PMMPAGEPOOL pPool, RTHCPHYS HCPhys)
 {
-# if 0 /** @todo have to fix the debugger, but until then this is going on my nerves. */
-# ifdef IN_RING3
+# if 0 /** @todo have to fix the debugger, but until then this is going on my nerves. */
+#  ifdef IN_RING3
    VM_ASSERT_EMT(pPool->pVM);
-# endif
-# endif
+#  endif
+# endif
 
    /*
trunk/src/VBox/VMM/VMMAll/REMAll.cpp
r76553 → r80161

@@ -199 +199 @@
 #endif /* !IN_RING3 */
 
-#ifdef IN_RC
-/**
- * Flushes the physical handler notifications if the queue is almost full.
- *
- * This is for avoiding trouble in RC when changing CR3.
- *
- * @param   pVM     The cross context VM structure.
- * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
- */
-VMMDECL(void) REMNotifyHandlerPhysicalFlushIfAlmostFull(PVM pVM, PVMCPU pVCpu)
-{
-    Assert(pVM->cCpus == 1); NOREF(pVCpu);
-
-    /*
-     * Less than 48 items means we should flush.
-     */
-    uint32_t cFree = 0;
-    for (uint32_t idx = pVM->rem.s.idxFreeList;
-         idx != UINT32_MAX;
-         idx = pVM->rem.s.aHandlerNotifications[idx].idxNext)
-    {
-        Assert(idx < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
-        if (++cFree >= 48)
-            return;
-    }
-    AssertRelease(VM_FF_IS_SET(pVM, VM_FF_REM_HANDLER_NOTIFY));
-    AssertRelease(pVM->rem.s.idxPendingList != UINT32_MAX);
-
-    /* Ok, we gotta flush them. */
-    VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS, 0);
-
-    AssertRelease(pVM->rem.s.idxPendingList == UINT32_MAX);
-    AssertRelease(pVM->rem.s.idxFreeList != UINT32_MAX);
-}
-#endif /* IN_RC */
-
 
 /**
trunk/src/VBox/VMM/VMMAll/SELMAll.cpp
r80055 → r80161

@@ -38 +38 @@
 
 
-/*********************************************************************************************************************************
-*   Global Variables                                                                                                             *
-*********************************************************************************************************************************/
-#if defined(LOG_ENABLED) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
-/** Segment register names. */
-static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" };
-#endif
-
 
 /**
@@ -80 +72 @@
 }
 
-#ifdef VBOX_WITH_RAW_MODE_NOT_R0
-    /** @todo when we're in 16 bits mode, we should cut off the address as well?? */
-    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
-        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
-    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
-        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
-#else
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
-#endif
 
    /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
@@ -153 +137 @@
 }
 
-#ifdef VBOX_WITH_RAW_MODE_NOT_R0
-    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
-        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
-    if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
-        CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
-#else
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
    Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
-#endif
 
    /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp
r76553 → r80161

@@ -142 +142 @@
        case TMTSCMODE_NATIVE_API:
        {
-#ifndef IN_RC
            int rc = NEMHCResumeCpuTickOnAll(pVM, pVCpu, pVM->tm.s.u64LastPausedTSC);
            AssertRCReturn(rc, rc);
            pVCpu->tm.s.offTSCRawSrc = offTSCRawSrcOld = 0;
-#else
-            AssertFailedReturn(VERR_INTERNAL_ERROR_2);
-#endif
            break;
        }
@@ -454 +450 @@
            u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
            break;
-#ifndef IN_RC
        case TMTSCMODE_NATIVE_API:
        {
@@ -462 +457 @@
            break;
        }
-#endif
        default:
            AssertFailedBreakStmt(u64 = SUPReadTsc());
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp
r76553 → r80161

@@ -90 +90 @@
        case SUPGIPMODE_SYNC_TSC:
        case SUPGIPMODE_INVARIANT_TSC:
-#if defined(IN_RC) || defined(IN_RING0)
+#ifdef IN_RING0
            if (pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO)
                pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta;
@@ -112 +112 @@
 
        case SUPGIPMODE_ASYNC_TSC:
-#if defined(IN_RC) || defined(IN_RING0)
+#ifdef IN_RING0
            pfnWorker = fLFence ? RTTimeNanoTSLFenceAsync : RTTimeNanoTSLegacyAsync;
 #else
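These TMAllVirtual.cpp hunks sit inside code that resolves a nanosecond-timestamp worker function once, per GIP mode and build context, so the hot path is a single indirect call; the diff merely narrows the context test from defined(IN_RC) || defined(IN_RING0) to IN_RING0. A compilable sketch of that select-once pattern follows; all names here are illustrative, not the real SUPDrv or RTTimeNanoTS symbols:

    #include <stdint.h>

    /* Illustrative GIP-mode enum and worker signature; the real code picks
       among the RTTimeNanoTSLegacy... and RTTimeNanoTSLFence... variants. */
    typedef enum { SKETCH_SYNC_TSC, SKETCH_INVARIANT_TSC, SKETCH_ASYNC_TSC } SKETCHGIPMODE;
    typedef uint64_t (*PFNSKETCHNANOTS)(void);

    static uint64_t sketchSyncInvarWorker(void) { return 42; /* placeholder reading */ }
    static uint64_t sketchAsyncWorker(void)     { return 42; /* placeholder reading */ }

    /* Resolved once at init; later timestamp reads just call through the
       pointer and never re-examine the mode. */
    static PFNSKETCHNANOTS sketchPickWorker(SKETCHGIPMODE enmMode)
    {
        switch (enmMode)
        {
            case SKETCH_SYNC_TSC:
            case SKETCH_INVARIANT_TSC:
                return sketchSyncInvarWorker;
            default:
                return sketchAsyncWorker;
        }
    }

Selecting the worker up front keeps the mode and context decisions out of every timestamp read, which is why only the #if line needs to change when a whole context is dropped.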
trunk/src/VBox/VMM/VMMAll/VMAll.cpp
r76553 → r80161

@@ -30 +30 @@
 #include <iprt/assert.h>
 #include <iprt/string.h>
-#ifndef IN_RC
-# include <iprt/thread.h>
-#endif
+#include <iprt/thread.h>
 
 
trunk/src/VBox/VMM/VMMAll/VMMAll.cpp
r80003 → r80161

@@ -163 +163 @@
 
 
-#ifndef IN_RC
 /**
  * Counterpart to vmmInitFormatTypes, called by VMMR3Term and VMMR0Term.
@@ -172 +171 @@
    RTStrFormatTypeDeregister("vmcpuset");
 }
-#endif
 
 