- Timestamp: Aug 6, 2019 6:10:51 PM (6 years ago)
- svn:sync-xref-src-repo-rev: 132602
- Location: trunk/src
- Files: 21 edited
trunk/src/VBox/VMM/VMMAll/AllPdbTypeHack.cpp
r76553 → r80161

  #include "../include/NEMInternal.h"
  #include "../include/REMInternal.h"
- #ifndef IN_RC
- # include "../VMMR0/GMMR0Internal.h"
- # include "../VMMR0/GVMMR0Internal.h"
- #endif
- #ifdef VBOX_WITH_RAW_MODE
- # include "../include/CSAMInternal.h"
- # include "../include/PATMInternal.h"
- #endif
+ #include "../VMMR0/GMMR0Internal.h"
+ #include "../VMMR0/GVMMR0Internal.h"
  #include <VBox/vmm/vm.h>
  #ifdef IN_RING3
  # include <VBox/vmm/uvm.h>
  #endif
- #ifndef IN_RC
- # include <VBox/vmm/gvm.h>
- #endif
+ #include <VBox/vmm/gvm.h>
trunk/src/VBox/VMM/VMMAll/EMAll.cpp
r80055 → r80161

  }

- #ifndef IN_RC

  /**
…
  }

- #endif /* !IN_RC */

  /**
…
      pHistEntry->idxSlot = UINT32_MAX;

- #ifndef IN_RC
      /*
       * If common exit type, we will insert/update the exit into the exit record hash table.
       */
      if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
- # ifdef IN_RING0
+ #ifdef IN_RING0
          && pVCpu->em.s.fExitOptimizationEnabledR0
          && (   !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
- # else
+ #else
          && pVCpu->em.s.fExitOptimizationEnabled
- # endif
+ #endif
          && uFlatPC != UINT64_MAX
         )
          return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
- #endif
      return NULL;
  }
-
-
- #ifdef IN_RC
- /**
-  * Special raw-mode interface for adding an exit to the history.
-  *
-  * Currently this is only for recording, not optimizing, so no return value. If
-  * we start seriously caring about raw-mode again, we may extend it.
-  *
-  * @param   pVCpu           The cross context virtual CPU structure.
-  * @param   uFlagsAndType   Combined flags and type (see EMEXIT_MAKE_FLAGS_AND_TYPE).
-  * @param   uCs             The CS.
-  * @param   uEip            The EIP.
-  * @param   uTimestamp      The TSC value for the exit, 0 if not available.
-  * @thread  EMT(0)
-  */
- VMMRC_INT_DECL(void) EMRCHistoryAddExitCsEip(PVMCPU pVCpu, uint32_t uFlagsAndType, uint16_t uCs, uint32_t uEip, uint64_t uTimestamp)
- {
-     AssertCompile(RT_ELEMENTS(pVCpu->em.s.aExitHistory) == 256);
-     PEMEXITENTRY pHistEntry = &pVCpu->em.s.aExitHistory[(uintptr_t)(pVCpu->em.s.iNextExit++) & 0xff];
-     pHistEntry->uFlatPC       = ((uint64_t)uCs << 32) | uEip;
-     pHistEntry->uTimestamp    = uTimestamp;
-     pHistEntry->uFlagsAndType = uFlagsAndType | EMEXIT_F_CS_EIP;
-     pHistEntry->idxSlot       = UINT32_MAX;
- }
- #endif
…
      pHistEntry->uFlagsAndType = uFlagsAndType | (pHistEntry->uFlagsAndType & (EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC));

- #ifndef IN_RC
      /*
       * If common exit type, we will insert/update the exit into the exit record hash table.
       */
      if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
- # ifdef IN_RING0
+ #ifdef IN_RING0
          && pVCpu->em.s.fExitOptimizationEnabledR0
          && (   !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
- # else
+ #else
          && pVCpu->em.s.fExitOptimizationEnabled
- # endif
+ #endif
          && pHistEntry->uFlatPC != UINT64_MAX
         )
          return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, pHistEntry->uFlatPC, pHistEntry, uExitNo);
- #endif
      return NULL;
  }
…
      pHistEntry->uFlatPC = uFlatPC;

- #ifndef IN_RC
      /*
       * If common exit type, we will insert/update the exit into the exit record hash table.
       */
      if (   (uFlagsAndType & (EMEXIT_F_KIND_MASK | EMEXIT_F_CS_EIP | EMEXIT_F_UNFLATTENED_PC)) == EMEXIT_F_KIND_EM
- # ifdef IN_RING0
+ #ifdef IN_RING0
          && pVCpu->em.s.fExitOptimizationEnabledR0
          && (   !(uFlagsAndType & EMEXIT_F_HM) || pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled)
- # else
+ #else
          && pVCpu->em.s.fExitOptimizationEnabled
- # endif
+ #endif
         )
          return emHistoryAddOrUpdateRecord(pVCpu, uFlagsAndType, uFlatPC, pHistEntry, uExitNo);
- #endif
      return NULL;
  }
…
      if (RT_FAILURE(rc))
      {
- #ifndef IN_RC
          /*
           * If we fail to find the page via the guest's page tables
…
              HMInvalidatePage(pVCpu, uSrcAddr + cbToRead - 1);
          }
- #endif
      }
  }
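The removed EMRCHistoryAddExitCsEip above shows the indexing scheme used by the exit history in EMAll.cpp: a fixed 256-entry ring buffer addressed by a monotonically increasing counter masked with 0xff, so old entries are simply overwritten once the buffer wraps. Below is a minimal, self-contained sketch of that scheme; the names (historyAddExit, HISTORY_SIZE, EXITENTRY) are illustrative and not the VirtualBox API.

    #include <stdint.h>
    #include <stdio.h>

    #define HISTORY_SIZE 256   /* power of two so the running counter can simply be masked */

    typedef struct EXITENTRY
    {
        uint64_t uFlatPC;      /* flat PC of the exit */
        uint64_t uTimestamp;   /* TSC value, 0 if unavailable */
    } EXITENTRY;

    static EXITENTRY g_aExitHistory[HISTORY_SIZE];
    static uint64_t  g_iNextExit;

    /* Record one exit; the oldest entry is silently overwritten once the buffer wraps. */
    static EXITENTRY *historyAddExit(uint64_t uFlatPC, uint64_t uTimestamp)
    {
        EXITENTRY *pEntry = &g_aExitHistory[(size_t)(g_iNextExit++) & (HISTORY_SIZE - 1)];
        pEntry->uFlatPC    = uFlatPC;
        pEntry->uTimestamp = uTimestamp;
        return pEntry;
    }

    int main(void)
    {
        for (uint64_t i = 0; i < 300; i++)      /* wraps after 256 entries */
            historyAddExit(0x400000 + i, i);
        printf("slot for next exit: %u\n", (unsigned)(g_iNextExit & (HISTORY_SIZE - 1)));
        return 0;
    }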
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp
r79118 → r80161



- #ifndef IN_RC

  /**
…
  }

- # ifdef VBOX_WITH_NESTED_HWVIRT_SVM
+ #ifdef VBOX_WITH_NESTED_HWVIRT_SVM
  /**
   * Notification callback for when a \#VMEXIT happens outside SVM R0 code (e.g.
…
      ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST);
  }
- # endif
+ #endif

  /**
…
  }

- #endif /* !IN_RC */

trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp
r80034 → r80161

  VMM_INT_DECL(void) HMDumpHwvirtVmxState(PVMCPU pVCpu)
  {
- #ifndef IN_RC
      /* The string width of -4 used in the macros below to cover 'LDTR', 'GDTR', 'IDTR. */
- # define HMVMX_DUMP_HOST_XDTR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
+ #define HMVMX_DUMP_HOST_XDTR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
      do { \
          LogRel(("  %s%-4s = {base=%016RX64}\n", \
                  (a_pszPrefix), (a_SegName), (a_pVmcs)->u64Host##a_Seg##Base.u)); \
      } while (0)
- # define HMVMX_DUMP_HOST_FS_GS_TR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
+ #define HMVMX_DUMP_HOST_FS_GS_TR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
      do { \
          LogRel(("  %s%-4s = {%04x base=%016RX64}\n", \
                  (a_pszPrefix), (a_SegName), (a_pVmcs)->Host##a_Seg, (a_pVmcs)->u64Host##a_Seg##Base.u)); \
      } while (0)
- # define HMVMX_DUMP_GUEST_SEGREG(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
+ #define HMVMX_DUMP_GUEST_SEGREG(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
      do { \
          LogRel(("  %s%-4s = {%04x base=%016RX64 limit=%08x flags=%04x}\n", \
…
                  (a_pVmcs)->u32Guest##a_Seg##Limit, (a_pVmcs)->u32Guest##a_Seg##Attr)); \
      } while (0)
- # define HMVMX_DUMP_GUEST_XDTR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
+ #define HMVMX_DUMP_GUEST_XDTR(a_pVmcs, a_Seg, a_SegName, a_pszPrefix) \
      do { \
          LogRel(("  %s%-4s = {base=%016RX64 limit=%08x}\n", \
…
  }

- # undef HMVMX_DUMP_HOST_XDTR
- # undef HMVMX_DUMP_HOST_FS_GS_TR
- # undef HMVMX_DUMP_GUEST_SEGREG
- # undef HMVMX_DUMP_GUEST_XDTR
- #else
-     NOREF(pVCpu);
- #endif /* !IN_RC */
+ #undef HMVMX_DUMP_HOST_XDTR
+ #undef HMVMX_DUMP_HOST_FS_GS_TR
+ #undef HMVMX_DUMP_GUEST_SEGREG
+ #undef HMVMX_DUMP_GUEST_XDTR
  }
…


- #ifndef IN_RC
- # ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+ #ifdef VBOX_WITH_NESTED_HWVIRT_VMX
  /**
   * Notification callback for when a VM-exit happens outside VMX R0 code (e.g. in
…
  }

- # endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
- #endif /* IN_RC */
-
+ #endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
+
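HMDumpHwvirtVmxState defines its LogRel formatting helpers as function-local macros and #undefs them before returning, so the raw-mode removal above only has to strip one preprocessor nesting level around them. A small stand-alone sketch of that define, use, #undef pattern follows; the types and names (SEGREG, dumpGuestSegRegs, DUMP_SEGREG) are illustrative rather than the real VMCS structures.

    #include <stdint.h>
    #include <stdio.h>

    typedef struct SEGREG
    {
        uint16_t Sel;
        uint64_t u64Base;
        uint32_t u32Limit;
    } SEGREG;

    static void dumpGuestSegRegs(const SEGREG *pCs, const SEGREG *pSs, const char *pszPrefix)
    {
    /* Formatting helper local to this function; #undef'd below so it cannot leak. */
    #define DUMP_SEGREG(a_pSeg, a_SegName, a_pszPrefix) \
        do { \
            printf("  %s%-4s = {%04x base=%016llx limit=%08x}\n", \
                   (a_pszPrefix), (a_SegName), (a_pSeg)->Sel, \
                   (unsigned long long)(a_pSeg)->u64Base, (a_pSeg)->u32Limit); \
        } while (0)

        DUMP_SEGREG(pCs, "CS", pszPrefix);
        DUMP_SEGREG(pSs, "SS", pszPrefix);

    #undef DUMP_SEGREG
    }

    int main(void)
    {
        SEGREG Cs = { 0x0008, 0, UINT32_MAX };
        SEGREG Ss = { 0x0010, 0, UINT32_MAX };
        dumpGuestSegRegs(&Cs, &Ss, "");
        return 0;
    }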
trunk/src/VBox/VMM/VMMAll/MMAll.cpp
r76553 → r80161

  DECLINLINE(PMMLOOKUPHYPER) mmHyperLookupCC(PVM pVM, void *pv, uint32_t *poff)
  {
- #ifdef IN_RC
-     return mmHyperLookupRC(pVM, (RTRCPTR)pv, poff);
- #elif defined(IN_RING0)
+ #ifdef IN_RING0
      return mmHyperLookupR0(pVM, pv, poff);
+ #elif defined(IN_RING3)
+     return mmHyperLookupR3(pVM, pv, poff);
  #else
-     return mmHyperLookupR3(pVM, pv, poff);
+ # error "Neither IN_RING0 nor IN_RING3!"
  #endif
  }
…
  DECLINLINE(void *) mmHyperLookupCalcCC(PVM pVM, PMMLOOKUPHYPER pLookup, uint32_t off)
  {
- #ifdef IN_RC
-     return (void *)mmHyperLookupCalcRC(pVM, pLookup, off);
- #elif defined(IN_RING0)
+ #ifdef IN_RING0
      return mmHyperLookupCalcR0(pVM, pLookup, off);
- #else
+ #elif defined(IN_RING3)
      NOREF(pVM);
      return mmHyperLookupCalcR3(pLookup, off);
+ #else
+ # error "Neither IN_RING0 nor IN_RING3!"
  #endif
  }
…
  }

- #ifndef IN_RC

  /**
   * Converts a raw-mode context address in the Hypervisor memory region to a current context address.
…
      return NULL;
  }
- #endif


  #ifndef IN_RING3
…


- #ifndef IN_RC
  /**
   * Converts a current context address in the Hypervisor memory region to a raw-mode context address.
…
      return NIL_RTRCPTR;
  }
- #endif

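With the raw-mode context gone, the MMAll.cpp dispatch helpers above only have ring-0 and ring-3 left, and any other build configuration now fails at compile time via #error. A minimal sketch of that pattern is shown below (compile with -DIN_RING0 or -DIN_RING3); IN_RING0 and IN_RING3 are the real VirtualBox context macros, while contextName is an illustrative placeholder.

    #include <stdio.h>

    #ifdef IN_RING0
    static const char *contextName(void) { return "ring-0"; }
    #elif defined(IN_RING3)
    static const char *contextName(void) { return "ring-3"; }
    #else
    # error "Neither IN_RING0 nor IN_RING3!"   /* any other context is now a build error */
    #endif

    int main(void)
    {
        printf("compiled for %s\n", contextName());
        return 0;
    }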
trunk/src/VBox/VMM/VMMAll/MMAllHyper.cpp
r76553 → r80161

  #endif
      int rc = PDMCritSectEnter(&pHeap->Lock, VERR_SEM_BUSY);
- #if defined(IN_RC) || defined(IN_RING0)
+ #ifdef IN_RING0
      if (rc == VERR_SEM_BUSY)
          rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_MMHYPER_LOCK, 0);
trunk/src/VBox/VMM/VMMAll/MMAllPagePool.cpp
r76553 → r80161



- #if !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) && !defined(IN_RC)
+ #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0

  /**
…
  void *mmPagePoolPhys2Ptr(PMMPAGEPOOL pPool, RTHCPHYS HCPhys)
  {
  # if 0 /** @todo have to fix the debugger, but until then this is going on my nerves. */
  #  ifdef IN_RING3
      VM_ASSERT_EMT(pPool->pVM);
  #  endif
  # endif

      /*
trunk/src/VBox/VMM/VMMAll/REMAll.cpp
r76553 → r80161

  #endif /* !IN_RING3 */

- #ifdef IN_RC
- /**
-  * Flushes the physical handler notifications if the queue is almost full.
-  *
-  * This is for avoiding trouble in RC when changing CR3.
-  *
-  * @param   pVM     The cross context VM structure.
-  * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
-  */
- VMMDECL(void) REMNotifyHandlerPhysicalFlushIfAlmostFull(PVM pVM, PVMCPU pVCpu)
- {
-     Assert(pVM->cCpus == 1); NOREF(pVCpu);
-
-     /*
-      * Less than 48 items means we should flush.
-      */
-     uint32_t cFree = 0;
-     for (uint32_t idx = pVM->rem.s.idxFreeList;
-          idx != UINT32_MAX;
-          idx = pVM->rem.s.aHandlerNotifications[idx].idxNext)
-     {
-         Assert(idx < RT_ELEMENTS(pVM->rem.s.aHandlerNotifications));
-         if (++cFree >= 48)
-             return;
-     }
-     AssertRelease(VM_FF_IS_SET(pVM, VM_FF_REM_HANDLER_NOTIFY));
-     AssertRelease(pVM->rem.s.idxPendingList != UINT32_MAX);
-
-     /* Ok, we gotta flush them. */
-     VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS, 0);
-
-     AssertRelease(pVM->rem.s.idxPendingList == UINT32_MAX);
-     AssertRelease(pVM->rem.s.idxFreeList != UINT32_MAX);
- }
- #endif /* IN_RC */
-

  /**
trunk/src/VBox/VMM/VMMAll/SELMAll.cpp
r80055 → r80161



- /*********************************************************************************************************************************
- *   Global Variables                                                                                                             *
- *********************************************************************************************************************************/
- #if defined(LOG_ENABLED) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
- /** Segment register names. */
- static char const g_aszSRegNms[X86_SREG_COUNT][4] = { "ES", "CS", "SS", "DS", "FS", "GS" };
- #endif
-

  /**
…
  }

- #ifdef VBOX_WITH_RAW_MODE_NOT_R0
-     /** @todo when we're in 16 bits mode, we should cut off the address as well?? */
-     if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
-         CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
-     if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
-         CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
- #else
      Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
      Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
- #endif

      /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
…
  }

- #ifdef VBOX_WITH_RAW_MODE_NOT_R0
-     if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
-         CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
-     if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs))
-         CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, &pCtxCore->cs);
- #else
      Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
      Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtxCore->cs));
- #endif

      /* 64 bits mode: CS, DS, ES and SS are treated as if each segment base is 0
trunk/src/VBox/VMM/VMMAll/TMAllCpu.cpp
r76553 → r80161

          case TMTSCMODE_NATIVE_API:
          {
- #ifndef IN_RC
              int rc = NEMHCResumeCpuTickOnAll(pVM, pVCpu, pVM->tm.s.u64LastPausedTSC);
              AssertRCReturn(rc, rc);
              pVCpu->tm.s.offTSCRawSrc = offTSCRawSrcOld = 0;
- #else
-             AssertFailedReturn(VERR_INTERNAL_ERROR_2);
- #endif
              break;
          }
…
              u64 = tmCpuTickGetRawVirtual(pVM, fCheckTimers);
              break;
- #ifndef IN_RC
          case TMTSCMODE_NATIVE_API:
          {
…
              break;
          }
- #endif
          default:
              AssertFailedBreakStmt(u64 = SUPReadTsc());
trunk/src/VBox/VMM/VMMAll/TMAllVirtual.cpp
r76553 → r80161

          case SUPGIPMODE_SYNC_TSC:
          case SUPGIPMODE_INVARIANT_TSC:
- #if defined(IN_RC) || defined(IN_RING0)
+ #ifdef IN_RING0
              if (pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO)
                  pfnWorker = fLFence ? RTTimeNanoTSLFenceSyncInvarNoDelta : RTTimeNanoTSLegacySyncInvarNoDelta;
…

          case SUPGIPMODE_ASYNC_TSC:
- #if defined(IN_RC) || defined(IN_RING0)
+ #ifdef IN_RING0
              pfnWorker = fLFence ? RTTimeNanoTSLFenceAsync : RTTimeNanoTSLegacyAsync;
  #else
trunk/src/VBox/VMM/VMMAll/VMAll.cpp
r76553 → r80161

  #include <iprt/assert.h>
  #include <iprt/string.h>
- #ifndef IN_RC
- # include <iprt/thread.h>
- #endif
+ #include <iprt/thread.h>

trunk/src/VBox/VMM/VMMAll/VMMAll.cpp
r80003 → r80161



- #ifndef IN_RC
  /**
   * Counterpart to vmmInitFormatTypes, called by VMMR3Term and VMMR0Term.
…
      RTStrFormatTypeDeregister("vmcpuset");
  }
- #endif

trunk/src/VBox/VMM/VMMR3/DBGFAddrSpace.cpp
r80007 → r80161

              LogRel(("DBGF: Lazy init of RC address space\n"));
              PDMR3LdrEnumModules(pUVM->pVM, dbgfR3AsLazyPopulateRCCallback, hDbgAs);
- #ifdef VBOX_WITH_RAW_MODE
-             PATMR3DbgPopulateAddrSpace(pUVM->pVM, hDbgAs);
- #endif
          }
          else if (hAlias == DBGF_AS_PHYS && pUVM->pVM)
trunk/src/VBox/VMM/VMMR3/DBGFDisas.cpp
r80014 → r80161

      /** 64 bits mode or not. */
      bool                f64Bits;
-     /** Read original unpatched bytes from the patch manager. */
-     bool                fUnpatchedBytes;
-     /** Set when fUnpatchedBytes is active and we encounter patched bytes. */
-     bool                fPatchedInstr;
  } DBGFDISASSTATE, *PDBGFDISASSTATE;
…
      pState->GCPtrPage       = 0;
      pState->pvPageR3        = NULL;
-     pState->hDbgAs          = VM_IS_RAW_MODE_ENABLED(pVM)
-                             ? DBGF_AS_RC_AND_GC_GLOBAL
-                             : DBGF_AS_GLOBAL;
+     pState->hDbgAs          = DBGF_AS_GLOBAL;
      pState->pVM             = pVM;
      pState->pVCpu           = pVCpu;
      pState->fLocked         = false;
      pState->f64Bits         = enmMode >= PGMMODE_AMD64 && pSelInfo->u.Raw.Gen.u1Long;
- #ifdef VBOX_WITH_RAW_MODE
-     pState->fUnpatchedBytes = RT_BOOL(fFlags & DBGF_DISAS_FLAGS_UNPATCHED_BYTES);
-     pState->fPatchedInstr   = false;
- #endif

      DISCPUMODE enmCpuMode;
…
          /* translate the address */
          pState->GCPtrPage = GCPtr & PAGE_BASE_GC_MASK;
-         if (   VM_IS_RAW_MODE_ENABLED(pState->pVM)
-             && MMHyperIsInsideArea(pState->pVM, pState->GCPtrPage))
-         {
-             pState->pvPageR3 = MMHyperRCToR3(pState->pVM, (RTRCPTR)pState->GCPtrPage);
-             if (!pState->pvPageR3)
-                 rc = VERR_INVALID_POINTER;
-         }
+         if (pState->fLocked)
+             PGMPhysReleasePageMappingLock(pState->pVM, &pState->PageMapLock);
+         if (pState->enmMode <= PGMMODE_PROTECTED)
+             rc = PGMPhysGCPhys2CCPtrReadOnly(pState->pVM, pState->GCPtrPage, &pState->pvPageR3, &pState->PageMapLock);
+         else
+             rc = PGMPhysGCPtr2CCPtrReadOnly(pState->pVCpu, pState->GCPtrPage, &pState->pvPageR3, &pState->PageMapLock);
+         if (RT_SUCCESS(rc))
+             pState->fLocked = true;
          else
          {
-             if (pState->fLocked)
-                 PGMPhysReleasePageMappingLock(pState->pVM, &pState->PageMapLock);
-
-             if (pState->enmMode <= PGMMODE_PROTECTED)
-                 rc = PGMPhysGCPhys2CCPtrReadOnly(pState->pVM, pState->GCPtrPage, &pState->pvPageR3, &pState->PageMapLock);
-             else
-                 rc = PGMPhysGCPtr2CCPtrReadOnly(pState->pVCpu, pState->GCPtrPage, &pState->pvPageR3, &pState->PageMapLock);
-             pState->fLocked = RT_SUCCESS_NP(rc);
-         }
-         if (RT_FAILURE(rc))
-         {
+             pState->fLocked  = false;
              pState->pvPageR3 = NULL;
              return rc;
…
          if (cb > cbMaxRead)
              cb = cbMaxRead;
-
- #ifdef VBOX_WITH_RAW_MODE
-         /*
-          * Read original bytes from PATM if asked to do so.
-          */
-         if (pState->fUnpatchedBytes)
-         {
-             size_t cbRead = cb;
-             int rc = PATMR3ReadOrgInstr(pState->pVM, GCPtr, &pDis->abInstr[offInstr], cbRead, &cbRead);
-             if (RT_SUCCESS(rc))
-             {
-                 pState->fPatchedInstr = true;
-                 if (cbRead >= cbMinRead)
-                 {
-                     pDis->cbCachedInstr = offInstr + (uint8_t)cbRead;
-                     return rc;
-                 }
-
-                 cbMinRead -= (uint8_t)cbRead;
-                 cbMaxRead -= (uint8_t)cbRead;
-                 cb        -= (uint8_t)cbRead;
-                 offInstr  += (uint8_t)cbRead;
-                 GCPtr     += cbRead;
-                 if (!cb)
-                     continue;
-             }
-         }
- #endif /* VBOX_WITH_RAW_MODE */

          /*
…
              && DIS_FMT_SEL_GET_REG(u32Sel) == DISSELREG_SS
              && pSelInfo->GCPtrBase == 0
-             && pSelInfo->cbLimit >= UINT32_MAX
- #ifdef VBOX_WITH_RAW_MODE
-             && PATMIsPatchGCAddr(pState->pVM, pState->Cpu.uInstrAddr)
- #endif
-            )
+             && pSelInfo->cbLimit >= UINT32_MAX)
          {
              DBGFR3AddrFromFlat(pState->pVM->pUVM, &Addr, uAddress);
…
                            &SelInfo);

- #ifdef VBOX_WITH_RAW_MODE
-     /*
-      * Patched instruction annotations.
-      */
-     char szPatchAnnotations[256];
-     szPatchAnnotations[0] = '\0';
-     if (fFlags & DBGF_DISAS_FLAGS_ANNOTATE_PATCHED)
-         PATMR3DbgAnnotatePatchedInstruction(pVM, GCPtr, State.Cpu.cbInstr, szPatchAnnotations, sizeof(szPatchAnnotations));
- #endif
-
      /*
       * Print it to the user specified buffer.
…
      }

- #ifdef VBOX_WITH_RAW_MODE
-     if (szPatchAnnotations[0] && cch + 1 < cbOutput)
-         RTStrPrintf(pszOutput + cch, cbOutput - cch, " ; %s", szPatchAnnotations);
- #endif
-
      if (pcbInstr)
          *pcbInstr = State.Cpu.cbInstr;
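The disassembler's byte reader above works one guest page at a time: it maps the page containing the current read pointer and clamps each read so it neither crosses that page nor exceeds the caller's maximum (the cbMaxRead clamp is visible in the hunk; the page-remainder computation is assumed here). Below is a self-contained sketch of just that clamping step; MY_PAGE_SIZE and clampReadToPage are illustrative names, not the VirtualBox API.

    #include <stdint.h>
    #include <stdio.h>

    #define MY_PAGE_SIZE        0x1000u
    #define MY_PAGE_OFFSET_MASK (MY_PAGE_SIZE - 1)

    /* How many bytes may be read at GCPtr without crossing into the next page,
       capped at the caller's cbMaxRead. */
    static uint32_t clampReadToPage(uint64_t GCPtr, uint32_t cbMaxRead)
    {
        uint32_t cb = MY_PAGE_SIZE - (uint32_t)(GCPtr & MY_PAGE_OFFSET_MASK);
        if (cb > cbMaxRead)
            cb = cbMaxRead;
        return cb;
    }

    int main(void)
    {
        printf("%u\n", clampReadToPage(0x12ff8, 16)); /* 8: only 8 bytes left in the page */
        printf("%u\n", clampReadToPage(0x12000, 16)); /* 16: the whole request fits */
        return 0;
    }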
trunk/src/VBox/VMM/VMMR3/IOM.cpp
r80091 → r80161

      }
  #ifndef IOM_NO_PDMINS_CHECKS
- # ifndef IN_RC
      if (pRange->pDevIns != pDevIns)
- # else
-     if (pRange->pDevIns != MMHyperRCToCC(pVM, pDevIns))
- # endif
      {
          AssertMsgFailed(("Not owner! Port=%#x %#x-%#x! (%s)\n", Port, PortStart, (unsigned)PortStart + cPorts - 1, pszDesc));
…
      }
  #ifndef IOM_NO_PDMINS_CHECKS
- # ifndef IN_RC
      if (pRange->pDevIns != pDevIns)
- # else
-     if (pRange->pDevIns != MMHyperRCToCC(pVM, pDevIns))
- # endif
      {
          AssertMsgFailed(("Not owner! Port=%#x %#x-%#x! (%s)\n", Port, PortStart, (unsigned)PortStart + cPorts - 1, pszDesc));
trunk/src/VBox/VMM/VMMR3/MM.cpp
r80118 → r80161

- /* @page pg_mm     MM - The Memory Manager
+ /** @page pg_mm    MM - The Memory Manager
   *
   * The memory manager is in charge of the following memory:
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
r80135 → r80161

              RTR0PTR R0PtrChunk = NIL_RTR0PTR;
              void *pvChunk = NULL;
-             rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk,
- #if defined(VBOX_WITH_MORE_RING0_MEM_MAPPINGS)
-                                   &R0PtrChunk,
- #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE)
-                                   VM_IS_HM_OR_NEM_ENABLED(pVM) ? &R0PtrChunk : NULL,
- #else
-                                   NULL,
- #endif
-                                   paChunkPages);
+             rc = SUPR3PageAllocEx(cChunkPages, 0 /*fFlags*/, &pvChunk, &R0PtrChunk, paChunkPages);
              AssertLogRelMsgRCBreakStmt(rc, ("rc=%Rrc, cChunkPages=%#zx\n", rc, cChunkPages), RTMemTmpFree(paChunkPages));

- #if defined(VBOX_WITH_MORE_RING0_MEM_MAPPINGS)
              Assert(R0PtrChunk != NIL_RTR0PTR);
- #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE)
-             if (!VM_IS_HM_OR_NEM_ENABLED(pVM))
-                 R0PtrChunk = NIL_RTR0PTR;
- #else
-             R0PtrChunk = (uintptr_t)pvChunk;
- #endif
              memset(pvChunk, 0, cChunkPages << PAGE_SHIFT);
trunk/src/VBox/VMM/VMMR3/VM.cpp
r80118 → r80161

          case VM_EXEC_ENGINE_NATIVE_API:
- #ifndef IN_RC
              return NEMHCIsLongModeAllowed(pVM);
- #else
-             return false;
- #endif

          case VM_EXEC_ENGINE_NOT_SET:
trunk/src/VBox/VMM/include/REMInternal.h
r76585 → r80161

  bool    remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException);
  void    remR3CSAMCheckEIP(CPUState *env, RTGCPTR GCPtrCode);
- # ifdef VBOX_WITH_RAW_MODE
- bool    remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte);
- # endif
  bool    remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix);
  void    remR3FlushPage(CPUState *env, RTGCPTR GCPtr);
trunk/src/recompiler/VBoxRecompiler.c
r80024 → r80161

       *    CPU_RAW_HM makes sure we never execute interrupt handlers in the recompiler.
       */
-     if (!VM_IS_RAW_MODE_ENABLED(pVM))
-         pVM->rem.s.Env.state |= CPU_RAW_HM;
+     pVM->rem.s.Env.state |= CPU_RAW_HM;

      /* Skip the TB flush as that's rather expensive and not necessary for single instruction emulation. */
…
          return false;

-     if (!VM_IS_RAW_MODE_ENABLED(env->pVM))
-     {
  #ifdef RT_OS_WINDOWS
      PCPUMCTX pCtx = alloca(sizeof(*pCtx));
  #else
      CPUMCTX Ctx;
      PCPUMCTX pCtx = &Ctx;
  #endif
      /** @todo NEM: scheduling. */

      env->state |= CPU_RAW_HM;

      /*
       * Create partial context for HMCanExecuteGuest.
       */
      pCtx->cr0 = env->cr[0];
      pCtx->cr3 = env->cr[3];
      pCtx->cr4 = env->cr[4];

      pCtx->tr.Sel       = env->tr.selector;
      pCtx->tr.ValidSel  = env->tr.selector;
      pCtx->tr.fFlags    = CPUMSELREG_FLAGS_VALID;
      pCtx->tr.u64Base   = env->tr.base;
      pCtx->tr.u32Limit  = env->tr.limit;
      pCtx->tr.Attr.u    = (env->tr.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

      pCtx->ldtr.Sel      = env->ldt.selector;
      pCtx->ldtr.ValidSel = env->ldt.selector;
      pCtx->ldtr.fFlags   = CPUMSELREG_FLAGS_VALID;
      pCtx->ldtr.u64Base  = env->ldt.base;
      pCtx->ldtr.u32Limit = env->ldt.limit;
      pCtx->ldtr.Attr.u   = (env->ldt.flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

      pCtx->idtr.cbIdt = env->idt.limit;
      pCtx->idtr.pIdt  = env->idt.base;

      pCtx->gdtr.cbGdt = env->gdt.limit;
      pCtx->gdtr.pGdt  = env->gdt.base;

      pCtx->rsp = env->regs[R_ESP];
      pCtx->rip = env->eip;

      pCtx->eflags.u32 = env->eflags;

      pCtx->cs.Sel      = env->segs[R_CS].selector;
      pCtx->cs.ValidSel = env->segs[R_CS].selector;
      pCtx->cs.fFlags   = CPUMSELREG_FLAGS_VALID;
      pCtx->cs.u64Base  = env->segs[R_CS].base;
      pCtx->cs.u32Limit = env->segs[R_CS].limit;
      pCtx->cs.Attr.u   = (env->segs[R_CS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

      pCtx->ds.Sel      = env->segs[R_DS].selector;
      pCtx->ds.ValidSel = env->segs[R_DS].selector;
      pCtx->ds.fFlags   = CPUMSELREG_FLAGS_VALID;
      pCtx->ds.u64Base  = env->segs[R_DS].base;
      pCtx->ds.u32Limit = env->segs[R_DS].limit;
      pCtx->ds.Attr.u   = (env->segs[R_DS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

      pCtx->es.Sel      = env->segs[R_ES].selector;
      pCtx->es.ValidSel = env->segs[R_ES].selector;
      pCtx->es.fFlags   = CPUMSELREG_FLAGS_VALID;
      pCtx->es.u64Base  = env->segs[R_ES].base;
      pCtx->es.u32Limit = env->segs[R_ES].limit;
      pCtx->es.Attr.u   = (env->segs[R_ES].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

      pCtx->fs.Sel      = env->segs[R_FS].selector;
      pCtx->fs.ValidSel = env->segs[R_FS].selector;
      pCtx->fs.fFlags   = CPUMSELREG_FLAGS_VALID;
      pCtx->fs.u64Base  = env->segs[R_FS].base;
      pCtx->fs.u32Limit = env->segs[R_FS].limit;
      pCtx->fs.Attr.u   = (env->segs[R_FS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

      pCtx->gs.Sel      = env->segs[R_GS].selector;
      pCtx->gs.ValidSel = env->segs[R_GS].selector;
      pCtx->gs.fFlags   = CPUMSELREG_FLAGS_VALID;
      pCtx->gs.u64Base  = env->segs[R_GS].base;
      pCtx->gs.u32Limit = env->segs[R_GS].limit;
      pCtx->gs.Attr.u   = (env->segs[R_GS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

      pCtx->ss.Sel      = env->segs[R_SS].selector;
      pCtx->ss.ValidSel = env->segs[R_SS].selector;
      pCtx->ss.fFlags   = CPUMSELREG_FLAGS_VALID;
      pCtx->ss.u64Base  = env->segs[R_SS].base;
      pCtx->ss.u32Limit = env->segs[R_SS].limit;
      pCtx->ss.Attr.u   = (env->segs[R_SS].flags >> SEL_FLAGS_SHIFT) & SEL_FLAGS_SMASK;

      pCtx->msrEFER = env->efer;
      pCtx->hwvirt.enmHwvirt = CPUMHWVIRT_NONE;

      /*
       * Hardware accelerated mode:
       * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
       */
      PVMCPU pVCpu = &env->pVM->aCpus[0];
      if (HMCanExecuteGuest(pVCpu, pCtx))
      {
          *piException = EXCP_EXECUTE_HM;
          return true;
      }
      return false;
- }
-
-     /*
-      * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
-      * or 32 bits protected mode ring 0 code
-      *
-      * The tests are ordered by the likelihood of being true during normal execution.
-      */
-     if (fFlags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))
-     {
-         STAM_COUNTER_INC(&gStatRefuseTFInhibit);
-         Log2(("raw mode refused: fFlags=%#x\n", fFlags));
-         return false;
-     }
-
- #ifndef VBOX_RAW_V86
-     if (fFlags & VM_MASK) {
-         STAM_COUNTER_INC(&gStatRefuseVM86);
-         Log2(("raw mode refused: VM_MASK\n"));
-         return false;
-     }
- #endif
-
-     if (env->state & CPU_EMULATE_SINGLE_INSTR)
-     {
- #ifndef DEBUG_bird
-         Log2(("raw mode refused: CPU_EMULATE_SINGLE_INSTR\n"));
- #endif
-         return false;
-     }
-
-     if (env->singlestep_enabled)
-     {
-         //Log2(("raw mode refused: Single step\n"));
-         return false;
-     }
-
-     if (!QTAILQ_EMPTY(&env->breakpoints))
-     {
-         //Log2(("raw mode refused: Breakpoints\n"));
-         return false;
-     }
-
-     if (!QTAILQ_EMPTY(&env->watchpoints))
-     {
-         //Log2(("raw mode refused: Watchpoints\n"));
-         return false;
-     }
-
-     u32CR0 = env->cr[0];
-     if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
-     {
-         STAM_COUNTER_INC(&gStatRefusePaging);
-         //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
-         return false;
-     }
-
-     if (env->cr[4] & CR4_PAE_MASK)
-     {
-         if (!(env->cpuid_features & X86_CPUID_FEATURE_EDX_PAE))
-         {
-             STAM_COUNTER_INC(&gStatRefusePAE);
-             return false;
-         }
-     }
-
-     if (((fFlags >> HF_CPL_SHIFT) & 3) == 3)
-     {
-         if (!(env->eflags & IF_MASK))
-         {
-             STAM_COUNTER_INC(&gStatRefuseIF0);
-             Log2(("raw mode refused: IF (RawR3)\n"));
-             return false;
-         }
-
-         if (!(u32CR0 & CR0_WP_MASK))
-         {
-             STAM_COUNTER_INC(&gStatRefuseWP0);
-             Log2(("raw mode refused: CR0.WP + RawR0\n"));
-             return false;
-         }
-     }
-     else
-     {
-         // Let's start with pure 32 bits ring 0 code first
-         if ((fFlags & (HF_SS32_MASK | HF_CS32_MASK)) != (HF_SS32_MASK | HF_CS32_MASK))
-         {
-             STAM_COUNTER_INC(&gStatRefuseCode16);
-             Log2(("raw r0 mode refused: HF_[S|C]S32_MASK fFlags=%#x\n", fFlags));
-             return false;
-         }
-
-         /* Only R0. */
-         if (((fFlags >> HF_CPL_SHIFT) & 3) != 0)
-         {
-             STAM_COUNTER_INC(&gStatRefuseRing1or2);
-             Log2(("raw r0 mode refused: CPL %d\n", ((fFlags >> HF_CPL_SHIFT) & 3) ));
-             return false;
-         }
-
-         if (!(u32CR0 & CR0_WP_MASK))
-         {
-             STAM_COUNTER_INC(&gStatRefuseWP0);
-             Log2(("raw r0 mode refused: CR0.WP=0!\n"));
-             return false;
-         }
-
- #ifdef VBOX_WITH_RAW_MODE
-         if (PATMIsPatchGCAddr(env->pVM, eip))
-         {
-             Log2(("raw r0 mode forced: patch code\n"));
-             *piException = EXCP_EXECUTE_RAW;
-             return true;
-         }
- #endif
-
- #if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
-         if (!(env->eflags & IF_MASK))
-         {
-             STAM_COUNTER_INC(&gStatRefuseIF0);
-             ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, *env->pVMeflags));
-             //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
-             return false;
-         }
- #endif
-
- #ifndef VBOX_WITH_RAW_RING1
-         if (((env->eflags >> IOPL_SHIFT) & 3) != 0)
-         {
-             Log2(("raw r0 mode refused: IOPL %d\n", ((env->eflags >> IOPL_SHIFT) & 3)));
-             return false;
-         }
- #endif
-         env->state |= CPU_RAW_RING0;
-     }
-
-     /*
-      * Don't reschedule the first time we're called, because there might be
-      * special reasons why we're here that is not covered by the above checks.
-      */
-     if (env->pVM->rem.s.cCanExecuteRaw == 1)
-     {
-         Log2(("raw mode refused: first scheduling\n"));
-         STAM_COUNTER_INC(&gStatRefuseCanExecute);
-         return false;
-     }
-
-     /*
-      * Stale hidden selectors means raw-mode is unsafe (being very careful).
-      */
-     if (env->segs[R_CS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
-     {
-         Log2(("raw mode refused: stale CS (%#x)\n", env->segs[R_CS].selector));
-         STAM_COUNTER_INC(&gaStatRefuseStale[R_CS]);
-         return false;
-     }
-     if (env->segs[R_SS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
-     {
-         Log2(("raw mode refused: stale SS (%#x)\n", env->segs[R_SS].selector));
-         STAM_COUNTER_INC(&gaStatRefuseStale[R_SS]);
-         return false;
-     }
-     if (env->segs[R_DS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
-     {
-         Log2(("raw mode refused: stale DS (%#x)\n", env->segs[R_DS].selector));
-         STAM_COUNTER_INC(&gaStatRefuseStale[R_DS]);
-         return false;
-     }
-     if (env->segs[R_ES].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
-     {
-         Log2(("raw mode refused: stale ES (%#x)\n", env->segs[R_ES].selector));
-         STAM_COUNTER_INC(&gaStatRefuseStale[R_ES]);
-         return false;
-     }
-     if (env->segs[R_FS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
-     {
-         Log2(("raw mode refused: stale FS (%#x)\n", env->segs[R_FS].selector));
-         STAM_COUNTER_INC(&gaStatRefuseStale[R_FS]);
-         return false;
-     }
-     if (env->segs[R_GS].fVBoxFlags & CPUMSELREG_FLAGS_STALE)
-     {
-         Log2(("raw mode refused: stale GS (%#x)\n", env->segs[R_GS].selector));
-         STAM_COUNTER_INC(&gaStatRefuseStale[R_GS]);
-         return false;
-     }
-
-     /* Assert(env->pVCpu && PGMPhysIsA20Enabled(env->pVCpu));*/
-     *piException = EXCP_EXECUTE_RAW;
-     return true;
  }


- #ifdef VBOX_WITH_RAW_MODE
- /**
-  * Fetches a code byte.
-  *
-  * @returns Success indicator (bool) for ease of use.
-  * @param   env         The CPU environment structure.
-  * @param   GCPtrInstr  Where to fetch code.
-  * @param   pu8Byte     Where to store the byte on success
-  */
- bool remR3GetOpcode(CPUX86State *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
- {
-     int rc = PATMR3QueryOpcode(env->pVM, GCPtrInstr, pu8Byte);
-     if (RT_SUCCESS(rc))
-         return true;
-     return false;
- }
- #endif /* VBOX_WITH_RAW_MODE */
…
  void remR3ProtectCode(CPUX86State *env, RTGCPTR GCPtr)
  {
- #ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
-     Assert(env->pVM->rem.s.fInREM);
-     if (     (env->cr[0] & X86_CR0_PG)                  /* paging must be enabled */
-         &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)    /* ignore during single instruction execution */
-         &&   (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
-         &&  !(env->eflags & VM_MASK)                    /* no V86 mode */
-         &&   VM_IS_RAW_MODE_ENABLED(env->pVM))
-         CSAMR3MonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
- #endif
  }
…
  {
      Assert(env->pVM->rem.s.fInREM);
- #ifdef VBOX_REM_PROTECT_PAGES_FROM_SMC
-     if (     (env->cr[0] & X86_CR0_PG)                  /* paging must be enabled */
-         &&  !(env->state & CPU_EMULATE_SINGLE_INSTR)    /* ignore during single instruction execution */
-         &&   (((env->hflags >> HF_CPL_SHIFT) & 3) == 0) /* supervisor mode only */
-         &&  !(env->eflags & VM_MASK)                    /* no V86 mode */
-         &&   VM_IS_RAW_MODE_ENABLED(env->pVM))
-         CSAMR3UnmonitorPage(env->pVM, GCPtr, CSAM_TAG_REM);
- #endif
  }
…
  void remR3RecordCall(CPUX86State *env)
  {
- #ifdef VBOX_WITH_RAW_MODE
-     CSAMR3RecordCallAddress(env->pVM, env->eip);
- #endif
  }