Changeset 45701 in vbox for trunk/src/VBox/VMM/VMMR3
Timestamp: Apr 24, 2013 2:21:09 PM
Location:  trunk/src/VBox/VMM/VMMR3
Files:     7 edited
trunk/src/VBox/VMM/VMMR3/DBGF.cpp (r45692 → r45701)

@@ -360 +360 @@

     /*
-     * Commands?
+     * Command pending? Process it.
      */
     if (pVM->dbgf.s.enmVMMCmd != DBGFCMD_NO_COMMAND)
     {
-#ifdef VBOX_WITH_RAW_MODE
-        /** @todo stupid GDT/LDT sync hack. go away! */
-        SELMR3UpdateFromCPUM(pVM, pVCpu);
-#endif
-
-        /*
-         * Process the command.
-         */
         bool        fResumeExecution;
         DBGFCMDDATA CmdData = pVM->dbgf.s.VMMCmdData;

@@ -672 +664 @@

     LogFlow(("dbgfR3VMMWait:\n"));
-
-#ifdef VBOX_WITH_RAW_MODE
-    /** @todo stupid GDT/LDT sync hack. go away! */
-    SELMR3UpdateFromCPUM(pVM, pVCpu);
-#endif
     int rcRet = VINF_SUCCESS;

trunk/src/VBox/VMM/VMMR3/EMHM.cpp (r45533 → r45701)

@@ -84 +84 @@
     int      rc;
     PCPUMCTX pCtx = pVCpu->em.s.pCtx;
-    VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS));
+# ifdef VBOX_WITH_RAW_MODE
+    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS));
+# endif

     /*

@@ -501 +503 @@
     /** @todo change this FF hack into an assertion, they simply SHALL NOT be set in
      *        HM mode. */
+    Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
     VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS)); /* not relevant in HM mode; shouldn't be set really. */
 #endif

trunk/src/VBox/VMM/VMMR3/HM.cpp (r45665 → r45701)

@@ -487 +487 @@
             AssertLogRelMsgFailedReturn(("SUPR3QueryVTCaps didn't return either AMD-V or VT-x flag set (%#x)!\n", fCaps),
                                         VERR_INTERNAL_ERROR_5);
+
+        /*
+         * Do we require a little bit or raw-mode for 64-bit guest execution?
+         */
+        pVM->fHMNeedRawModeCtx = HC_ARCH_BITS == 32
+                              && pVM->fHMEnabled
+                              && pVM->hm.s.fAllow64BitGuests;
     }
     else

@@ -831 +838 @@
 {
 #ifdef VBOX_WITH_RAW_MODE
-    /* Disable PATM & CSAM. */
-    PATMR3AllowPatching(pVM->pUVM, false);
-    CSAMDisableScanning(pVM);
-
     /* Turn off IDT/LDT/GDT and TSS monitoring and sycing. */
     SELMR3DisableMonitoring(pVM);

@@ -840 +843 @@
 #endif

-    /* Disable the switcher code (safety precaution). */
-    VMMR3DisableSwitcher(pVM);
-
     /* Disable mapping of the hypervisor into the shadow page table. */
     PGMR3MappingsDisable(pVM);
-
-    /* Disable the switcher */
-    VMMR3DisableSwitcher(pVM);

     /* Reinit the paging mode to force the new shadow mode. */

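The fHMNeedRawModeCtx flag introduced above is the hinge for the rest of the changeset: on a 32-bit host, running 64-bit guests under hardware virtualization still needs a small raw-mode context (the 32-to-64 switcher plus a shadow GDT/TSS), which is what the SELM and VMMSwitcher changes below key off. A rough, self-contained sketch of that decision follows; the struct, field and helper names are invented for illustration, only the three conditions come from the hunk above.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins; the real code stores this on the VM structure as
   pVM->fHMNeedRawModeCtx, computed from HC_ARCH_BITS, fHMEnabled and
   hm.s.fAllow64BitGuests as shown in the hunk above. */
typedef struct VmConfig
{
    unsigned cHostBits;          /* stands in for HC_ARCH_BITS (32 or 64) */
    bool     fHmEnabled;         /* hardware virtualization is in use */
    bool     fAllow64BitGuests;  /* 64-bit guest support was requested */
} VmConfig;

static bool needRawModeCtx(const VmConfig *pCfg)
{
    /* A 32-bit host using HM still needs a small raw-mode context
       (switcher + shadow GDT/TSS) to run 64-bit guests. */
    return pCfg->cHostBits == 32
        && pCfg->fHmEnabled
        && pCfg->fAllow64BitGuests;
}

int main(void)
{
    VmConfig Cfg = { 32, true, true };
    printf("raw-mode context needed: %s\n", needRawModeCtx(&Cfg) ? "yes" : "no");
    return 0;
}
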
trunk/src/VBox/VMM/VMMR3/PGM.cpp (r45618 → r45701)

@@ -3380 +3380 @@
 #ifdef VBOX_WITH_RAW_MODE
-    if (enmSwitcher != VMMSWITCHER_INVALID)
+    if (   enmSwitcher != VMMSWITCHER_INVALID
+        && !HMIsEnabled(pVM))
     {
         /*

trunk/src/VBox/VMM/VMMR3/SELM.cpp (r45618 → r45701)

@@ -123 +123 @@
 VMMR3DECL(int) SELMR3Init(PVM pVM)
 {
+    int rc;
     LogFlow(("SELMR3Init\n"));

@@ -149 +150 @@
     pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = (SELM_GDT_ELEMENTS - 0x5) << 3;

+    if (HMIsRawModeCtxNeeded(pVM))
+    {
         /*
          * Allocate GDT table.
          */
-        int rc = MMR3HyperAllocOnceNoRel(pVM, sizeof(pVM->selm.s.paGdtR3[0]) * SELM_GDT_ELEMENTS,
+        rc = MMR3HyperAllocOnceNoRel(pVM, sizeof(pVM->selm.s.paGdtR3[0]) * SELM_GDT_ELEMENTS,
                                      PAGE_SIZE, MM_TAG_SELM, (void **)&pVM->selm.s.paGdtR3);
         AssertRCReturn(rc, rc);

         /*
          * Allocate LDT area.
          */
         rc = MMR3HyperAllocOnceNoRel(pVM, _64K + PAGE_SIZE, PAGE_SIZE, MM_TAG_SELM, &pVM->selm.s.pvLdtR3);
         AssertRCReturn(rc, rc);
+    }

@@ -178 +182 @@
     pVM->selm.s.fSyncTSSRing0Stack = false;

-    /* The I/O bitmap starts right after the virtual interrupt redirection bitmap. Outside the TSS on purpose; the CPU will not check it
-     * for I/O operations. */
+    /* The I/O bitmap starts right after the virtual interrupt redirection
+       bitmap. Outside the TSS on purpose; the CPU will not check it for
+       I/O operations. */
     pVM->selm.s.Tss.offIoBitmap = sizeof(VBOXTSS);
     /* bit set to 1 means no redirection */

@@ -197 +202 @@
      * Statistics.
      */
+    if (!HMIsEnabled(pVM))
+    {
         STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestGDTHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest GDT.");
         STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestGDTUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/GDTEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest GDT.");
         STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestLDT, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/LDT", STAMUNIT_OCCURENCES, "The number of writes to the Guest LDT was detected.");
         STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSHandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSInt", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS.");
         STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSRedir, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSRedir", STAMUNIT_OCCURENCES, "The number of handled redir bitmap writes to the Guest TSS.");
         STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSHandledChanged, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSIntChg", STAMUNIT_OCCURENCES, "The number of handled writes to the Guest TSS where the R0 stack changed.");
         STAM_REG(pVM, &pVM->selm.s.StatRCWriteGuestTSSUnhandled, STAMTYPE_COUNTER, "/SELM/GC/Write/Guest/TSSEmu", STAMUNIT_OCCURENCES, "The number of unhandled writes to the Guest TSS.");
         STAM_REG(pVM, &pVM->selm.s.StatTSSSync, STAMTYPE_PROFILE, "/PROF/SELM/TSSSync", STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3SyncTSS() body.");
         STAM_REG(pVM, &pVM->selm.s.StatUpdateFromCPUM, STAMTYPE_PROFILE, "/PROF/SELM/UpdateFromCPUM", STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3UpdateFromCPUM() body.");

         STAM_REL_REG(pVM, &pVM->selm.s.StatHyperSelsChanged, STAMTYPE_COUNTER, "/SELM/HyperSels/Changed", STAMUNIT_OCCURENCES, "The number of times we had to relocate our hypervisor selectors.");
         STAM_REL_REG(pVM, &pVM->selm.s.StatScanForHyperSels, STAMTYPE_COUNTER, "/SELM/HyperSels/Scan", STAMUNIT_OCCURENCES, "The number of times we had find free hypervisor selectors.");

         STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_ES], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleES", STAMUNIT_OCCURENCES, "Stale ES was detected in UpdateFromCPUM.");
         STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_CS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleCS", STAMUNIT_OCCURENCES, "Stale CS was detected in UpdateFromCPUM.");
         STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_SS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleSS", STAMUNIT_OCCURENCES, "Stale SS was detected in UpdateFromCPUM.");
         STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_DS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleDS", STAMUNIT_OCCURENCES, "Stale DS was detected in UpdateFromCPUM.");
         STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_FS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleFS", STAMUNIT_OCCURENCES, "Stale FS was detected in UpdateFromCPUM.");
         STAM_REL_REG(pVM, &pVM->selm.s.aStatDetectedStaleSReg[X86_SREG_GS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/DetectedStaleGS", STAMUNIT_OCCURENCES, "Stale GS was detected in UpdateFromCPUM.");

         STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_ES], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleES", STAMUNIT_OCCURENCES, "Already stale ES in UpdateFromCPUM.");
         STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_CS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleCS", STAMUNIT_OCCURENCES, "Already stale CS in UpdateFromCPUM.");
         STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_SS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleSS", STAMUNIT_OCCURENCES, "Already stale SS in UpdateFromCPUM.");
         STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_DS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleDS", STAMUNIT_OCCURENCES, "Already stale DS in UpdateFromCPUM.");
         STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_FS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleFS", STAMUNIT_OCCURENCES, "Already stale FS in UpdateFromCPUM.");
         STAM_REL_REG(pVM, &pVM->selm.s.aStatAlreadyStaleSReg[X86_SREG_GS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/AlreadyStaleGS", STAMUNIT_OCCURENCES, "Already stale GS in UpdateFromCPUM.");

         STAM_REL_REG(pVM, &pVM->selm.s.StatStaleToUnstaleSReg, STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/StaleToUnstale", STAMUNIT_OCCURENCES, "Transitions from stale to unstale UpdateFromCPUM.");

         STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_ES], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedES", STAMUNIT_OCCURENCES, "Updated hidden ES values in UpdateFromCPUM.");
         STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_CS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedCS", STAMUNIT_OCCURENCES, "Updated hidden CS values in UpdateFromCPUM.");
         STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_SS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedSS", STAMUNIT_OCCURENCES, "Updated hidden SS values in UpdateFromCPUM.");
         STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_DS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedDS", STAMUNIT_OCCURENCES, "Updated hidden DS values in UpdateFromCPUM.");
         STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_FS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedFS", STAMUNIT_OCCURENCES, "Updated hidden FS values in UpdateFromCPUM.");
         STAM_REG( pVM, &pVM->selm.s.aStatUpdatedSReg[X86_SREG_GS], STAMTYPE_COUNTER, "/SELM/UpdateFromCPUM/UpdatedGS", STAMUNIT_OCCURENCES, "Updated hidden GS values in UpdateFromCPUM.");
+    }

     STAM_REG( pVM, &pVM->selm.s.StatLoadHidSelGst, STAMTYPE_COUNTER, "/SELM/LoadHidSel/LoadedGuest", STAMUNIT_OCCURENCES, "SELMLoadHiddenSelectorReg: Loaded from guest tables.");

@@ -242 +250 @@
      * Default action when entering raw mode for the first time
      */
+    if (!HMIsEnabled(pVM))
+    {
         PVMCPU pVCpu = &pVM->aCpus[0];  /* raw mode implies on VCPU */
         VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
         VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
         VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
+    }
 #endif

@@ -251 +262 @@
      * Register info handlers.
      */
-    DBGFR3InfoRegisterInternal(pVM, "gdt", "Displays the shadow GDT. No arguments.", &selmR3InfoGdt);
+    if (HMIsRawModeCtxNeeded(pVM))
+    {
+        DBGFR3InfoRegisterInternal(pVM, "gdt", "Displays the shadow GDT. No arguments.", &selmR3InfoGdt);
+        DBGFR3InfoRegisterInternal(pVM, "ldt", "Displays the shadow LDT. No arguments.", &selmR3InfoLdt);
+        //DBGFR3InfoRegisterInternal(pVM, "tss", "Displays the shadow TSS. No arguments.", &selmR3InfoTss);
+    }
     DBGFR3InfoRegisterInternal(pVM, "gdtguest", "Displays the guest GDT. No arguments.", &selmR3InfoGdtGuest);
-    DBGFR3InfoRegisterInternal(pVM, "ldt", "Displays the shadow LDT. No arguments.", &selmR3InfoLdt);
     DBGFR3InfoRegisterInternal(pVM, "ldtguest", "Displays the guest LDT. No arguments.", &selmR3InfoLdtGuest);
-    //DBGFR3InfoRegisterInternal(pVM, "tss", "Displays the shadow TSS. No arguments.", &selmR3InfoTss);
     //DBGFR3InfoRegisterInternal(pVM, "tssguest", "Displays the guest TSS. No arguments.", &selmR3InfoTssGuest);

@@ -281 +295 @@
      */
     bool f;
 # if defined(DEBUG_bird)
     int rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "DoubleFault", &f, true);
 # else
     int rc = CFGMR3QueryBoolDef(CFGMR3GetRoot(pVM), "DoubleFault", &f, false);
 # endif
     AssertLogRelRCReturn(rc, rc);
-    if (f)
+    if (f && HMIsRawModeCtxNeeded(pVM))
     {
         PX86DESC paGdt = pVM->selm.s.paGdtR3;

@@ -419 +433 @@
     LogFlow(("SELMR3Relocate\n"));

+    if (HMIsRawModeCtxNeeded(pVM))
+    {
         for (VMCPUID i = 0; i < pVM->cCpus; i++)
         {
             PVMCPU pVCpu = &pVM->aCpus[i];

             /*
              * Update GDTR and selector.
              */
             CPUMSetHyperGDTR(pVCpu, MMHyperR3ToRC(pVM, paGdt), SELM_GDT_ELEMENTS * sizeof(paGdt[0]) - 1);

             /** @todo selector relocations should be a separate operation? */
             CPUMSetHyperCS(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]);
             CPUMSetHyperDS(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
             CPUMSetHyperES(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
             CPUMSetHyperSS(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]);
             CPUMSetHyperTR(pVCpu, pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]);
         }

         selmR3SetupHyperGDTSelectors(pVM);

     /** @todo SELM must be called when any of the CR3s changes during a cpu mode change. */
     /** @todo PGM knows the proper CR3 values these days, not CPUM. */
         /*
          * Update the TSSes.
          */
         /* Only applies to raw mode which supports only 1 VCPU */
         PVMCPU pVCpu = &pVM->aCpus[0];

         /* Current TSS */
         pVM->selm.s.Tss.cr3 = PGMGetHyperCR3(pVCpu);
         pVM->selm.s.Tss.ss0 = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
         pVM->selm.s.Tss.esp0 = VMMGetStackRC(pVCpu);
         pVM->selm.s.Tss.cs = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
         pVM->selm.s.Tss.ds = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
         pVM->selm.s.Tss.es = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
         pVM->selm.s.Tss.offIoBitmap = sizeof(VBOXTSS);

         /* trap 08 */
         pVM->selm.s.TssTrap08.cr3 = PGMGetInterRCCR3(pVM, pVCpu); /* this should give use better survival chances. */
         pVM->selm.s.TssTrap08.ss0 = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
         pVM->selm.s.TssTrap08.ss = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
         pVM->selm.s.TssTrap08.esp0 = VMMGetStackRC(pVCpu) - PAGE_SIZE / 2; /* upper half can be analysed this way. */
         pVM->selm.s.TssTrap08.esp = pVM->selm.s.TssTrap08.esp0;
         pVM->selm.s.TssTrap08.ebp = pVM->selm.s.TssTrap08.esp0;
         pVM->selm.s.TssTrap08.cs = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS];
         pVM->selm.s.TssTrap08.ds = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
         pVM->selm.s.TssTrap08.es = pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS];
         pVM->selm.s.TssTrap08.fs = 0;
         pVM->selm.s.TssTrap08.gs = 0;
         pVM->selm.s.TssTrap08.selLdt = 0;
         pVM->selm.s.TssTrap08.eflags = 0x2; /* all cleared */
         pVM->selm.s.TssTrap08.ecx = VM_RC_ADDR(pVM, &pVM->selm.s.Tss); /* setup ecx to normal Hypervisor TSS address. */
         pVM->selm.s.TssTrap08.edi = pVM->selm.s.TssTrap08.ecx;
         pVM->selm.s.TssTrap08.eax = pVM->selm.s.TssTrap08.ecx;
         pVM->selm.s.TssTrap08.edx = VM_RC_ADDR(pVM, pVM); /* setup edx VM address. */
         pVM->selm.s.TssTrap08.edi = pVM->selm.s.TssTrap08.edx;
         pVM->selm.s.TssTrap08.ebx = pVM->selm.s.TssTrap08.edx;
         pVM->selm.s.TssTrap08.offIoBitmap = sizeof(VBOXTSS);
         /* TRPM will be updating the eip */
+    }

-    if (   !pVM->selm.s.fDisableMonitoring
-        && !HMIsEnabled(pVM))
+    if (!HMIsEnabled(pVM))
     {
         /*

@@ -543 +559 @@
 {
     NOREF(pVM);
-    return 0;
+    return VINF_SUCCESS;
 }

@@ -602 +618 @@

 #ifdef VBOX_WITH_RAW_MODE
+    if (!HMIsEnabled(pVM))
+    {
         /*
          * Default action when entering raw mode for the first time
          */
         PVMCPU pVCpu = &pVM->aCpus[0];  /* raw mode implies on VCPU */
         VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
         VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
         VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
+    }
 #endif
 }

@@ -793 +812 @@
 {
 #ifdef VBOX_WITH_RAW_MODE
+    if (!HMIsEnabled(pVM))
+    {
         PVMCPU pVCpu = VMMGetCpu(pVM);

         LogFlow(("selmR3LoadDone:\n"));

         /*
          * Don't do anything if it's a load failure.
          */
         int rc = SSMR3HandleGetStatus(pSSM);
         if (RT_FAILURE(rc))
             return VINF_SUCCESS;

         /*
-         * Do the syncing if we're in protected mode.
+         * Do the syncing if we're in protected mode and using raw-mode.
          */
         if (PGMGetGuestMode(pVCpu) != PGMMODE_REAL)
         {
             VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
             VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
             VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
             SELMR3UpdateFromCPUM(pVM, pVCpu);
         }

         /*
          * Flag everything for resync on next raw mode entry.
          */
         VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
         VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
         VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
+    }
 #endif /*VBOX_WITH_RAW_MODE*/
     return VINF_SUCCESS;

@@ -837 +858 @@
 static int selmR3UpdateShadowGdt(PVM pVM, PVMCPU pVCpu)
 {
+    Assert(!HMIsEnabled(pVM));
+
     /*
      * Always assume the best...

@@ -1100 +1123 @@
 {
     int rc = VINF_SUCCESS;
+    Assert(!HMIsEnabled(pVM));

     /*

@@ -1334 +1358 @@
 {
     Assert(CPUMIsGuestInProtectedMode(pVCpu));
+    Assert(!HMIsEnabled(pVM));

     /*

@@ -1435 +1460 @@
 VMMR3DECL(VBOXSTRICTRC) SELMR3UpdateFromCPUM(PVM pVM, PVMCPU pVCpu)
 {
-    if (pVM->selm.s.fDisableMonitoring)
-    {
-        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
-        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
-        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
-        return VINF_SUCCESS;
-    }
-
     STAM_PROFILE_START(&pVM->selm.s.StatUpdateFromCPUM, a);
+    AssertReturn(!HMIsEnabled(pVM), VERR_SELM_HM_IPE);

     /*

@@ -1599 +1617 @@
 VMMR3DECL(int) SELMR3SyncTSS(PVM pVM, PVMCPU pVCpu)
 {
     int rc;
-
-    if (pVM->selm.s.fDisableMonitoring)
-    {
-        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
-        return VINF_SUCCESS;
-    }
+    AssertReturnStmt(!HMIsEnabled(pVM), VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS), VINF_SUCCESS);

     STAM_PROFILE_START(&pVM->selm.s.StatTSSSync, a);

@@ -1842 +1855 @@
 #ifdef VBOX_STRICT
     PVMCPU pVCpu = VMMGetCpu(pVM);
+    AssertReturn(!HMIsEnabled(pVM), VERR_SELM_HM_IPE);

     /*

trunk/src/VBox/VMM/VMMR3/VMM.cpp (r45618 → r45701)

@@ -211 +211 @@
     rc = RTSemEventCreate(&pVM->vmm.s.hEvtRendezvousDoneCaller);
     AssertRCReturn(rc, rc);
-
-    /* GC switchers are enabled by default. Turned off by HM. */
-    pVM->vmm.s.fSwitcherDisabled = false;

     /*

@@ -549 +546 @@

     /* In VMX mode, there's no need to init RC. */
-    if (pVM->vmm.s.fSwitcherDisabled)
+    if (HMIsEnabled(pVM))
         return VINF_SUCCESS;

trunk/src/VBox/VMM/VMMR3/VMMSwitcher.cpp (r44168 → r45701)

@@ -5 +5 @@

 /*
- * Copyright (C) 2006-2012 Oracle Corporation
+ * Copyright (C) 2006-2013 Oracle Corporation
  *

@@ -16 +16 @@
  */

+
 /*******************************************************************************
 *   Header Files                                                              *

@@ -22 +23 @@
 #include <VBox/vmm/vmm.h>
 #include <VBox/vmm/pgm.h>
+#include <VBox/vmm/hm.h>
 #include <VBox/vmm/selm.h>
 #include <VBox/vmm/mm.h>

@@ -46 +48 @@
  * The type and index shall match!
  */
-static PVMMSWITCHERDEF s_apSwitchers[VMMSWITCHER_MAX] =
+static PVMMSWITCHERDEF g_apRawModeSwitchers[VMMSWITCHER_MAX] =
 {
     NULL,                           /* invalid entry */

@@ -73 +75 @@
     &vmmR3SwitcherAMD64To32Bit_Def,
     &vmmR3SwitcherAMD64ToPAE_Def,
-    NULL
+    NULL,                           //&vmmR3SwitcherAMD64ToAMD64_Def,
 # endif /* RT_ARCH_AMD64 */
 #else  /* !VBOX_WITH_RAW_MODE */

@@ -84 +86 @@
     NULL,
     NULL,
-    NULL
+    NULL,
 #endif /* !VBOX_WITH_RAW_MODE */
+#ifndef RT_ARCH_AMD64
+    &vmmR3SwitcherX64Stub_Def,
+    NULL,
+#else
+    NULL,
+    &vmmR3SwitcherAMD64Stub_Def,
+#endif
 };

+/** Array of switcher definitions.
+ * The type and index shall match!
+ */
+static PVMMSWITCHERDEF g_apHmSwitchers[VMMSWITCHER_MAX] =
+{
+    NULL,                           /* invalid entry */
+#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
+    NULL,                           //&vmmR3Switcher32BitTo32Bit_Def,
+    NULL,                           //&vmmR3Switcher32BitToPAE_Def,
+    &vmmR3Switcher32BitToAMD64_Def,
+    NULL,                           //&vmmR3SwitcherPAETo32Bit_Def,
+    NULL,                           //&vmmR3SwitcherPAEToPAE_Def,
+    &vmmR3SwitcherPAEToAMD64_Def,
+    NULL,                           //&vmmR3SwitcherPAETo32Bit_Def,
+    NULL,                           //&vmmR3SwitcherAMD64ToPAE_Def,
+    NULL,                           //&vmmR3SwitcherAMD64ToAMD64_Def,
+#else  /* !VBOX_WITH_RAW_MODE */
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+#endif /* !VBOX_WITH_RAW_MODE */
+#ifndef RT_ARCH_AMD64
+    &vmmR3SwitcherX64Stub_Def,
+    NULL,
+#else
+    NULL,
+    &vmmR3SwitcherAMD64Stub_Def,
+#endif
+};

@@ -100 +145 @@
 int vmmR3SwitcherInit(PVM pVM)
 {
-#ifndef VBOX_WITH_RAW_MODE
+#ifndef VBOX_WITH_RAW_MODE /** @todo 64-bit on 32-bit. */
     return VINF_SUCCESS;
 #else

@@ -106 +151 @@
      * Calc the size.
      */
+    const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
     unsigned cbCoreCode = 0;
-    for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
+    for (unsigned iSwitcher = 0; iSwitcher < VMMSWITCHER_MAX; iSwitcher++)
     {
         pVM->vmm.s.aoffSwitchers[iSwitcher] = cbCoreCode;
-        PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
+        PVMMSWITCHERDEF pSwitcher = papSwitchers[iSwitcher];
         if (pSwitcher)
         {

@@ -180 +226 @@
      * copy the code.
      */
-    for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
+    for (unsigned iSwitcher = 0; iSwitcher < VMMSWITCHER_MAX; iSwitcher++)
     {
-        PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
+        PVMMSWITCHERDEF pSwitcher = papSwitchers[iSwitcher];
         if (pSwitcher)
             memcpy((uint8_t *)pVM->vmm.s.pvCoreCodeR3 + pVM->vmm.s.aoffSwitchers[iSwitcher],

@@ -205 +251 @@
      * to get the routine addresses, so we'll reselect it.
      * This may legally fail so, we're ignoring the rc.
+     * Note! See HMIsEnabled hack in selector function.
      */
     VMMR3SelectSwitcher(pVM, pVM->vmm.s.enmSwitcher);

@@ -238 +285 @@
      * Relocate all the switchers.
      */
-    for (unsigned iSwitcher = 0; iSwitcher < RT_ELEMENTS(s_apSwitchers); iSwitcher++)
-    {
-        PVMMSWITCHERDEF pSwitcher = s_apSwitchers[iSwitcher];
+    const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
+    for (unsigned iSwitcher = 0; iSwitcher < VMMSWITCHER_MAX; iSwitcher++)
+    {
+        PVMMSWITCHERDEF pSwitcher = papSwitchers[iSwitcher];
         if (pSwitcher && pSwitcher->pfnRelocate)
         {

@@ -256 +304 @@
      * Recalc the RC address for the current switcher.
      */
-    PVMMSWITCHERDEF pSwitcher = s_apSwitchers[pVM->vmm.s.enmSwitcher];
-    RTRCPTR RCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
-    pVM->vmm.s.pfnRCToHost         = RCPtr + pSwitcher->offRCToHost;
-    pVM->vmm.s.pfnCallTrampolineRC = RCPtr + pSwitcher->offRCCallTrampoline;
-    pVM->pfnVMMRCToHostAsm         = RCPtr + pSwitcher->offRCToHostAsm;
-    pVM->pfnVMMRCToHostAsmNoReturn = RCPtr + pSwitcher->offRCToHostAsmNoReturn;
+    PVMMSWITCHERDEF pSwitcher = papSwitchers[pVM->vmm.s.enmSwitcher];
+    if (pSwitcher)
+    {
+        RTRCPTR RCPtr = pVM->vmm.s.pvCoreCodeRC + pVM->vmm.s.aoffSwitchers[pVM->vmm.s.enmSwitcher];
+        pVM->vmm.s.pfnRCToHost         = RCPtr + pSwitcher->offRCToHost;
+        pVM->vmm.s.pfnCallTrampolineRC = RCPtr + pSwitcher->offRCCallTrampoline;
+        pVM->pfnVMMRCToHostAsm         = RCPtr + pSwitcher->offRCToHostAsm;
+        pVM->pfnVMMRCToHostAsmNoReturn = RCPtr + pSwitcher->offRCToHostAsmNoReturn;
+    }
+    else
+        AssertRelease(HMIsEnabled(pVM));

 //    AssertFailed();

@@ -962 +1015 @@
     }

-    /* Do nothing if the switcher is disabled. */
-    if (pVM->vmm.s.fSwitcherDisabled)
-        return VINF_SUCCESS;
+    /*
+     * Override it if HM is active.
+     */
+    if (HMIsEnabled(pVM))
+        pVM->vmm.s.enmSwitcher = HC_ARCH_BITS == 64 ? VMMSWITCHER_AMD64_STUB : VMMSWITCHER_X86_STUB;

     /*
      * Select the new switcher.
      */
-    PVMMSWITCHERDEF pSwitcher = s_apSwitchers[enmSwitcher];
+    const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
+    PVMMSWITCHERDEF pSwitcher = papSwitchers[enmSwitcher];
     if (pSwitcher)
     {

@@ -993 +1049 @@

 /**
- * Disable the switcher logic permanently.
- *
- * @returns VBox status code.
- * @param   pVM     Pointer to the VM.
- */
-VMMR3_INT_DECL(int) VMMR3DisableSwitcher(PVM pVM)
-{
-    /** @todo r=bird: I would suggest that we create a dummy switcher which just does something like:
-     * @code
-     * mov eax, VERR_VMM_DUMMY_SWITCHER
-     * ret
-     * @endcode
-     * And then check for fSwitcherDisabled in VMMR3SelectSwitcher() in order to prevent it from being removed.
-     */
-    pVM->vmm.s.fSwitcherDisabled = true;
-    return VINF_SUCCESS;
-}
-
-
-/**
  * Gets the switcher to be used for switching to GC.
  *

@@ -1034 +1070 @@
      * Select the new switcher.
      */
-    PVMMSWITCHERDEF pSwitcher = s_apSwitchers[enmSwitcher];
+    const PVMMSWITCHERDEF *papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers;
+    PVMMSWITCHERDEF pSwitcher = papSwitchers[enmSwitcher];
     if (pSwitcher)
     {

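The split into g_apRawModeSwitchers and g_apHmSwitchers, selected through HMIsEnabled(), is a plain table-per-mode pattern: two parallel arrays indexed by the same enum, with NULL in the slots a mode does not provide and stub entries for HM. A minimal standalone sketch of that pattern follows; all names here are invented for illustration and are not VirtualBox APIs.

#include <stdio.h>
#include <stddef.h>

/* Invented types mirroring only the shape of the switcher tables above. */
typedef struct ModeDef { const char *pszName; } ModeDef;

enum { SWITCHER_INVALID = 0, SWITCHER_32_TO_64, SWITCHER_STUB, SWITCHER_MAX };

static const ModeDef g_Def32To64 = { "32-bit host to 64-bit guest" };
static const ModeDef g_DefStub   = { "stub" };

/* One slot per switcher id; NULL means "not provided in this mode". */
static const ModeDef * const g_apRawMode[SWITCHER_MAX] = { NULL, &g_Def32To64, NULL };
static const ModeDef * const g_apHm[SWITCHER_MAX]      = { NULL, &g_Def32To64, &g_DefStub };

static const ModeDef * const *pick_table(int fHmEnabled)
{
    /* Mirrors: papSwitchers = HMIsEnabled(pVM) ? g_apHmSwitchers : g_apRawModeSwitchers; */
    return fHmEnabled ? g_apHm : g_apRawMode;
}

int main(void)
{
    const ModeDef * const *papDefs = pick_table(1 /* HM enabled */);
    for (int i = 0; i < SWITCHER_MAX; i++)
        printf("slot %d: %s\n", i, papDefs[i] ? papDefs[i]->pszName : "(none)");
    return 0;
}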