Changeset 43387 in vbox for trunk/src/VBox/VMM
Timestamp: Sep 21, 2012, 9:40:25 AM
svn:sync-xref-src-repo-rev: 80859
Location: trunk/src/VBox/VMM
Files: 50 edited, 10 moved
trunk/src/VBox/VMM/Makefile.kmk
    (diff against r42777)
    -        VMMR3/EMHwaccm.cpp \
    +        VMMR3/EMHM.cpp \
    -        VMMR3/HWACCM.cpp \
    +        VMMR3/HM.cpp \
    -        VMMAll/HWACCMAll.cpp \
    +        VMMAll/HMAll.cpp \
    -        VMMRC/HWACCMRCA.asm \
    +        VMMRC/HMRCA.asm \
    -        VMMR0/HWACCMR0.cpp \
    -        VMMR0/HWACCMR0A.asm \
    +        VMMR0/HMR0.cpp \
    +        VMMR0/HMR0A.asm \
    (the VMMAll/HWACCMAll.cpp entry is renamed in both the ring-3 and the ring-0 source lists)
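What the file renames mean for calling code: the whole HWACCM component is renamed to HM, so every public HWACCM* entry point used elsewhere in the VMM changes name as well (see the per-file diffs below). As a purely hypothetical illustration, not part of this changeset, out-of-tree code still using the old names could be bridged with a shim like this:

    /* Hypothetical compatibility shim (illustration only, not in the tree).
     * It maps a few old HWACCM entry points onto the new HM names whose
     * signatures are visible in the diffs below. */
    #define HWACCMIsEnabled(pVM)                   HMIsEnabled(pVM)
    #define HWACCMFlushTLB(pVCpu)                  HMFlushTLB(pVCpu)
    #define HWACCMInvalidatePage(pVCpu, GCVirt)    HMInvalidatePage((pVCpu), (GCVirt))
    #define HWACCMInvalidatePhysPage(pVM, GCPhys)  HMInvalidatePhysPage((pVM), (GCPhys))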
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp
    (diff against r43151)
    #include <VBox/vmm/hwaccm.h>  →  #include <VBox/vmm/hm.h>
    HWACCMIsEnabled()  →  HMIsEnabled()   (the hidden-selector-parts assertion and the SELM/TRPM
                                           GDT, IDT, TSS and LDT sync force-flag checks)
    HWACCMFlushTLB()   →  HMFlushTLB()
trunk/src/VBox/VMM/VMMAll/EMAll.cpp
    (diff against r42780)
    #include <VBox/vmm/hwaccm.h>  →  #include <VBox/vmm/hm.h>
    HWACCMInvalidatePage()  →  HMInvalidatePage()   (both pages touched by the faulting read)
    /** @todo test+enable for HWACCM as well. */  →  /** @todo test+enable for HM as well. */
trunk/src/VBox/VMM/VMMAll/HMAll.cpp
    (previously VMMAll/HWACCMAll.cpp; diff against r43373)
    File header: "HWACCM - All contexts."  →  "HM - All contexts."
    #define LOG_GROUP LOG_GROUP_HWACCM  →  #define LOG_GROUP LOG_GROUP_HM
    Includes: <VBox/vmm/hwaccm.h>, "HWACCMInternal.h", <VBox/vmm/hwacc_vmx.h>, <VBox/vmm/hwacc_svm.h>
        →  <VBox/vmm/hm.h>, "HMInternal.h", <VBox/vmm/hm_vmx.h>, <VBox/vmm/hm_svm.h>
    Internal helpers: hwaccmQueueInvlPage → hmQueueInvlPage, hwaccmFlushHandler → hmFlushHandler
    Public API: HWACCMInvalidatePage → HMInvalidatePage, HWACCMFlushTLB → HMFlushTLB,
        HWACCMInvalidatePageOnAllVCpus → HMInvalidatePageOnAllVCpus,
        HWACCMFlushTLBOnAllVCpus → HMFlushTLBOnAllVCpus,
        HWACCMIsNestedPagingActive → HMIsNestedPagingActive,
        HWACCMGetShwPagingMode → HMGetShwPagingMode,
        HWACCMInvalidatePhysPage → HMInvalidatePhysPage,
        HWACCMHasPendingIrq → HMHasPendingIrq
    Member accesses: pVM->hwaccm.s.* / pVCpu->hwaccm.s.*  →  pVM->hm.s.* / pVCpu->hm.s.*
        (TlbShootdown.aPages, vmx.fSupported, svm.fSupported, fNestedPaging, Event.fPending,
        idEnteredCpu, cWorldSwitchExits, fCheckedTLBFlush and the StatFlush*, StatTlbShootdown*,
        StatPoke* and StatSpinPoke* statistics counters)
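The hmQueueInvlPage / HMInvalidatePage pair above follows a queue-or-flush scheme: in ring-0 the page is invalidated immediately through VMXR0InvalidatePage or SVMR0InvalidatePage, while in the other contexts the page is queued for a later TLB shootdown, and a full TLB flush is requested instead once a flush is already pending or the shootdown queue is exhausted. Below is a self-contained sketch of that queueing decision; it is a simplified model, not VirtualBox code (the real implementation works on PVMCPU and raises VMCPU_FF_TLB_FLUSH style force-flags).

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define MAX_QUEUED_PAGES 8          /* stand-in for RT_ELEMENTS(TlbShootdown.aPages) */

    typedef struct
    {
        bool     fFlushPending;                /* full TLB flush already requested      */
        size_t   cQueued;                      /* number of queued page invalidations   */
        uint64_t aPages[MAX_QUEUED_PAGES];     /* queued guest-virtual page addresses   */
    } TLBSTATE;

    static void queueInvlPage(TLBSTATE *pState, uint64_t GCVirt)
    {
        /* Nothing to do if a full flush is already pending. */
        if (pState->fFlushPending)
            return;

        /* Queue exhausted: fall back to flushing the whole TLB on next entry. */
        if (pState->cQueued >= MAX_QUEUED_PAGES)
        {
            pState->fFlushPending = true;
            pState->cQueued = 0;
            return;
        }

        pState->aPages[pState->cQueued++] = GCVirt;
    }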
trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp
    (diff against r42726)
    #include <VBox/vmm/hwaccm.h>  →  #include <VBox/vmm/hm.h>
    HWACCMIsEnabled()  →  HMIsEnabled()   (two "useless without VT-x/AMD-V" checks and one assertion)
    HWACCMIsNestedPagingActive()  →  HMIsNestedPagingActive()   (two paged-protected-mode checks)
trunk/src/VBox/VMM/VMMAll/MMAll.cpp
    (diff against r41965)
    TAG2STR(HWACCM)  →  TAG2STR(HM)
trunk/src/VBox/VMM/VMMAll/PDMAllCritSect.cpp
    (diff against r41965)
    #include <VBox/vmm/hwaccm.h>  →  #include <VBox/vmm/hm.h>
    Comment: "Leave HWACCM context while waiting if necessary."  →  "Leave HM context while waiting if necessary."
    HWACCMR0Leave() / HWACCMR0Enter()  →  HMR0Leave() / HMR0Enter()   (around the ring-0 wait)
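The HMR0Leave / HMR0Enter calls above bracket a blocking wait on the critical section in ring-0: the thread leaves the hardware-assisted execution context and restores preemption before sleeping, then disables preemption and re-enters that context afterwards. The following is a minimal, self-contained sketch of that ordering only; the real entry points appear in the comments and nothing in the sketch is VirtualBox API.

    #include <stdbool.h>

    typedef struct { bool fInHmContext; bool fPreemptDisabled; } RING0STATE;

    static int waitForCritSect(RING0STATE *pState, int (*pfnWait)(void))
    {
        /* 1. Leave the HM context (HMR0Leave in the real code) and re-enable
         *    preemption so this thread is allowed to block. */
        pState->fInHmContext     = false;
        pState->fPreemptDisabled = false;

        /* 2. Block until the critical section is signalled. */
        int rc = pfnWait();

        /* 3. Disable preemption again and re-enter the HM context
         *    (HMR0Enter in the real code) before resuming guest execution. */
        pState->fPreemptDisabled = true;
        pState->fInHmContext     = true;
        return rc;
    }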
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
    (diff against r41965)
    #include <VBox/vmm/hwaccm.h>, <VBox/vmm/hwacc_vmx.h>  →  <VBox/vmm/hm.h>, <VBox/vmm/hm_vmx.h>
    Assert(!HWACCMIsEnabled(pVM))  →  Assert(!HMIsEnabled(pVM))
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
    (diff against r42188)
    HWACCMInvalidatePhysPage()  →  HMInvalidatePhysPage()   (PGM_SHW_TYPE == PGM_TYPE_EPT case)
trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp
    (diff against r41965)
    HWACCMFlushTLBOnAllVCpus()  →  HMFlushTLBOnAllVCpus()
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
    (diff against r43197)
    #include <VBox/vmm/hwacc_vmx.h>  →  #include <VBox/vmm/hm_vmx.h>
    HWACCMHasPendingIrq()  →  HMHasPendingIrq()
trunk/src/VBox/VMM/VMMAll/PGMAllShw.h
    (diff against r41802)
    HWACCMInvalidatePhysPage()  →  HMInvalidatePhysPage()   (PGM_SHW_TYPE == PGM_TYPE_EPT case)
trunk/src/VBox/VMM/VMMAll/SELMAll.cpp
    (diff against r42427)
    #include <VBox/vmm/hwaccm.h>  →  #include <VBox/vmm/hm.h>
    HWACCMIsEnabled()  →  HMIsEnabled()   (both ring-compression undo checks)
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
    (diff against r42464)
    #include <VBox/vmm/hwaccm.h>  →  #include <VBox/vmm/hm.h>
    HWACCMR0SaveFPUState()    →  HMR0SaveFPUState()
    HWACCMR0SaveDebugState()  →  HMR0SaveDebugState()
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
    (previously VMMR0/HWACCMR0.cpp; diff against r43373)
    #define LOG_GROUP LOG_GROUP_HWACCM  →  #define LOG_GROUP LOG_GROUP_HM
    Includes: <VBox/vmm/hwaccm.h>, "HWACCMInternal.h", <VBox/vmm/hwacc_vmx.h>, <VBox/vmm/hwacc_svm.h>
        →  <VBox/vmm/hm.h>, "HMInternal.h", <VBox/vmm/hm_vmx.h>, <VBox/vmm/hm_svm.h>
    Exported API: HWACCMR0Init/HWACCMR0Term → HMR0Init/HMR0Term,
        HWACCMR0EnableAllCpus → HMR0EnableAllCpus,
        HWACCMR0InitVM/HWACCMR0TermVM/HWACCMR0SetupVM → HMR0InitVM/HMR0TermVM/HMR0SetupVM,
        HWACCMR0Enter/HWACCMR0Leave → HMR0Enter/HMR0Leave,
        HWACCMR0RunGuestCode → HMR0RunGuestCode,
        HWACCMR0SaveFPUState/HWACCMR0SaveDebugState → HMR0SaveFPUState/HMR0SaveDebugState,
        HWACCMR0TestSwitcher3264 → HMR0TestSwitcher3264,
        HWACCMR0SuspendPending → HMR0SuspendPending,
        HWACCMR0GetCurrentCpu/HWACCMR0GetCurrentCpuEx → HMR0GetCurrentCpu/HMR0GetCurrentCpuEx,
        HWACCMR0SavePendingIOPortRead/Write → HMR0SavePendingIOPortRead/Write,
        HWACCMR0DumpDescriptor → HMR0DumpDescriptor, HWACCMDumpRegs → HMDumpRegs
    Status codes and flags: VERR_HWACCM_UNKNOWN_CPU, VERR_HWACCM_NO_CPUID and
        VERR_HWACCM_SUSPEND_PENDING → VERR_HM_*;
        HWACCM_CHANGED_ALL, HWACCM_CHANGED_GUEST_CR0, HWACCM_CHANGED_HOST_CONTEXT → HM_CHANGED_*;
        HWACCMPENDINGIO_PORT_READ/WRITE → HMPENDINGIO_PORT_READ/WRITE
    Member accesses: pVM->hwaccm.s.* / pVCpu->hwaccm.s.*  →  hm.s.* throughout (the copied VT-x and
        AMD-V capability fields, cMaxResumeLoops, uMaxASID, u64RegisterMask, fGlobalInit, lLastError,
        idEnteredCpu, idLastCpu, uCurrentASID, fContextUseFlags, PendingIO, fNestedPaging,
        pfnSaveGuestFPU64/pfnSaveGuestDebug64/pfnTest64 and the statistics counters)

    Functional change: HMR0EnterSwitcher takes the switcher type explicitly and lets the host
    suspend VT-x when SUPR0EnableVTx is in use:

         * @param   pVM            Pointer to the VM.
        + * @param   enmSwitcher    The switcher we're about to use.
         * @param   pfVTxDisabled  Where to store whether VT-x was disabled or not.
         */
        -VMMR0DECL(int) HWACCMR0EnterSwitcher(PVM pVM, bool *pfVTxDisabled)
        +VMMR0DECL(int) HMR0EnterSwitcher(PVM pVM, VMMSWITCHER enmSwitcher, bool *pfVTxDisabled)
         {
             Assert(!(ASMGetFlags() & X86_EFL_IF) || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
             *pfVTxDisabled = false;

        -    if (   !g_HvmR0.fEnabled
        -        || !g_HvmR0.vmx.fSupported /* no such issues with AMD-V */
        -        || !g_HvmR0.fGlobalInit    /* Local init implies the CPU is currently not in VMX root mode. */)
        -        return VINF_SUCCESS;       /* nothing to do */
        -
        -    switch (VMMGetSwitcher(pVM))
        +    /* No such issues with AMD-V */
        +    if (!g_HvmR0.vmx.fSupported)
        +        return VINF_SUCCESS;
        +
        +    /* Check if the switching we're up to is safe. */
        +    switch (enmSwitcher)
             {
                 case VMMSWITCHER_32_TO_32:
                 ...
             }

        -    PHMGLOBLCPUINFO pCpu = HWACCMR0GetCurrentCpu();
        +    /* When using SUPR0EnableVTx we must let the host suspend and resume VT-x,
        +       regardless of whether we're currently using VT-x or not. */
        +    if (g_HvmR0.vmx.fUsingSUPR0EnableVTx)
        +    {
        +        *pfVTxDisabled = SUPR0SuspendVTxOnCpu();
        +        return VINF_SUCCESS;
        +    }
        +
        +    /** @todo Check if this code is presumtive wrt other VT-x users on the
        +     *        system... */
        +
        +    /* Nothing to do if we haven't enabled VT-x. */
        +    if (!g_HvmR0.fEnabled)
        +        return VINF_SUCCESS;
        +
        +    /* Local init implies the CPU is currently not in VMX root mode. */
        +    if (!g_HvmR0.fGlobalInit)
        +        return VINF_SUCCESS;
        +
        +    /* Ok, disable VT-x. */
        +    PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
             AssertReturn(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ, VERR_HM_IPE_2);

    Functional change: HMR0LeaveSwitcher no longer returns a status code and resumes VT-x through
    the host when SUPR0EnableVTx is in use:

        -* @returns VBox status code.
         * @param   pVM            Pointer to the VM.
         * @param   fVTxDisabled   Whether VT-x was disabled or not.
         */
        -VMMR0DECL(int) HWACCMR0LeaveSwitcher(PVM pVM, bool fVTxDisabled)
        +VMMR0DECL(void) HMR0LeaveSwitcher(PVM pVM, bool fVTxDisabled)
         {
             Assert(!(ASMGetFlags() & X86_EFL_IF));

             if (!fVTxDisabled)
        -        return VINF_SUCCESS;    /* nothing to do */
        -
        -    Assert(g_HvmR0.fEnabled);
        +        return;                 /* nothing to do */
        +
             Assert(g_HvmR0.vmx.fSupported);
        -    Assert(g_HvmR0.fGlobalInit);
        -
        -    PHMGLOBLCPUINFO pCpu = HWACCMR0GetCurrentCpu();
        -    AssertReturn(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ, VERR_HM_IPE_2);
        -
        -    void *pvCpuPage = RTR0MemObjAddress(pCpu->hMemObj);
        -    RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
        -    return VMXR0EnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false);
        +    if (g_HvmR0.vmx.fUsingSUPR0EnableVTx)
        +        SUPR0ResumeVTxOnCpu(fVTxDisabled);
        +    else
        +    {
        +        Assert(g_HvmR0.fEnabled);
        +        Assert(g_HvmR0.fGlobalInit);
        +
        +        PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
        +        AssertReturnVoid(pCpu && pCpu->hMemObj != NIL_RTR0MEMOBJ);
        +
        +        void *pvCpuPage = RTR0MemObjAddress(pCpu->hMemObj);
        +        RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
        +        VMXR0EnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false);
        +    }
         }
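The reworked HMR0EnterSwitcher above now decides in a fixed order: nothing to do on AMD-V hosts, validate the requested switcher, let the host suspend VT-x when SUPR0EnableVTx is in use, and only otherwise disable VT-x itself (and only if HM enabled it globally); the actual disabling of VT-x on the current CPU follows after the quoted lines. The sketch below is a self-contained model of that decision order only; HMSTATE and enterSwitcher are invented names, and the real code calls SUPR0SuspendVTxOnCpu() and the VT-x disable routine instead of setting plain booleans.

    #include <stdbool.h>

    typedef struct
    {
        bool fVmxSupported;          /* VT-x detected on the host              */
        bool fUsingSUPR0EnableVTx;   /* the host (support driver) owns VT-x    */
        bool fEnabled;               /* HM has enabled VT-x/AMD-V              */
        bool fGlobalInit;            /* VT-x was enabled globally on all CPUs  */
    } HMSTATE;

    /* Returns true when VT-x had to be turned off before the world switch and
     * records that in *pfVTxDisabled, mirroring HMR0EnterSwitcher's outline. */
    static bool enterSwitcher(const HMSTATE *pState, bool *pfVTxDisabled)
    {
        *pfVTxDisabled = false;

        if (!pState->fVmxSupported)          /* no such issues with AMD-V          */
            return false;

        if (pState->fUsingSUPR0EnableVTx)
        {
            *pfVTxDisabled = true;           /* real code stores the result of
                                                SUPR0SuspendVTxOnCpu() here       */
            return true;
        }

        if (!pState->fEnabled)               /* VT-x never enabled by HM           */
            return false;
        if (!pState->fGlobalInit)            /* local init: CPU not in root mode   */
            return false;

        *pfVTxDisabled = true;               /* real code: disable VT-x here       */
        return true;
    }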
trunk/src/VBox/VMM/VMMR0/HMR0A.asm
    (previously VMMR0/HWACCMR0A.asm; diff against r43373)
    %include "VBox/vmm/hwacc_vmx.mac", "HWACCMInternal.mac"  →  "VBox/vmm/hm_vmx.mac", "HMInternal.mac"
    HWACCM_64_BIT_USE_NULL_SEL  →  HM_64_BIT_USE_NULL_SEL   (the %define and every %ifndef in
        MYPUSHSEGS64 / MYPOPSEGS64)
    Routines: hwaccmR0Get64bitGDTRandIDTR → hmR0Get64bitGDTRandIDTR,
        hwaccmR0Get64bitCR3 → hmR0Get64bitCR3,
        hwaccmR0VMXStartVMWrapXMM → hmR0VMXStartVMWrapXMM   (prototype now uses PFNHMVMXSTARTVM),
        hwaccmR0SVMRunWrapXMM → hmR0SVMRunWrapXMM           (prototype now uses PFNHMSVMVMRUN)
    %include "HWACCMR0Mixed.mac"  →  %include "HMR0Mixed.mac"   (both the 32-bit and the 64-bit pass)
trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac
    (previously VMMR0/HWACCMR0Mixed.mac; diff against r43373)
    Header comment: "HWACCMR0Mixed.mac - Stuff that darwin needs to build two versions of.
    Included by HWACCMR0A.asm ..."  →  "HMR0Mixed.mac - ... Included by HMR0A.asm ..."
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
    (diff against r43353)
    #define LOG_GROUP LOG_GROUP_HWACCM  →  #define LOG_GROUP LOG_GROUP_HM
    Includes: <VBox/vmm/hwaccm.h>, "HWACCMInternal.h", <VBox/vmm/hwacc_svm.h>
        →  <VBox/vmm/hm.h>, "HMInternal.h", <VBox/vmm/hm_svm.h>
    Member accesses: pVM->hwaccm.s.* / pVCpu->hwaccm.s.*  →  hm.s.* throughout, covering
        svm.fIgnoreInUseError, svm.fAlwaysFlushTLB, svm.fSupported, fNestedPaging,
        the IO bitmap fields (svm.pMemObjIOBitmap, svm.pIOBitmap, svm.pIOBitmapPhys),
        the per-VCPU VMCB and MSR bitmap fields (svm.pMemObjVMCBHost/pVMCBHost/pVMCBHostPhys,
        svm.pMemObjVMCB/pVMCB/pVMCBPhys, svm.pMemObjMSRBitmap/pMSRBitmap/pMSRBitmapPhys),
        Event.fPending/intInfo/errCode, fContextUseFlags, fFPUOldStyleOverride and the
        statistics counters (paStatInjectedIrqsR0, StatIntReinject, StatSwitchGuestIrq, StatIntInject)
    Context flags: HWACCM_CHANGED_GUEST_SEGMENT_REGS, _LDTR, _TR, _GDTR, _IDTR, _CR0
        →  HM_CHANGED_GUEST_*
807 807 */ 808 if (!pVM->h waccm.s.fNestedPaging)808 if (!pVM->hm.s.fNestedPaging) 809 809 { 810 810 val |= X86_CR0_PG; /* Paging is always enabled; even when the guest is running in real mode or PE without paging. */ … … 816 816 pVMCB->guest.u64CR2 = pCtx->cr2; 817 817 818 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)818 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3) 819 819 { 820 820 /* Save our shadow CR3 register. */ 821 if (pVM->h waccm.s.fNestedPaging)821 if (pVM->hm.s.fNestedPaging) 822 822 { 823 823 PGMMODE enmShwPagingMode; … … 841 841 } 842 842 843 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)843 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4) 844 844 { 845 845 val = pCtx->cr4; 846 if (!pVM->h waccm.s.fNestedPaging)847 { 848 switch (pVCpu->h waccm.s.enmShadowMode)846 if (!pVM->hm.s.fNestedPaging) 847 { 848 switch (pVCpu->hm.s.enmShadowMode) 849 849 { 850 850 case PGMMODE_REAL: … … 881 881 882 882 /* Debug registers. */ 883 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)883 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG) 884 884 { 885 885 pCtx->dr[6] |= X86_DR6_INIT_VAL; /* set all reserved bits to 1. */ … … 916 916 && !DBGFIsStepping(pVCpu)) 917 917 { 918 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatDRxArmed);918 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed); 919 919 920 920 /* Disable drx move intercepts. */ … … 948 948 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE; 949 949 #elif HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 950 pVCpu->h waccm.s.svm.pfnVMRun = SVMR0VMSwitcherRun64;950 pVCpu->hm.s.svm.pfnVMRun = SVMR0VMSwitcherRun64; 951 951 #else 952 952 # ifdef VBOX_WITH_HYBRID_32BIT_KERNEL 953 if (!pVM->h waccm.s.fAllow64BitGuests)953 if (!pVM->hm.s.fAllow64BitGuests) 954 954 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE; 955 955 # endif 956 pVCpu->h waccm.s.svm.pfnVMRun = SVMR0VMRun64;957 #endif 958 /* Unconditionally update these as wrmsr might have changed them. (H WACCM_CHANGED_GUEST_SEGMENT_REGS will not be set) */956 pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun64; 957 #endif 958 /* Unconditionally update these as wrmsr might have changed them. (HM_CHANGED_GUEST_SEGMENT_REGS will not be set) */ 959 959 pVMCB->guest.FS.u64Base = pCtx->fs.u64Base; 960 960 pVMCB->guest.GS.u64Base = pCtx->gs.u64Base; … … 965 965 pVMCB->guest.u64EFER &= ~MSR_K6_EFER_LME; 966 966 967 pVCpu->h waccm.s.svm.pfnVMRun = SVMR0VMRun;967 pVCpu->hm.s.svm.pfnVMRun = SVMR0VMRun; 968 968 } 969 969 … … 976 976 pVMCB->ctrl.u32InterceptCtrl1 &= ~SVM_CTRL1_INTERCEPT_RDTSC; 977 977 pVMCB->ctrl.u32InterceptCtrl2 &= ~SVM_CTRL2_INTERCEPT_RDTSCP; 978 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatTSCOffset);978 STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCOffset); 979 979 } 980 980 else … … 986 986 pVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC; 987 987 pVMCB->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP; 988 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatTSCInterceptOverFlow);988 STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCInterceptOverFlow); 989 989 } 990 990 } … … 993 993 pVMCB->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_RDTSC; 994 994 pVMCB->ctrl.u32InterceptCtrl2 |= SVM_CTRL2_INTERCEPT_RDTSCP; 995 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatTSCIntercept);995 STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCIntercept); 996 996 } 997 997 … … 1013 1013 1014 1014 /* Done. 
*/ 1015 pVCpu->h waccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;1015 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_ALL_GUEST; 1016 1016 1017 1017 return VINF_SUCCESS; … … 1032 1032 AssertPtr(pVCpu); 1033 1033 1034 SVM_VMCB *pVMCB = (SVM_VMCB *)pVCpu->h waccm.s.svm.pVMCB;1035 pCpu = H WACCMR0GetCurrentCpu();1034 SVM_VMCB *pVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pVMCB; 1035 pCpu = HMR0GetCurrentCpu(); 1036 1036 1037 1037 /* … … 1042 1042 */ 1043 1043 bool fNewASID = false; 1044 if ( pVCpu->h waccm.s.idLastCpu != pCpu->idCpu1045 || pVCpu->h waccm.s.cTLBFlushes != pCpu->cTLBFlushes)1046 { 1047 pVCpu->h waccm.s.fForceTLBFlush = true;1044 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu 1045 || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes) 1046 { 1047 pVCpu->hm.s.fForceTLBFlush = true; 1048 1048 fNewASID = true; 1049 1049 } … … 1052 1052 * Set TLB flush state as checked until we return from the world switch. 1053 1053 */ 1054 ASMAtomicWriteBool(&pVCpu->h waccm.s.fCheckedTLBFlush, true);1054 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); 1055 1055 1056 1056 /* … … 1058 1058 */ 1059 1059 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH)) 1060 pVCpu->h waccm.s.fForceTLBFlush = true;1061 1062 pVCpu->h waccm.s.idLastCpu = pCpu->idCpu;1060 pVCpu->hm.s.fForceTLBFlush = true; 1061 1062 pVCpu->hm.s.idLastCpu = pCpu->idCpu; 1063 1063 pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_NOTHING; 1064 1064 1065 if (RT_UNLIKELY(pVM->h waccm.s.svm.fAlwaysFlushTLB))1065 if (RT_UNLIKELY(pVM->hm.s.svm.fAlwaysFlushTLB)) 1066 1066 { 1067 1067 /* … … 1069 1069 */ 1070 1070 pCpu->uCurrentASID = 1; 1071 pVCpu->h waccm.s.uCurrentASID = 1;1072 pVCpu->h waccm.s.cTLBFlushes = pCpu->cTLBFlushes;1071 pVCpu->hm.s.uCurrentASID = 1; 1072 pVCpu->hm.s.cTLBFlushes = pCpu->cTLBFlushes; 1073 1073 pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE; 1074 1074 } 1075 else if (pVCpu->h waccm.s.fForceTLBFlush)1075 else if (pVCpu->hm.s.fForceTLBFlush) 1076 1076 { 1077 1077 if (fNewASID) … … 1079 1079 ++pCpu->uCurrentASID; 1080 1080 bool fHitASIDLimit = false; 1081 if (pCpu->uCurrentASID >= pVM->h waccm.s.uMaxASID)1081 if (pCpu->uCurrentASID >= pVM->hm.s.uMaxASID) 1082 1082 { 1083 1083 pCpu->uCurrentASID = 1; /* start at 1; host uses 0 */ … … 1085 1085 fHitASIDLimit = true; 1086 1086 1087 if (pVM->h waccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)1087 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID) 1088 1088 { 1089 1089 pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT; … … 1100 1100 && pCpu->fFlushASIDBeforeUse) 1101 1101 { 1102 if (pVM->h waccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)1102 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID) 1103 1103 pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT; 1104 1104 else … … 1109 1109 } 1110 1110 1111 pVCpu->h waccm.s.uCurrentASID = pCpu->uCurrentASID;1112 pVCpu->h waccm.s.cTLBFlushes = pCpu->cTLBFlushes;1111 pVCpu->hm.s.uCurrentASID = pCpu->uCurrentASID; 1112 pVCpu->hm.s.cTLBFlushes = pCpu->cTLBFlushes; 1113 1113 } 1114 1114 else 1115 1115 { 1116 if (pVM->h waccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)1116 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID) 1117 1117 pVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT; 1118 1118 else … … 1120 1120 } 1121 1121 1122 pVCpu->h waccm.s.fForceTLBFlush = false;1122 pVCpu->hm.s.fForceTLBFlush = false; 1123 1123 } 1124 1124 else 1125 1125 { 1126 1126 /** @todo We never set 
VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should 1127 * not be executed. See h waccmQueueInvlPage() where it is commented1127 * not be executed. See hmQueueInvlPage() where it is commented 1128 1128 * out. Support individual entry flushing someday. */ 1129 1129 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN)) 1130 1130 { 1131 1131 /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */ 1132 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatTlbShootdown);1133 for (unsigned i = 0; i < pVCpu->h waccm.s.TlbShootdown.cPages; i++)1134 SVMR0InvlpgA(pVCpu->h waccm.s.TlbShootdown.aPages[i], pVMCB->ctrl.TLBCtrl.n.u32ASID);1135 } 1136 } 1137 1138 pVCpu->h waccm.s.TlbShootdown.cPages = 0;1132 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown); 1133 for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++) 1134 SVMR0InvlpgA(pVCpu->hm.s.TlbShootdown.aPages[i], pVMCB->ctrl.TLBCtrl.n.u32ASID); 1135 } 1136 } 1137 1138 pVCpu->hm.s.TlbShootdown.cPages = 0; 1139 1139 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN); 1140 1140 1141 1141 /* Update VMCB with the ASID. */ 1142 pVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->h waccm.s.uCurrentASID;1143 1144 AssertMsg(pVCpu->h waccm.s.cTLBFlushes == pCpu->cTLBFlushes,1145 ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->h waccm.s.cTLBFlushes, pCpu->cTLBFlushes));1146 AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->h waccm.s.uMaxASID,1142 pVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentASID; 1143 1144 AssertMsg(pVCpu->hm.s.cTLBFlushes == pCpu->cTLBFlushes, 1145 ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes)); 1146 AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hm.s.uMaxASID, 1147 1147 ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID)); 1148 AssertMsg(pVCpu->h waccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID,1149 ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->h waccm.s.uCurrentASID));1148 AssertMsg(pVCpu->hm.s.uCurrentASID >= 1 && pVCpu->hm.s.uCurrentASID < pVM->hm.s.uMaxASID, 1149 ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentASID)); 1150 1150 1151 1151 #ifdef VBOX_WITH_STATISTICS 1152 1152 if (pVMCB->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_NOTHING) 1153 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatNoFlushTLBWorldSwitch);1153 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTLBWorldSwitch); 1154 1154 else if ( pVMCB->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT 1155 1155 || pVMCB->ctrl.TLBCtrl.n.u8TLBFlush == SVM_TLB_FLUSH_SINGLE_CONTEXT_RETAIN_GLOBALS) 1156 1156 { 1157 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatFlushASID);1157 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushASID); 1158 1158 } 1159 1159 else 1160 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatFlushTLBWorldSwitch);1160 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTLBWorldSwitch); 1161 1161 #endif 1162 1162 } … … 1173 1173 VMMR0DECL(int) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 1174 1174 { 1175 STAM_PROFILE_ADV_START(&pVCpu->h waccm.s.StatEntry, x);1176 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->h waccm.s.StatExit1);1177 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->h waccm.s.StatExit2);1175 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x); 1176 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1); 1177 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2); 1178 1178 1179 1179 VBOXSTRICTRC rc = VINF_SUCCESS; … … 1194 1194 #endif 1195 1195 1196 pVMCB = (SVM_VMCB *)pVCpu->h waccm.s.svm.pVMCB;1196 pVMCB = (SVM_VMCB 
*)pVCpu->hm.s.svm.pVMCB; 1197 1197 AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_HMSVM_INVALID_PVMCB); 1198 1198 … … 1201 1201 */ 1202 1202 ResumeExecution: 1203 if (!STAM_PROFILE_ADV_IS_RUNNING(&pVCpu->h waccm.s.StatEntry))1204 STAM_PROFILE_ADV_STOP_START(&pVCpu->h waccm.s.StatExit2, &pVCpu->hwaccm.s.StatEntry, x);1205 Assert(!H WACCMR0SuspendPending());1203 if (!STAM_PROFILE_ADV_IS_RUNNING(&pVCpu->hm.s.StatEntry)) 1204 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit2, &pVCpu->hm.s.StatEntry, x); 1205 Assert(!HMR0SuspendPending()); 1206 1206 1207 1207 /* 1208 1208 * Safety precaution; looping for too long here can have a very bad effect on the host. 1209 1209 */ 1210 if (RT_UNLIKELY(++cResume > pVM->h waccm.s.cMaxResumeLoops))1211 { 1212 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitMaxResume);1210 if (RT_UNLIKELY(++cResume > pVM->hm.s.cMaxResumeLoops)) 1211 { 1212 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume); 1213 1213 rc = VINF_EM_RAW_INTERRUPT; 1214 1214 goto end; … … 1256 1256 * Check for pending actions that force us to go back to ring-3. 1257 1257 */ 1258 if ( VM_FF_ISPENDING(pVM, VM_FF_H WACCM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)1258 if ( VM_FF_ISPENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA) 1259 1259 || VMCPU_FF_ISPENDING(pVCpu, 1260 VMCPU_FF_H WACCM_TO_R3_MASK1260 VMCPU_FF_HM_TO_R3_MASK 1261 1261 | VMCPU_FF_PGM_SYNC_CR3 1262 1262 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL … … 1280 1280 #endif 1281 1281 { 1282 if ( VM_FF_ISPENDING(pVM, VM_FF_H WACCM_TO_R3_MASK)1283 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_H WACCM_TO_R3_MASK))1282 if ( VM_FF_ISPENDING(pVM, VM_FF_HM_TO_R3_MASK) 1283 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK)) 1284 1284 { 1285 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatSwitchToR3);1285 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchToR3); 1286 1286 rc = RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3; 1287 1287 goto end; … … 1326 1326 if (RTThreadPreemptIsPending(NIL_RTTHREAD)) 1327 1327 { 1328 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitPreemptPending);1328 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptPending); 1329 1329 rc = VINF_EM_RAW_INTERRUPT; 1330 1330 goto end; … … 1348 1348 /** @todo query and update the TPR only when it could have been changed (mmio access) 1349 1349 */ 1350 if (pVM->h waccm.s.fHasIoApic)1350 if (pVM->hm.s.fHasIoApic) 1351 1351 { 1352 1352 /* TPR caching in CR8 */ … … 1355 1355 AssertRC(rc2); 1356 1356 1357 if (pVM->h waccm.s.fTPRPatchingActive)1357 if (pVM->hm.s.fTPRPatchingActive) 1358 1358 { 1359 1359 /* Our patch code uses LSTAR for TPR caching. */ … … 1399 1399 1400 1400 /* Enable nested paging if necessary (disabled each time after #VMEXIT). 
*/ 1401 pVMCB->ctrl.NestedPaging.n.u1NestedPaging = pVM->h waccm.s.fNestedPaging;1401 pVMCB->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging; 1402 1402 1403 1403 #ifdef LOG_ENABLED 1404 pCpu = H WACCMR0GetCurrentCpu();1405 if (pVCpu->h waccm.s.idLastCpu != pCpu->idCpu)1406 LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->h waccm.s.idLastCpu, pCpu->idCpu));1407 else if (pVCpu->h waccm.s.cTLBFlushes != pCpu->cTLBFlushes)1408 LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->h waccm.s.cTLBFlushes, pCpu->cTLBFlushes));1404 pCpu = HMR0GetCurrentCpu(); 1405 if (pVCpu->hm.s.idLastCpu != pCpu->idCpu) 1406 LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hm.s.idLastCpu, pCpu->idCpu)); 1407 else if (pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes) 1408 LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes)); 1409 1409 else if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH)) 1410 1410 LogFlow(("Manual TLB flush\n")); … … 1438 1438 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); 1439 1439 #endif 1440 STAM_PROFILE_ADV_STOP_START(&pVCpu->h waccm.s.StatEntry, &pVCpu->hwaccm.s.StatInGC, x);1440 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x); 1441 1441 1442 1442 /* Setup TLB control and ASID in the VMCB. */ … … 1444 1444 1445 1445 /* In case we execute a goto ResumeExecution later on. */ 1446 pVCpu->h waccm.s.fResumeVM = true;1447 pVCpu->h waccm.s.fForceTLBFlush = pVM->hwaccm.s.svm.fAlwaysFlushTLB;1448 1449 Assert(sizeof(pVCpu->h waccm.s.svm.pVMCBPhys) == 8);1446 pVCpu->hm.s.fResumeVM = true; 1447 pVCpu->hm.s.fForceTLBFlush = pVM->hm.s.svm.fAlwaysFlushTLB; 1448 1449 Assert(sizeof(pVCpu->hm.s.svm.pVMCBPhys) == 8); 1450 1450 Assert(pVMCB->ctrl.IntCtrl.n.u1VIrqMasking); 1451 Assert(pVMCB->ctrl.u64IOPMPhysAddr == pVM->h waccm.s.svm.pIOBitmapPhys);1452 Assert(pVMCB->ctrl.u64MSRPMPhysAddr == pVCpu->h waccm.s.svm.pMSRBitmapPhys);1451 Assert(pVMCB->ctrl.u64IOPMPhysAddr == pVM->hm.s.svm.pIOBitmapPhys); 1452 Assert(pVMCB->ctrl.u64MSRPMPhysAddr == pVCpu->hm.s.svm.pMSRBitmapPhys); 1453 1453 Assert(pVMCB->ctrl.u64LBRVirt == 0); 1454 1454 … … 1462 1462 * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}. 
1463 1463 */ 1464 u32HostExtFeatures = pVM->h waccm.s.cpuid.u32AMDFeatureEDX;1464 u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX; 1465 1465 if ( (u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP) 1466 1466 && !(pVMCB->ctrl.u32InterceptCtrl2 & SVM_CTRL2_INTERCEPT_RDTSCP)) 1467 1467 { 1468 pVCpu->h waccm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX);1468 pVCpu->hm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX); 1469 1469 uint64_t u64GuestTSCAux = 0; 1470 1470 rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTSCAux); … … 1474 1474 1475 1475 #ifdef VBOX_WITH_KERNEL_USING_XMM 1476 h waccmR0SVMRunWrapXMM(pVCpu->hwaccm.s.svm.pVMCBHostPhys, pVCpu->hwaccm.s.svm.pVMCBPhys, pCtx, pVM, pVCpu,1477 pVCpu->h waccm.s.svm.pfnVMRun);1476 hmR0SVMRunWrapXMM(pVCpu->hm.s.svm.pVMCBHostPhys, pVCpu->hm.s.svm.pVMCBPhys, pCtx, pVM, pVCpu, 1477 pVCpu->hm.s.svm.pfnVMRun); 1478 1478 #else 1479 pVCpu->h waccm.s.svm.pfnVMRun(pVCpu->hwaccm.s.svm.pVMCBHostPhys, pVCpu->hwaccm.s.svm.pVMCBPhys, pCtx, pVM, pVCpu);1480 #endif 1481 ASMAtomicWriteBool(&pVCpu->h waccm.s.fCheckedTLBFlush, false);1482 ASMAtomicIncU32(&pVCpu->h waccm.s.cWorldSwitchExits);1479 pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.pVMCBHostPhys, pVCpu->hm.s.svm.pVMCBPhys, pCtx, pVM, pVCpu); 1480 #endif 1481 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); 1482 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); 1483 1483 /* Possibly the last TSC value seen by the guest (too high) (only when we're in TSC offset mode). */ 1484 1484 if (!(pVMCB->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC)) … … 1486 1486 /* Restore host's TSC_AUX. */ 1487 1487 if (u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP) 1488 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->h waccm.s.u64HostTSCAux);1488 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTSCAux); 1489 1489 1490 1490 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + … … 1493 1493 TMNotifyEndOfExecution(pVCpu); 1494 1494 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED); 1495 STAM_PROFILE_ADV_STOP_START(&pVCpu->h waccm.s.StatInGC, &pVCpu->hwaccm.s.StatExit1, x);1495 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x); 1496 1496 ASMSetFlags(uOldEFlags); 1497 1497 #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION … … 1510 1510 if (RT_UNLIKELY(exitCode == (uint64_t)SVM_EXIT_INVALID)) /* Invalid guest state. */ 1511 1511 { 1512 H WACCMDumpRegs(pVM, pVCpu, pCtx);1512 HMDumpRegs(pVM, pVCpu, pCtx); 1513 1513 #ifdef DEBUG 1514 1514 Log(("ctrl.u16InterceptRdCRx %x\n", pVMCB->ctrl.u16InterceptRdCRx)); … … 1713 1713 * unless in the nested paging case where CR3 can be changed by the guest. 1714 1714 */ 1715 if ( pVM->h waccm.s.fNestedPaging1715 if ( pVM->hm.s.fNestedPaging 1716 1716 && pCtx->cr3 != pVMCB->guest.u64CR3) 1717 1717 { … … 1740 1740 1741 1741 /* Check if an injected event was interrupted prematurely. */ 1742 pVCpu->h waccm.s.Event.intInfo = pVMCB->ctrl.ExitIntInfo.au64[0];1742 pVCpu->hm.s.Event.intInfo = pVMCB->ctrl.ExitIntInfo.au64[0]; 1743 1743 if ( pVMCB->ctrl.ExitIntInfo.n.u1Valid 1744 1744 /* we don't care about 'int xx' as the instruction will be restarted. 
*/ 1745 1745 && pVMCB->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT) 1746 1746 { 1747 Log(("Pending inject %RX64 at %RGv exit=%08x\n", pVCpu->h waccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitCode));1747 Log(("Pending inject %RX64 at %RGv exit=%08x\n", pVCpu->hm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitCode)); 1748 1748 1749 1749 #ifdef LOG_ENABLED 1750 1750 SVM_EVENT Event; 1751 Event.au64[0] = pVCpu->h waccm.s.Event.intInfo;1751 Event.au64[0] = pVCpu->hm.s.Event.intInfo; 1752 1752 1753 1753 if ( exitCode == SVM_EXIT_EXCEPTION_E … … 1758 1758 #endif 1759 1759 1760 pVCpu->h waccm.s.Event.fPending = true;1760 pVCpu->hm.s.Event.fPending = true; 1761 1761 /* Error code present? (redundant) */ 1762 1762 if (pVMCB->ctrl.ExitIntInfo.n.u1ErrorCodeValid) 1763 pVCpu->h waccm.s.Event.errCode = pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode;1763 pVCpu->hm.s.Event.errCode = pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode; 1764 1764 else 1765 pVCpu->h waccm.s.Event.errCode = 0;1765 pVCpu->hm.s.Event.errCode = 0; 1766 1766 } 1767 1767 #ifdef VBOX_WITH_STATISTICS 1768 1768 if (exitCode == SVM_EXIT_NPF) 1769 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitReasonNPF);1769 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNPF); 1770 1770 else 1771 STAM_COUNTER_INC(&pVCpu->h waccm.s.paStatExitReasonR0[exitCode & MASK_EXITREASON_STAT]);1771 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[exitCode & MASK_EXITREASON_STAT]); 1772 1772 #endif 1773 1773 … … 1775 1775 if (fSyncTPR) 1776 1776 { 1777 if (pVM->h waccm.s.fTPRPatchingActive)1777 if (pVM->hm.s.fTPRPatchingActive) 1778 1778 { 1779 1779 if ((pCtx->msrLSTAR & 0xff) != u8LastTPR) … … 1804 1804 pVMCB->ctrl.ExitIntInfo.au64[0], UINT64_MAX); 1805 1805 #endif 1806 STAM_PROFILE_ADV_STOP_START(&pVCpu->h waccm.s.StatExit1, &pVCpu->hwaccm.s.StatExit2, x);1806 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x); 1807 1807 1808 1808 /* Deal with the reason of the VM-exit. */ … … 1827 1827 case X86_XCPT_DB: 1828 1828 { 1829 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestDB);1829 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB); 1830 1830 1831 1831 /* Note that we don't support guest and host-initiated debugging at the same time. */ … … 1861 1861 { 1862 1862 Assert(CPUMIsGuestFPUStateActive(pVCpu)); 1863 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitShadowNM);1863 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM); 1864 1864 1865 1865 /* Continue execution. */ 1866 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;1866 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0; 1867 1867 1868 1868 goto ResumeExecution; … … 1870 1870 1871 1871 Log(("Forward #NM fault to the guest\n")); 1872 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestNM);1872 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM); 1873 1873 1874 1874 Event.au64[0] = 0; … … 1887 1887 1888 1888 #ifdef VBOX_ALWAYS_TRAP_PF 1889 if (pVM->h waccm.s.fNestedPaging)1889 if (pVM->hm.s.fNestedPaging) 1890 1890 { 1891 1891 /* … … 1894 1894 Log(("Guest page fault at %04X:%RGv cr2=%RGv error code %x rsp=%RGv\n", pCtx->cs, (RTGCPTR)pCtx->rip, 1895 1895 uFaultAddress, errCode, (RTGCPTR)pCtx->rsp)); 1896 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestPF);1896 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF); 1897 1897 1898 1898 /* Now we must update CR2. 
*/ … … 1910 1910 } 1911 1911 #endif 1912 Assert(!pVM->h waccm.s.fNestedPaging);1913 1914 #ifdef VBOX_H WACCM_WITH_GUEST_PATCHING1912 Assert(!pVM->hm.s.fNestedPaging); 1913 1914 #ifdef VBOX_HM_WITH_GUEST_PATCHING 1915 1915 /* Shortcut for APIC TPR reads and writes; 32 bits guests only */ 1916 if ( pVM->h waccm.s.fTRPPatchingAllowed1916 if ( pVM->hm.s.fTRPPatchingAllowed 1917 1917 && (uFaultAddress & 0xfff) == 0x080 1918 1918 && !(errCode & X86_TRAP_PF_P) /* not present */ 1919 1919 && CPUMGetGuestCPL(pVCpu) == 0 1920 1920 && !CPUMIsGuestInLongModeEx(pCtx) 1921 && pVM->h waccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches))1921 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches)) 1922 1922 { 1923 1923 RTGCPHYS GCPhysApicBase, GCPhys; … … 1930 1930 { 1931 1931 /* Only attempt to patch the instruction once. */ 1932 PH WACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);1932 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip); 1933 1933 if (!pPatch) 1934 1934 { 1935 rc = VINF_EM_H WACCM_PATCH_TPR_INSTR;1935 rc = VINF_EM_HM_PATCH_TPR_INSTR; 1936 1936 break; 1937 1937 } … … 1953 1953 /* We've successfully synced our shadow pages, so let's just continue execution. */ 1954 1954 Log2(("Shadow page fault at %RGv cr2=%RGv error code %x\n", (RTGCPTR)pCtx->rip, uFaultAddress, errCode)); 1955 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitShadowPF);1955 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF); 1956 1956 1957 1957 TRPMResetTrap(pVCpu); … … 1964 1964 */ 1965 1965 Log2(("Forward page fault to the guest\n")); 1966 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestPF);1966 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF); 1967 1967 /* The error code might have been changed. */ 1968 1968 errCode = TRPMGetErrorCode(pVCpu); … … 1994 1994 case X86_XCPT_MF: /* Floating point exception. 
*/ 1995 1995 { 1996 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestMF);1996 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF); 1997 1997 if (!(pCtx->cr0 & X86_CR0_NE)) 1998 1998 { … … 2029 2029 { 2030 2030 case X86_XCPT_GP: 2031 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestGP);2031 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP); 2032 2032 Event.n.u1ErrorCodeValid = 1; 2033 2033 Event.n.u32ErrorCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */ … … 2037 2037 break; 2038 2038 case X86_XCPT_DE: 2039 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestDE);2039 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE); 2040 2040 break; 2041 2041 case X86_XCPT_UD: 2042 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestUD);2042 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD); 2043 2043 break; 2044 2044 case X86_XCPT_SS: 2045 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestSS);2045 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS); 2046 2046 Event.n.u1ErrorCodeValid = 1; 2047 2047 Event.n.u32ErrorCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */ 2048 2048 break; 2049 2049 case X86_XCPT_NP: 2050 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestNP);2050 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP); 2051 2051 Event.n.u1ErrorCodeValid = 1; 2052 2052 Event.n.u32ErrorCode = pVMCB->ctrl.u64ExitInfo1; /* EXITINFO1 = error code */ … … 2074 2074 PGMMODE enmShwPagingMode; 2075 2075 2076 Assert(pVM->h waccm.s.fNestedPaging);2076 Assert(pVM->hm.s.fNestedPaging); 2077 2077 LogFlow(("Nested page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, GCPhysFault, errCode)); 2078 2078 2079 #ifdef VBOX_H WACCM_WITH_GUEST_PATCHING2079 #ifdef VBOX_HM_WITH_GUEST_PATCHING 2080 2080 /* Shortcut for APIC TPR reads and writes; 32 bits guests only */ 2081 if ( pVM->h waccm.s.fTRPPatchingAllowed2081 if ( pVM->hm.s.fTRPPatchingAllowed 2082 2082 && (GCPhysFault & PAGE_OFFSET_MASK) == 0x080 2083 2083 && ( !(errCode & X86_TRAP_PF_P) /* not present */ … … 2085 2085 && CPUMGetGuestCPL(pVCpu) == 0 2086 2086 && !CPUMIsGuestInLongModeEx(pCtx) 2087 && pVM->h waccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches))2087 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches)) 2088 2088 { 2089 2089 RTGCPHYS GCPhysApicBase; … … 2094 2094 { 2095 2095 /* Only attempt to patch the instruction once. */ 2096 PH WACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);2096 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip); 2097 2097 if (!pPatch) 2098 2098 { 2099 rc = VINF_EM_H WACCM_PATCH_TPR_INSTR;2099 rc = VINF_EM_HM_PATCH_TPR_INSTR; 2100 2100 break; 2101 2101 } … … 2153 2153 /* We've successfully synced our shadow pages, so let's just continue execution. */ 2154 2154 Log2(("Shadow page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, GCPhysFault, errCode)); 2155 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitShadowPF);2155 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF); 2156 2156 2157 2157 TRPMResetTrap(pVCpu); … … 2186 2186 case SVM_EXIT_WBINVD: 2187 2187 case SVM_EXIT_INVD: /* Guest software attempted to execute INVD. */ 2188 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitInvd);2188 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd); 2189 2189 /* Skip instruction and continue directly. */ 2190 2190 pCtx->rip += 2; /* Note! hardcoded opcode size! 
*/ … … 2195 2195 { 2196 2196 Log2(("SVM: Cpuid at %RGv for %x\n", (RTGCPTR)pCtx->rip, pCtx->eax)); 2197 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitCpuid);2197 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid); 2198 2198 rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 2199 2199 if (rc == VINF_SUCCESS) … … 2211 2211 { 2212 2212 Log2(("SVM: Rdtsc\n")); 2213 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitRdtsc);2213 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc); 2214 2214 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 2215 2215 if (rc == VINF_SUCCESS) … … 2226 2226 { 2227 2227 Log2(("SVM: Rdpmc %x\n", pCtx->ecx)); 2228 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitRdpmc);2228 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc); 2229 2229 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 2230 2230 if (rc == VINF_SUCCESS) … … 2241 2241 { 2242 2242 Log2(("SVM: Rdtscp\n")); 2243 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitRdtscp);2243 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp); 2244 2244 rc = EMInterpretRdtscp(pVM, pVCpu, pCtx); 2245 2245 if (rc == VINF_SUCCESS) … … 2257 2257 { 2258 2258 Log2(("SVM: invlpg\n")); 2259 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitInvlpg);2260 2261 Assert(!pVM->h waccm.s.fNestedPaging);2259 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg); 2260 2261 Assert(!pVM->hm.s.fNestedPaging); 2262 2262 2263 2263 /* Truly a pita. Why can't SVM give the same information as VT-x? */ … … 2265 2265 if (rc == VINF_SUCCESS) 2266 2266 { 2267 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatFlushPageInvlpg);2267 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushPageInvlpg); 2268 2268 goto ResumeExecution; /* eip already updated */ 2269 2269 } … … 2277 2277 { 2278 2278 Log2(("SVM: %RGv mov cr%d, \n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_WRITE_CR0)); 2279 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitCRxWrite[exitCode - SVM_EXIT_WRITE_CR0]);2279 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[exitCode - SVM_EXIT_WRITE_CR0]); 2280 2280 rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0); 2281 2281 … … 2283 2283 { 2284 2284 case 0: 2285 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;2285 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0; 2286 2286 break; 2287 2287 case 2: 2288 2288 break; 2289 2289 case 3: 2290 Assert(!pVM->h waccm.s.fNestedPaging);2291 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;2290 Assert(!pVM->hm.s.fNestedPaging); 2291 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3; 2292 2292 break; 2293 2293 case 4: 2294 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;2294 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4; 2295 2295 break; 2296 2296 case 8: … … 2315 2315 { 2316 2316 Log2(("SVM: %RGv mov x, cr%d\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_READ_CR0)); 2317 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitCRxRead[exitCode - SVM_EXIT_READ_CR0]);2317 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[exitCode - SVM_EXIT_READ_CR0]); 2318 2318 rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0); 2319 2319 if (rc == VINF_SUCCESS) … … 2333 2333 { 2334 2334 Log2(("SVM: %RGv mov dr%d, x\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_WRITE_DR0)); 2335 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitDRxWrite);2335 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite); 2336 2336 2337 2337 if ( !DBGFIsStepping(pVCpu) 2338 2338 && !CPUMIsHyperDebugStateActive(pVCpu)) 2339 2339 { 2340 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatDRxContextSwitch);2340 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch); 2341 2341 2342 2342 /* Disable drx move intercepts. 
*/ … … 2354 2354 { 2355 2355 /* EIP has been updated already. */ 2356 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;2356 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG; 2357 2357 2358 2358 /* Only resume if successful. */ … … 2369 2369 { 2370 2370 Log2(("SVM: %RGv mov x, dr%d\n", (RTGCPTR)pCtx->rip, exitCode - SVM_EXIT_READ_DR0)); 2371 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitDRxRead);2371 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead); 2372 2372 2373 2373 if (!DBGFIsStepping(pVCpu)) 2374 2374 { 2375 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatDRxContextSwitch);2375 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch); 2376 2376 2377 2377 /* Disable DRx move intercepts. */ … … 2415 2415 { 2416 2416 /* ins/outs */ 2417 PDISCPUSTATE pDis = &pVCpu->h waccm.s.DisState;2417 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState; 2418 2418 2419 2419 /* Disassemble manually to deal with segment prefixes. */ … … 2424 2424 { 2425 2425 Log2(("IOMInterpretOUTSEx %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize)); 2426 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitIOStringWrite);2426 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite); 2427 2427 rc = IOMInterpretOUTSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix, 2428 2428 (DISCPUMODE)pDis->uAddrMode, uIOSize); … … 2431 2431 { 2432 2432 Log2(("IOMInterpretINSEx %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, uIOSize)); 2433 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitIOStringRead);2433 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead); 2434 2434 rc = IOMInterpretINSEx(pVM, CPUMCTX2CORE(pCtx), IoExitInfo.n.u16Port, pDis->fPrefix, 2435 2435 (DISCPUMODE)pDis->uAddrMode, uIOSize); … … 2448 2448 Log2(("IOMIOPortWrite %RGv %x %x size=%d\n", (RTGCPTR)pCtx->rip, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, 2449 2449 uIOSize)); 2450 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitIOWrite);2450 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite); 2451 2451 rc = IOMIOPortWrite(pVM, IoExitInfo.n.u16Port, pCtx->eax & uAndVal, uIOSize); 2452 2452 if (rc == VINF_IOM_R3_IOPORT_WRITE) 2453 2453 { 2454 H WACCMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,2454 HMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, 2455 2455 uAndVal, uIOSize); 2456 2456 } … … 2460 2460 uint32_t u32Val = 0; 2461 2461 2462 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitIORead);2462 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead); 2463 2463 rc = IOMIOPortRead(pVM, IoExitInfo.n.u16Port, &u32Val, uIOSize); 2464 2464 if (IOM_SUCCESS(rc)) … … 2471 2471 else if (rc == VINF_IOM_R3_IOPORT_READ) 2472 2472 { 2473 H WACCMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port,2473 HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pVMCB->ctrl.u64ExitInfo2, IoExitInfo.n.u16Port, 2474 2474 uAndVal, uIOSize); 2475 2475 } … … 2493 2493 static uint32_t const aIOSize[4] = { 1, 2, 0, 4 }; 2494 2494 2495 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatDRxIOCheck);2495 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIOCheck); 2496 2496 for (unsigned i = 0; i < 4; i++) 2497 2497 { … … 2566 2566 case SVM_EXIT_HLT: 2567 2567 /* Check if external interrupts are pending; if so, don't switch back. 
*/ 2568 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitHlt);2568 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt); 2569 2569 pCtx->rip++; /* skip hlt */ 2570 2570 if (EMShouldContinueAfterHalt(pVCpu, pCtx)) … … 2576 2576 case SVM_EXIT_MWAIT_UNCOND: 2577 2577 Log2(("SVM: mwait\n")); 2578 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitMwait);2578 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait); 2579 2579 rc = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 2580 2580 if ( rc == VINF_EM_HALT … … 2598 2598 Log2(("SVM: monitor\n")); 2599 2599 2600 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitMonitor);2600 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor); 2601 2601 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 2602 2602 if (rc == VINF_SUCCESS) … … 2644 2644 { 2645 2645 /* When an interrupt is pending, we'll let MSR_K8_LSTAR writes fault in our TPR patch code. */ 2646 if ( pVM->h waccm.s.fTPRPatchingActive2646 if ( pVM->hm.s.fTPRPatchingActive 2647 2647 && pCtx->ecx == MSR_K8_LSTAR 2648 2648 && pVMCB->ctrl.u64ExitInfo1 == 1 /* wrmsr */) … … 2668 2668 * so we play safe by completely disassembling the instruction. 2669 2669 */ 2670 STAM_COUNTER_INC((pVMCB->ctrl.u64ExitInfo1 == 0) ? &pVCpu->h waccm.s.StatExitRdmsr : &pVCpu->hwaccm.s.StatExitWrmsr);2670 STAM_COUNTER_INC((pVMCB->ctrl.u64ExitInfo1 == 0) ? &pVCpu->hm.s.StatExitRdmsr : &pVCpu->hm.s.StatExitWrmsr); 2671 2671 Log(("SVM: %s\n", (pVMCB->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr")); 2672 2672 rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0); … … 2685 2685 Log(("SVM_EXIT_TASK_SWITCH: exit2=%RX64\n", pVMCB->ctrl.u64ExitInfo2)); 2686 2686 if ( !(pVMCB->ctrl.u64ExitInfo2 & (SVM_EXIT2_TASK_SWITCH_IRET | SVM_EXIT2_TASK_SWITCH_JMP)) 2687 && pVCpu->h waccm.s.Event.fPending)2687 && pVCpu->hm.s.Event.fPending) 2688 2688 { 2689 2689 SVM_EVENT Event; 2690 Event.au64[0] = pVCpu->h waccm.s.Event.intInfo;2690 Event.au64[0] = pVCpu->hm.s.Event.intInfo; 2691 2691 2692 2692 /* Caused by an injected interrupt. */ 2693 pVCpu->h waccm.s.Event.fPending = false;2693 pVCpu->hm.s.Event.fPending = false; 2694 2694 switch (Event.n.u3Type) 2695 2695 { … … 2758 2758 if (exitCode == SVM_EXIT_INTR) 2759 2759 { 2760 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatPendingHostIrq);2760 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq); 2761 2761 /* On the next entry we'll only sync the host context. */ 2762 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;2762 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT; 2763 2763 } 2764 2764 else … … 2767 2767 /** @todo we can do better than this */ 2768 2768 /* Not in the VINF_PGM_CHANGE_MODE though! 
*/ 2769 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;2769 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL; 2770 2770 } 2771 2771 … … 2783 2783 #endif 2784 2784 2785 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2, x);2786 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit1, x);2787 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatEntry, x);2785 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x); 2786 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x); 2787 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x); 2788 2788 return VBOXSTRICTRC_TODO(rc); 2789 2789 } … … 2809 2809 uint8_t u8Tpr; 2810 2810 2811 PH WACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);2811 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip); 2812 2812 if (!pPatch) 2813 2813 break; … … 2815 2815 switch (pPatch->enmType) 2816 2816 { 2817 case H WACCMTPRINSTR_READ:2817 case HMTPRINSTR_READ: 2818 2818 /* TPR caching in CR8 */ 2819 2819 rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPending); … … 2827 2827 break; 2828 2828 2829 case H WACCMTPRINSTR_WRITE_REG:2830 case H WACCMTPRINSTR_WRITE_IMM:2829 case HMTPRINSTR_WRITE_REG: 2830 case HMTPRINSTR_WRITE_IMM: 2831 2831 /* Fetch the new TPR value */ 2832 if (pPatch->enmType == H WACCMTPRINSTR_WRITE_REG)2832 if (pPatch->enmType == HMTPRINSTR_WRITE_REG) 2833 2833 { 2834 2834 uint32_t val; … … 2865 2865 VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu) 2866 2866 { 2867 Assert(pVM->h waccm.s.svm.fSupported);2868 2869 LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVCpu->h waccm.s.idLastCpu, pVCpu->hwaccm.s.uCurrentASID));2870 pVCpu->h waccm.s.fResumeVM = false;2867 Assert(pVM->hm.s.svm.fSupported); 2868 2869 LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.uCurrentASID)); 2870 pVCpu->hm.s.fResumeVM = false; 2871 2871 2872 2872 /* Force to reload LDTR, so we'll execute VMLoad to load additional guest state. */ 2873 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_LDTR;2873 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_LDTR; 2874 2874 2875 2875 return VINF_SUCCESS; … … 2887 2887 VMMR0DECL(int) SVMR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 2888 2888 { 2889 SVM_VMCB *pVMCB = (SVM_VMCB *)pVCpu->h waccm.s.svm.pVMCB;2890 2891 Assert(pVM->h waccm.s.svm.fSupported);2889 SVM_VMCB *pVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pVMCB; 2890 2891 Assert(pVM->hm.s.svm.fSupported); 2892 2892 2893 2893 #ifdef DEBUG … … 2908 2908 2909 2909 /* Resync the debug registers the next time. */ 2910 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;2910 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG; 2911 2911 } 2912 2912 else … … 2979 2979 if (CPUMGetGuestCodeBits(pVCpu) != 16) 2980 2980 { 2981 PDISSTATE pDis = &pVCpu->h waccm.s.DisState;2981 PDISSTATE pDis = &pVCpu->hm.s.DisState; 2982 2982 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL); 2983 2983 if (RT_SUCCESS(rc) && pDis->pCurInstr->uOpcode == OP_INVLPG) … … 3003 3003 VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt) 3004 3004 { 3005 bool fFlushPending = pVM->h waccm.s.svm.fAlwaysFlushTLB | VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH);3005 bool fFlushPending = pVM->hm.s.svm.fAlwaysFlushTLB | VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH); 3006 3006 3007 3007 /* Skip it if a TLB flush is already pending. 
*/ … … 3012 3012 Log2(("SVMR0InvalidatePage %RGv\n", GCVirt)); 3013 3013 AssertReturn(pVM, VERR_INVALID_PARAMETER); 3014 Assert(pVM->h waccm.s.svm.fSupported);3015 3016 pVMCB = (SVM_VMCB *)pVCpu->h waccm.s.svm.pVMCB;3014 Assert(pVM->hm.s.svm.fSupported); 3015 3016 pVMCB = (SVM_VMCB *)pVCpu->hm.s.svm.pVMCB; 3017 3017 AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_HMSVM_INVALID_PVMCB); 3018 3018 … … 3040 3040 VMMR0DECL(int) SVMR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys) 3041 3041 { 3042 Assert(pVM->h waccm.s.fNestedPaging);3042 Assert(pVM->hm.s.fNestedPaging); 3043 3043 /* invlpga only invalidates TLB entries for guest virtual addresses; we have no choice but to force a TLB flush here. */ 3044 3044 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH); 3045 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatFlushTLBInvlpga);3045 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTLBInvlpga); 3046 3046 return VINF_SUCCESS; 3047 3047 } … … 3069 3069 aParam[3] = (uint32_t)(pVMCBPhys >> 32); /* Param 2: pVMCBPhys - Hi. */ 3070 3070 3071 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->h waccm.s.pfnSVMGCVMRun64, 4, &aParam[0]);3071 return SVMR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnSVMGCVMRun64, 4, &aParam[0]); 3072 3072 } 3073 3073 … … 3105 3105 CPUMPushHyper(pVCpu, paParam[i]); 3106 3106 3107 STAM_PROFILE_ADV_START(&pVCpu->h waccm.s.StatWorldSwitch3264, z);3107 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z); 3108 3108 /* Call switcher. */ 3109 rc = pVM->h waccm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));3110 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatWorldSwitch3264, z);3109 rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum)); 3110 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z); 3111 3111 3112 3112 ASMSetFlags(uOldEFlags); -
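For orientation, the hmR0SvmSetupTLB hunks above keep the existing ASID bookkeeping: a TLB flush (and normally a fresh ASID) is forced whenever the VCPU last ran on a different host CPU or that CPU's flush counter has moved on, and the ASID space wraps back to 1 once exhausted. Below is a minimal, self-contained sketch of just that decision; the structure and function names are invented for illustration and are not the VirtualBox types.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical, simplified per-host-CPU and per-VCPU bookkeeping mirroring the
       shape of the data the code above tracks (not the VirtualBox structures). */
    typedef struct { uint32_t idCpu; uint32_t cFlushes; uint32_t uCurrentAsid; } HOSTCPUSTATE;
    typedef struct { uint32_t idLastCpu; uint32_t cFlushes; uint32_t uAsid; bool fForceFlush; } VCPUSTATE;

    /* Decide whether a fresh ASID (and hence a TLB flush) is needed before VMRUN. */
    void exampleSetupTlb(VCPUSTATE *pVCpu, HOSTCPUSTATE *pCpu, uint32_t uMaxAsid)
    {
        /* Rescheduled to another host CPU, or that CPU flushed since we last ran there. */
        if (pVCpu->idLastCpu != pCpu->idCpu || pVCpu->cFlushes != pCpu->cFlushes)
            pVCpu->fForceFlush = true;
        pVCpu->idLastCpu = pCpu->idCpu;

        if (pVCpu->fForceFlush)
        {
            if (++pCpu->uCurrentAsid >= uMaxAsid)   /* ASID space exhausted: wrap and count a flush. */
            {
                pCpu->uCurrentAsid = 1;             /* ASID 0 is reserved for the host. */
                pCpu->cFlushes++;
            }
            pVCpu->uAsid       = pCpu->uCurrentAsid;
            pVCpu->cFlushes    = pCpu->cFlushes;
            pVCpu->fForceFlush = false;
        }
    }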
trunk/src/VBox/VMM/VMMR0/HWSVMR0.h
r43307 r43387 24 24 #include <VBox/vmm/stam.h> 25 25 #include <VBox/dis.h> 26 #include <VBox/vmm/hwaccm.h>26 #include <VBox/vmm/hm.h> 27 27 #include <VBox/vmm/pgm.h> 28 #include <VBox/vmm/hwacc_svm.h>28 #include <VBox/vmm/hm_svm.h> 29 29 30 30 RT_C_DECLS_BEGIN -
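For reference, the MSR permission bitmap that hmR0SvmSetMSRPermission works on in HWSVMR0.cpp above uses two intercept bits per MSR, with MSRs 0x0000-0x1FFF mapped into the first 2 KB of the 8 KB bitmap. The following is a hedged, self-contained sketch of the offset calculation for that low range only; the helper name is invented and the higher MSR ranges are deliberately left out.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Set or clear the read/write intercept bits for one MSR in the low range of an
       AMD-V MSR permission bitmap: two bits per MSR, lower bit = read intercept,
       upper bit = write intercept; MSRs 0x0000..0x1FFF occupy the first 2 KB. */
    void exampleMsrpmSetLowRange(uint8_t *pbMsrpm, uint32_t uMsr, bool fInterceptRd, bool fInterceptWr)
    {
        if (uMsr > 0x1fff)                  /* other MSR ranges use further 2 KB blocks (not shown) */
            return;
        size_t  iBit  = (size_t)uMsr * 2;   /* two permission bits per MSR */
        uint8_t uMask = (uint8_t)(3u << (iBit & 7));
        uint8_t uVal  = (uint8_t)(((fInterceptRd ? 1u : 0u) | (fInterceptWr ? 2u : 0u)) << (iBit & 7));
        pbMsrpm[iBit / 8] = (uint8_t)((pbMsrpm[iBit / 8] & ~uMask) | uVal);
    }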
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
r43379 r43387 20 20 * Header Files * 21 21 *******************************************************************************/ 22 #define LOG_GROUP LOG_GROUP_H WACCM22 #define LOG_GROUP LOG_GROUP_HM 23 23 #include <iprt/asm-amd64-x86.h> 24 #include <VBox/vmm/h waccm.h>24 #include <VBox/vmm/hm.h> 25 25 #include <VBox/vmm/pgm.h> 26 26 #include <VBox/vmm/dbgf.h> … … 32 32 #endif 33 33 #include <VBox/vmm/tm.h> 34 #include "H WACCMInternal.h"34 #include "HMInternal.h" 35 35 #include <VBox/vmm/vm.h> 36 36 #include <VBox/vmm/pdmapi.h> … … 70 70 71 71 #ifdef VBOX_WITH_HYBRID_32BIT_KERNEL 72 /** See H WACCMR0A.asm. */72 /** See HMR0A.asm. */ 73 73 extern "C" uint32_t g_fVMXIs64bitHost; 74 74 #endif … … 90 90 91 91 /** 92 * Updates error from VMCS to H WACCMCPU's lasterror record.92 * Updates error from VMCS to HMCPU's lasterror record. 93 93 * 94 94 * @param pVM Pointer to the VM. … … 103 103 104 104 VMXReadVMCS(VMX_VMCS32_RO_VM_INSTR_ERROR, &instrError); 105 pVCpu->h waccm.s.vmx.lasterror.ulInstrError = instrError;106 } 107 pVM->h waccm.s.lLastError = rc;105 pVCpu->hm.s.vmx.lasterror.ulInstrError = instrError; 106 } 107 pVM->hm.s.lLastError = rc; 108 108 } 109 109 … … 130 130 { 131 131 /* Set revision dword at the beginning of the VMXON structure. */ 132 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->h waccm.s.vmx.msr.vmx_basic_info);132 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info); 133 133 } 134 134 … … 165 165 */ 166 166 if ( pVM 167 && pVM->h waccm.s.vmx.fVPID168 && (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS))167 && pVM->hm.s.vmx.fVPID 168 && (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS)) 169 169 { 170 170 hmR0VmxFlushVPID(pVM, NULL /* pvCpu */, VMX_FLUSH_VPID_ALL_CONTEXTS, 0 /* GCPtr */); … … 224 224 #endif 225 225 226 pVM->h waccm.s.vmx.pMemObjAPIC = NIL_RTR0MEMOBJ;227 228 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)226 pVM->hm.s.vmx.pMemObjAPIC = NIL_RTR0MEMOBJ; 227 228 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW) 229 229 { 230 230 /* Allocate one page for the APIC physical page (serves for filtering accesses). 
*/ 231 rc = RTR0MemObjAllocCont(&pVM->h waccm.s.vmx.pMemObjAPIC, PAGE_SIZE, false /* executable R0 mapping */);231 rc = RTR0MemObjAllocCont(&pVM->hm.s.vmx.pMemObjAPIC, PAGE_SIZE, false /* executable R0 mapping */); 232 232 AssertRC(rc); 233 233 if (RT_FAILURE(rc)) 234 234 return rc; 235 235 236 pVM->h waccm.s.vmx.pAPIC = (uint8_t *)RTR0MemObjAddress(pVM->hwaccm.s.vmx.pMemObjAPIC);237 pVM->h waccm.s.vmx.pAPICPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.vmx.pMemObjAPIC, 0);238 ASMMemZero32(pVM->h waccm.s.vmx.pAPIC, PAGE_SIZE);236 pVM->hm.s.vmx.pAPIC = (uint8_t *)RTR0MemObjAddress(pVM->hm.s.vmx.pMemObjAPIC); 237 pVM->hm.s.vmx.pAPICPhys = RTR0MemObjGetPagePhysAddr(pVM->hm.s.vmx.pMemObjAPIC, 0); 238 ASMMemZero32(pVM->hm.s.vmx.pAPIC, PAGE_SIZE); 239 239 } 240 240 else 241 241 { 242 pVM->h waccm.s.vmx.pMemObjAPIC = 0;243 pVM->h waccm.s.vmx.pAPIC = 0;244 pVM->h waccm.s.vmx.pAPICPhys = 0;242 pVM->hm.s.vmx.pMemObjAPIC = 0; 243 pVM->hm.s.vmx.pAPIC = 0; 244 pVM->hm.s.vmx.pAPICPhys = 0; 245 245 } 246 246 247 247 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 248 248 { 249 rc = RTR0MemObjAllocCont(&pVM->h waccm.s.vmx.pMemObjScratch, PAGE_SIZE, false /* executable R0 mapping */);249 rc = RTR0MemObjAllocCont(&pVM->hm.s.vmx.pMemObjScratch, PAGE_SIZE, false /* executable R0 mapping */); 250 250 AssertRC(rc); 251 251 if (RT_FAILURE(rc)) 252 252 return rc; 253 253 254 pVM->h waccm.s.vmx.pScratch = (uint8_t *)RTR0MemObjAddress(pVM->hwaccm.s.vmx.pMemObjScratch);255 pVM->h waccm.s.vmx.pScratchPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.vmx.pMemObjScratch, 0);256 257 ASMMemZero32(pVM->h waccm.s.vmx.pScratch, PAGE_SIZE);258 strcpy((char *)pVM->h waccm.s.vmx.pScratch, "SCRATCH Magic");259 *(uint64_t *)(pVM->h waccm.s.vmx.pScratch + 16) = UINT64_C(0xDEADBEEFDEADBEEF);254 pVM->hm.s.vmx.pScratch = (uint8_t *)RTR0MemObjAddress(pVM->hm.s.vmx.pMemObjScratch); 255 pVM->hm.s.vmx.pScratchPhys = RTR0MemObjGetPagePhysAddr(pVM->hm.s.vmx.pMemObjScratch, 0); 256 257 ASMMemZero32(pVM->hm.s.vmx.pScratch, PAGE_SIZE); 258 strcpy((char *)pVM->hm.s.vmx.pScratch, "SCRATCH Magic"); 259 *(uint64_t *)(pVM->hm.s.vmx.pScratch + 16) = UINT64_C(0xDEADBEEFDEADBEEF); 260 260 } 261 261 #endif … … 266 266 PVMCPU pVCpu = &pVM->aCpus[i]; 267 267 268 pVCpu->h waccm.s.vmx.hMemObjVMCS = NIL_RTR0MEMOBJ;268 pVCpu->hm.s.vmx.hMemObjVMCS = NIL_RTR0MEMOBJ; 269 269 270 270 /* Allocate one page for the VM control structure (VMCS). */ 271 rc = RTR0MemObjAllocCont(&pVCpu->h waccm.s.vmx.hMemObjVMCS, PAGE_SIZE, false /* executable R0 mapping */);271 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjVMCS, PAGE_SIZE, false /* executable R0 mapping */); 272 272 AssertRC(rc); 273 273 if (RT_FAILURE(rc)) 274 274 return rc; 275 275 276 pVCpu->h waccm.s.vmx.pvVMCS = RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.hMemObjVMCS);277 pVCpu->h waccm.s.vmx.HCPhysVMCS = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.hMemObjVMCS, 0);278 ASMMemZeroPage(pVCpu->h waccm.s.vmx.pvVMCS);279 280 pVCpu->h waccm.s.vmx.cr0_mask = 0;281 pVCpu->h waccm.s.vmx.cr4_mask = 0;276 pVCpu->hm.s.vmx.pvVMCS = RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjVMCS); 277 pVCpu->hm.s.vmx.HCPhysVMCS = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjVMCS, 0); 278 ASMMemZeroPage(pVCpu->hm.s.vmx.pvVMCS); 279 280 pVCpu->hm.s.vmx.cr0_mask = 0; 281 pVCpu->hm.s.vmx.cr4_mask = 0; 282 282 283 283 /* Allocate one page for the virtual APIC page for TPR caching. 
*/ 284 rc = RTR0MemObjAllocCont(&pVCpu->h waccm.s.vmx.hMemObjVAPIC, PAGE_SIZE, false /* executable R0 mapping */);284 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjVAPIC, PAGE_SIZE, false /* executable R0 mapping */); 285 285 AssertRC(rc); 286 286 if (RT_FAILURE(rc)) 287 287 return rc; 288 288 289 pVCpu->h waccm.s.vmx.pbVAPIC = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.hMemObjVAPIC);290 pVCpu->h waccm.s.vmx.HCPhysVAPIC = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.hMemObjVAPIC, 0);291 ASMMemZeroPage(pVCpu->h waccm.s.vmx.pbVAPIC);289 pVCpu->hm.s.vmx.pbVAPIC = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjVAPIC); 290 pVCpu->hm.s.vmx.HCPhysVAPIC = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjVAPIC, 0); 291 ASMMemZeroPage(pVCpu->hm.s.vmx.pbVAPIC); 292 292 293 293 /* Allocate the MSR bitmap if this feature is supported. */ 294 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)295 { 296 rc = RTR0MemObjAllocCont(&pVCpu->h waccm.s.vmx.pMemObjMSRBitmap, PAGE_SIZE, false /* executable R0 mapping */);294 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS) 295 { 296 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.pMemObjMSRBitmap, PAGE_SIZE, false /* executable R0 mapping */); 297 297 AssertRC(rc); 298 298 if (RT_FAILURE(rc)) 299 299 return rc; 300 300 301 pVCpu->h waccm.s.vmx.pMSRBitmap = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap);302 pVCpu->h waccm.s.vmx.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap, 0);303 memset(pVCpu->h waccm.s.vmx.pMSRBitmap, 0xff, PAGE_SIZE);301 pVCpu->hm.s.vmx.pMSRBitmap = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.pMemObjMSRBitmap); 302 pVCpu->hm.s.vmx.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.pMemObjMSRBitmap, 0); 303 memset(pVCpu->hm.s.vmx.pMSRBitmap, 0xff, PAGE_SIZE); 304 304 } 305 305 306 306 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 307 307 /* Allocate one page for the guest MSR load area (for preloading guest MSRs during the world switch). */ 308 rc = RTR0MemObjAllocCont(&pVCpu->h waccm.s.vmx.pMemObjGuestMSR, PAGE_SIZE, false /* executable R0 mapping */);308 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.pMemObjGuestMSR, PAGE_SIZE, false /* executable R0 mapping */); 309 309 AssertRC(rc); 310 310 if (RT_FAILURE(rc)) 311 311 return rc; 312 312 313 pVCpu->h waccm.s.vmx.pGuestMSR = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjGuestMSR);314 pVCpu->h waccm.s.vmx.pGuestMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjGuestMSR, 0);315 Assert(!(pVCpu->h waccm.s.vmx.pGuestMSRPhys & 0xf));316 memset(pVCpu->h waccm.s.vmx.pGuestMSR, 0, PAGE_SIZE);313 pVCpu->hm.s.vmx.pGuestMSR = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.pMemObjGuestMSR); 314 pVCpu->hm.s.vmx.pGuestMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.pMemObjGuestMSR, 0); 315 Assert(!(pVCpu->hm.s.vmx.pGuestMSRPhys & 0xf)); 316 memset(pVCpu->hm.s.vmx.pGuestMSR, 0, PAGE_SIZE); 317 317 318 318 /* Allocate one page for the host MSR load area (for restoring host MSRs after the world switch back). 
*/ 319 rc = RTR0MemObjAllocCont(&pVCpu->h waccm.s.vmx.pMemObjHostMSR, PAGE_SIZE, false /* executable R0 mapping */);319 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.pMemObjHostMSR, PAGE_SIZE, false /* executable R0 mapping */); 320 320 AssertRC(rc); 321 321 if (RT_FAILURE(rc)) 322 322 return rc; 323 323 324 pVCpu->h waccm.s.vmx.pHostMSR = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjHostMSR);325 pVCpu->h waccm.s.vmx.pHostMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjHostMSR, 0);326 Assert(!(pVCpu->h waccm.s.vmx.pHostMSRPhys & 0xf));327 memset(pVCpu->h waccm.s.vmx.pHostMSR, 0, PAGE_SIZE);324 pVCpu->hm.s.vmx.pHostMSR = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.pMemObjHostMSR); 325 pVCpu->hm.s.vmx.pHostMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.pMemObjHostMSR, 0); 326 Assert(!(pVCpu->hm.s.vmx.pHostMSRPhys & 0xf)); 327 memset(pVCpu->hm.s.vmx.pHostMSR, 0, PAGE_SIZE); 328 328 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */ 329 329 330 330 /* Current guest paging mode. */ 331 pVCpu->h waccm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;331 pVCpu->hm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL; 332 332 333 333 #ifdef LOG_ENABLED 334 SUPR0Printf("VMXR0InitVM %x VMCS=%x (%x)\n", pVM, pVCpu->h waccm.s.vmx.pvVMCS, (uint32_t)pVCpu->hwaccm.s.vmx.HCPhysVMCS);334 SUPR0Printf("VMXR0InitVM %x VMCS=%x (%x)\n", pVM, pVCpu->hm.s.vmx.pvVMCS, (uint32_t)pVCpu->hm.s.vmx.HCPhysVMCS); 335 335 #endif 336 336 } … … 352 352 PVMCPU pVCpu = &pVM->aCpus[i]; 353 353 354 if (pVCpu->h waccm.s.vmx.hMemObjVMCS != NIL_RTR0MEMOBJ)355 { 356 RTR0MemObjFree(pVCpu->h waccm.s.vmx.hMemObjVMCS, false);357 pVCpu->h waccm.s.vmx.hMemObjVMCS = NIL_RTR0MEMOBJ;358 pVCpu->h waccm.s.vmx.pvVMCS = 0;359 pVCpu->h waccm.s.vmx.HCPhysVMCS = 0;360 } 361 if (pVCpu->h waccm.s.vmx.hMemObjVAPIC != NIL_RTR0MEMOBJ)362 { 363 RTR0MemObjFree(pVCpu->h waccm.s.vmx.hMemObjVAPIC, false);364 pVCpu->h waccm.s.vmx.hMemObjVAPIC = NIL_RTR0MEMOBJ;365 pVCpu->h waccm.s.vmx.pbVAPIC = 0;366 pVCpu->h waccm.s.vmx.HCPhysVAPIC = 0;367 } 368 if (pVCpu->h waccm.s.vmx.pMemObjMSRBitmap != NIL_RTR0MEMOBJ)369 { 370 RTR0MemObjFree(pVCpu->h waccm.s.vmx.pMemObjMSRBitmap, false);371 pVCpu->h waccm.s.vmx.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;372 pVCpu->h waccm.s.vmx.pMSRBitmap = 0;373 pVCpu->h waccm.s.vmx.pMSRBitmapPhys = 0;354 if (pVCpu->hm.s.vmx.hMemObjVMCS != NIL_RTR0MEMOBJ) 355 { 356 RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjVMCS, false); 357 pVCpu->hm.s.vmx.hMemObjVMCS = NIL_RTR0MEMOBJ; 358 pVCpu->hm.s.vmx.pvVMCS = 0; 359 pVCpu->hm.s.vmx.HCPhysVMCS = 0; 360 } 361 if (pVCpu->hm.s.vmx.hMemObjVAPIC != NIL_RTR0MEMOBJ) 362 { 363 RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjVAPIC, false); 364 pVCpu->hm.s.vmx.hMemObjVAPIC = NIL_RTR0MEMOBJ; 365 pVCpu->hm.s.vmx.pbVAPIC = 0; 366 pVCpu->hm.s.vmx.HCPhysVAPIC = 0; 367 } 368 if (pVCpu->hm.s.vmx.pMemObjMSRBitmap != NIL_RTR0MEMOBJ) 369 { 370 RTR0MemObjFree(pVCpu->hm.s.vmx.pMemObjMSRBitmap, false); 371 pVCpu->hm.s.vmx.pMemObjMSRBitmap = NIL_RTR0MEMOBJ; 372 pVCpu->hm.s.vmx.pMSRBitmap = 0; 373 pVCpu->hm.s.vmx.pMSRBitmapPhys = 0; 374 374 } 375 375 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 376 if (pVCpu->h waccm.s.vmx.pMemObjHostMSR != NIL_RTR0MEMOBJ)377 { 378 RTR0MemObjFree(pVCpu->h waccm.s.vmx.pMemObjHostMSR, false);379 pVCpu->h waccm.s.vmx.pMemObjHostMSR = NIL_RTR0MEMOBJ;380 pVCpu->h waccm.s.vmx.pHostMSR = 0;381 pVCpu->h waccm.s.vmx.pHostMSRPhys = 0;382 } 383 if (pVCpu->h waccm.s.vmx.pMemObjGuestMSR != NIL_RTR0MEMOBJ)384 { 385 RTR0MemObjFree(pVCpu->h waccm.s.vmx.pMemObjGuestMSR, false);386 pVCpu->h waccm.s.vmx.pMemObjGuestMSR = 
NIL_RTR0MEMOBJ;387 pVCpu->h waccm.s.vmx.pGuestMSR = 0;388 pVCpu->h waccm.s.vmx.pGuestMSRPhys = 0;376 if (pVCpu->hm.s.vmx.pMemObjHostMSR != NIL_RTR0MEMOBJ) 377 { 378 RTR0MemObjFree(pVCpu->hm.s.vmx.pMemObjHostMSR, false); 379 pVCpu->hm.s.vmx.pMemObjHostMSR = NIL_RTR0MEMOBJ; 380 pVCpu->hm.s.vmx.pHostMSR = 0; 381 pVCpu->hm.s.vmx.pHostMSRPhys = 0; 382 } 383 if (pVCpu->hm.s.vmx.pMemObjGuestMSR != NIL_RTR0MEMOBJ) 384 { 385 RTR0MemObjFree(pVCpu->hm.s.vmx.pMemObjGuestMSR, false); 386 pVCpu->hm.s.vmx.pMemObjGuestMSR = NIL_RTR0MEMOBJ; 387 pVCpu->hm.s.vmx.pGuestMSR = 0; 388 pVCpu->hm.s.vmx.pGuestMSRPhys = 0; 389 389 } 390 390 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */ 391 391 } 392 if (pVM->h waccm.s.vmx.pMemObjAPIC != NIL_RTR0MEMOBJ)393 { 394 RTR0MemObjFree(pVM->h waccm.s.vmx.pMemObjAPIC, false);395 pVM->h waccm.s.vmx.pMemObjAPIC = NIL_RTR0MEMOBJ;396 pVM->h waccm.s.vmx.pAPIC = 0;397 pVM->h waccm.s.vmx.pAPICPhys = 0;392 if (pVM->hm.s.vmx.pMemObjAPIC != NIL_RTR0MEMOBJ) 393 { 394 RTR0MemObjFree(pVM->hm.s.vmx.pMemObjAPIC, false); 395 pVM->hm.s.vmx.pMemObjAPIC = NIL_RTR0MEMOBJ; 396 pVM->hm.s.vmx.pAPIC = 0; 397 pVM->hm.s.vmx.pAPICPhys = 0; 398 398 } 399 399 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 400 if (pVM->h waccm.s.vmx.pMemObjScratch != NIL_RTR0MEMOBJ)401 { 402 ASMMemZero32(pVM->h waccm.s.vmx.pScratch, PAGE_SIZE);403 RTR0MemObjFree(pVM->h waccm.s.vmx.pMemObjScratch, false);404 pVM->h waccm.s.vmx.pMemObjScratch = NIL_RTR0MEMOBJ;405 pVM->h waccm.s.vmx.pScratch = 0;406 pVM->h waccm.s.vmx.pScratchPhys = 0;400 if (pVM->hm.s.vmx.pMemObjScratch != NIL_RTR0MEMOBJ) 401 { 402 ASMMemZero32(pVM->hm.s.vmx.pScratch, PAGE_SIZE); 403 RTR0MemObjFree(pVM->hm.s.vmx.pMemObjScratch, false); 404 pVM->hm.s.vmx.pMemObjScratch = NIL_RTR0MEMOBJ; 405 pVM->hm.s.vmx.pScratch = 0; 406 pVM->hm.s.vmx.pScratchPhys = 0; 407 407 } 408 408 #endif … … 424 424 AssertReturn(pVM, VERR_INVALID_PARAMETER); 425 425 426 /* Initialize these always, see h waccmR3InitFinalizeR0().*/427 pVM->h waccm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NONE;428 pVM->h waccm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NONE;426 /* Initialize these always, see hmR3InitFinalizeR0().*/ 427 pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NONE; 428 pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NONE; 429 429 430 430 /* Determine optimal flush type for EPT. */ 431 if (pVM->h waccm.s.fNestedPaging)432 { 433 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT)434 { 435 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_SINGLE_CONTEXT)436 pVM->h waccm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_SINGLE_CONTEXT;437 else if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL_CONTEXTS)438 pVM->h waccm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_ALL_CONTEXTS;431 if (pVM->hm.s.fNestedPaging) 432 { 433 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT) 434 { 435 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_SINGLE_CONTEXT) 436 pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_SINGLE_CONTEXT; 437 else if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL_CONTEXTS) 438 pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_ALL_CONTEXTS; 439 439 else 440 440 { … … 443 443 * We cannot ignore EPT at this point as we've already setup Unrestricted Guest execution. 444 444 */ 445 pVM->h waccm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED;445 pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED; 446 446 return VERR_VMX_GENERIC; 447 447 } … … 452 452 * Should never really happen. 
EPT is supported but INVEPT instruction is not supported. 453 453 */ 454 pVM->h waccm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED;454 pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED; 455 455 return VERR_VMX_GENERIC; 456 456 } … … 458 458 459 459 /* Determine optimal flush type for VPID. */ 460 if (pVM->h waccm.s.vmx.fVPID)461 { 462 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID)463 { 464 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT)465 pVM->h waccm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_SINGLE_CONTEXT;466 else if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS)467 pVM->h waccm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_ALL_CONTEXTS;460 if (pVM->hm.s.vmx.fVPID) 461 { 462 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID) 463 { 464 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT) 465 pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_SINGLE_CONTEXT; 466 else if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS) 467 pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_ALL_CONTEXTS; 468 468 else 469 469 { … … 472 472 * We do not handle other flush type combinations, ignore VPID capabilities. 473 473 */ 474 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)474 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR) 475 475 Log(("VMXR0SetupVM: Only VMX_FLUSH_VPID_INDIV_ADDR supported. Ignoring VPID.\n")); 476 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT_RETAIN_GLOBALS)476 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT_RETAIN_GLOBALS) 477 477 Log(("VMXR0SetupVM: Only VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n")); 478 pVM->h waccm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NOT_SUPPORTED;479 pVM->h waccm.s.vmx.fVPID = false;478 pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NOT_SUPPORTED; 479 pVM->hm.s.vmx.fVPID = false; 480 480 } 481 481 } … … 487 487 */ 488 488 Log(("VMXR0SetupVM: VPID supported without INVEPT support. Ignoring VPID.\n")); 489 pVM->h waccm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NOT_SUPPORTED;490 pVM->h waccm.s.vmx.fVPID = false;489 pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NOT_SUPPORTED; 490 pVM->hm.s.vmx.fVPID = false; 491 491 } 492 492 } … … 496 496 PVMCPU pVCpu = &pVM->aCpus[i]; 497 497 498 AssertPtr(pVCpu->h waccm.s.vmx.pvVMCS);498 AssertPtr(pVCpu->hm.s.vmx.pvVMCS); 499 499 500 500 /* Set revision dword at the beginning of the VMCS structure. */ 501 *(uint32_t *)pVCpu->h waccm.s.vmx.pvVMCS = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);501 *(uint32_t *)pVCpu->hm.s.vmx.pvVMCS = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info); 502 502 503 503 /* 504 504 * Clear and activate the VMCS. 505 505 */ 506 Log(("HCPhysVMCS = %RHp\n", pVCpu->h waccm.s.vmx.HCPhysVMCS));507 rc = VMXClearVMCS(pVCpu->h waccm.s.vmx.HCPhysVMCS);506 Log(("HCPhysVMCS = %RHp\n", pVCpu->hm.s.vmx.HCPhysVMCS)); 507 rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVMCS); 508 508 if (RT_FAILURE(rc)) 509 509 goto vmx_end; 510 510 511 rc = VMXActivateVMCS(pVCpu->h waccm.s.vmx.HCPhysVMCS);511 rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVMCS); 512 512 if (RT_FAILURE(rc)) 513 513 goto vmx_end; … … 517 517 * Set required bits to one and zero according to the MSR capabilities. 
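The flush-type probing above (for both INVEPT and INVVPID) follows one pattern: prefer the cheaper single-context flush, fall back to all-contexts, and otherwise mark the flush as unsupported (for VPID that also disables the feature). A compact model of that decision, using made-up constants rather than the real IA32_VMX_EPT_VPID_CAP bit names:

#include <stdint.h>

typedef enum FLUSHKIND
{
    FLUSH_NOT_SUPPORTED = 0,   /* instruction or needed variant unavailable */
    FLUSH_SINGLE_CONTEXT,      /* flush only this VM's mappings */
    FLUSH_ALL_CONTEXTS         /* flush mappings belonging to all VMs */
} FLUSHKIND;

/* Illustrative capability bits standing in for the MSR_IA32_VMX_EPT_CAPS_* flags. */
#define CAP_INV_SUPPORTED        (1u << 0)
#define CAP_INV_SINGLE_CONTEXT   (1u << 1)
#define CAP_INV_ALL_CONTEXTS     (1u << 2)

static FLUSHKIND PickFlushKind(uint32_t fCaps)
{
    if (!(fCaps & CAP_INV_SUPPORTED))
        return FLUSH_NOT_SUPPORTED;
    if (fCaps & CAP_INV_SINGLE_CONTEXT)
        return FLUSH_SINGLE_CONTEXT;
    if (fCaps & CAP_INV_ALL_CONTEXTS)
        return FLUSH_ALL_CONTEXTS;
    return FLUSH_NOT_SUPPORTED;
}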
518 518 */ 519 val = pVM->h waccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;519 val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0; 520 520 val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT /* External interrupts */ 521 521 | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT; /* Non-maskable interrupts */ … … 524 524 * Enable the VMX preemption timer. 525 525 */ 526 if (pVM->h waccm.s.vmx.fUsePreemptTimer)526 if (pVM->hm.s.vmx.fUsePreemptTimer) 527 527 val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER; 528 val &= pVM->h waccm.s.vmx.msr.vmx_pin_ctls.n.allowed1;528 val &= pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1; 529 529 530 530 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, val); … … 535 535 * Set required bits to one and zero according to the MSR capabilities. 536 536 */ 537 val = pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;537 val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0; 538 538 /* Program which event cause VM-exits and which features we want to use. */ 539 539 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT … … 547 547 548 548 /* Without nested paging we should intercept invlpg and cr3 mov instructions. */ 549 if (!pVM->h waccm.s.fNestedPaging)549 if (!pVM->hm.s.fNestedPaging) 550 550 { 551 551 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT … … 558 558 * failure with an invalid control fields error. (combined with some other exit reasons) 559 559 */ 560 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)560 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW) 561 561 { 562 562 /* CR8 reads from the APIC shadow page; writes cause an exit is they lower the TPR below the threshold */ 563 563 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW; 564 Assert(pVM->h waccm.s.vmx.pAPIC);564 Assert(pVM->hm.s.vmx.pAPIC); 565 565 } 566 566 else … … 568 568 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT; 569 569 570 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)571 { 572 Assert(pVCpu->h waccm.s.vmx.pMSRBitmapPhys);570 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS) 571 { 572 Assert(pVCpu->hm.s.vmx.pMSRBitmapPhys); 573 573 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS; 574 574 } … … 579 579 /* Mask away the bits that the CPU doesn't support */ 580 580 /** @todo make sure they don't conflict with the above requirements. */ 581 val &= pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1;582 pVCpu->h waccm.s.vmx.proc_ctls = val;581 val &= pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1; 582 pVCpu->hm.s.vmx.proc_ctls = val; 583 583 584 584 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, val); 585 585 AssertRC(rc); 586 586 587 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)587 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL) 588 588 { 589 589 /* … … 591 591 * Set required bits to one and zero according to the MSR capabilities. 
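Every VMX control word in this function is assembled the same way, as the pin-based controls just above illustrate: start from the bits the CPU requires to be one (the disallowed-zero set), OR in the optional features that are wanted, then mask with the bits the CPU allows to be one. Restated as a tiny helper with made-up parameter names:

#include <stdint.h>

static uint32_t BuildVmxControl(uint32_t fDisallowed0, uint32_t fAllowed1, uint32_t fWanted)
{
    uint32_t uVal = fDisallowed0;   /* mandatory one-bits first         */
    uVal |= fWanted;                /* features we would like to enable */
    uVal &= fAllowed1;              /* drop whatever this CPU cannot do */
    return uVal;
}

The result is then written to the VMCS (VMXWriteVMCS above), and a copy is kept, e.g. in proc_ctls, so later code can toggle individual intercepts without re-deriving the whole word.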
592 592 */ 593 val = pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;593 val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0; 594 594 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT; 595 595 596 if (pVM->h waccm.s.fNestedPaging)596 if (pVM->hm.s.fNestedPaging) 597 597 val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT; 598 598 599 if (pVM->h waccm.s.vmx.fVPID)599 if (pVM->hm.s.vmx.fVPID) 600 600 val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID; 601 601 602 if (pVM->h waccm.s.fHasIoApic)602 if (pVM->hm.s.fHasIoApic) 603 603 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; 604 604 605 if (pVM->h waccm.s.vmx.fUnrestrictedGuest)605 if (pVM->hm.s.vmx.fUnrestrictedGuest) 606 606 val |= VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE; 607 607 608 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)608 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 609 609 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; 610 610 611 611 /* Mask away the bits that the CPU doesn't support */ 612 612 /** @todo make sure they don't conflict with the above requirements. */ 613 val &= pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;614 pVCpu->h waccm.s.vmx.proc_ctls2 = val;613 val &= pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1; 614 pVCpu->hm.s.vmx.proc_ctls2 = val; 615 615 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS2, val); 616 616 AssertRC(rc); … … 656 656 * Set the MSR bitmap address. 657 657 */ 658 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)659 { 660 Assert(pVCpu->h waccm.s.vmx.pMSRBitmapPhys);661 662 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_MSR_BITMAP_FULL, pVCpu->h waccm.s.vmx.pMSRBitmapPhys);658 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS) 659 { 660 Assert(pVCpu->hm.s.vmx.pMSRBitmapPhys); 661 662 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.pMSRBitmapPhys); 663 663 AssertRC(rc); 664 664 … … 676 676 hmR0VmxSetMSRPermission(pVCpu, MSR_K8_GS_BASE, true, true); 677 677 hmR0VmxSetMSRPermission(pVCpu, MSR_K8_FS_BASE, true, true); 678 if (pVCpu->h waccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)678 if (pVCpu->hm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 679 679 hmR0VmxSetMSRPermission(pVCpu, MSR_K8_TSC_AUX, true, true); 680 680 } … … 684 684 * Set the guest & host MSR load/store physical addresses. 
685 685 */ 686 Assert(pVCpu->h waccm.s.vmx.pGuestMSRPhys);687 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, pVCpu->h waccm.s.vmx.pGuestMSRPhys);686 Assert(pVCpu->hm.s.vmx.pGuestMSRPhys); 687 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.pGuestMSRPhys); 688 688 AssertRC(rc); 689 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, pVCpu->h waccm.s.vmx.pGuestMSRPhys);689 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.pGuestMSRPhys); 690 690 AssertRC(rc); 691 Assert(pVCpu->h waccm.s.vmx.pHostMSRPhys);692 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL, pVCpu->h waccm.s.vmx.pHostMSRPhys);691 Assert(pVCpu->hm.s.vmx.pHostMSRPhys); 692 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.pHostMSRPhys); 693 693 AssertRC(rc); 694 694 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */ … … 701 701 AssertRC(rc); 702 702 703 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)704 { 705 Assert(pVM->h waccm.s.vmx.pMemObjAPIC);703 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW) 704 { 705 Assert(pVM->hm.s.vmx.pMemObjAPIC); 706 706 /* Optional */ 707 707 rc = VMXWriteVMCS(VMX_VMCS_CTRL_TPR_THRESHOLD, 0); 708 rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->h waccm.s.vmx.HCPhysVAPIC);709 710 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)711 rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_APIC_ACCESSADDR_FULL, pVM->h waccm.s.vmx.pAPICPhys);708 rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVAPIC); 709 710 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) 711 rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.pAPICPhys); 712 712 713 713 AssertRC(rc); … … 722 722 * VMCS data back to memory. 723 723 */ 724 rc = VMXClearVMCS(pVCpu->h waccm.s.vmx.HCPhysVMCS);724 rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVMCS); 725 725 AssertRC(rc); 726 726 … … 728 728 * Configure the VMCS read cache. 729 729 */ 730 PVMCSCACHE pCache = &pVCpu->h waccm.s.vmx.VMCSCache;730 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache; 731 731 732 732 VMXSetupCachedReadVMCS(pCache, VMX_VMCS64_GUEST_RIP); … … 769 769 VMXSetupCachedReadVMCS(pCache, VMX_VMCS32_RO_IDT_ERRCODE); 770 770 771 if (pVM->h waccm.s.fNestedPaging)771 if (pVM->hm.s.fNestedPaging) 772 772 { 773 773 VMXSetupCachedReadVMCS(pCache, VMX_VMCS64_GUEST_CR3); … … 782 782 * Setup the right TLB function based on CPU capabilities. 
783 783 */ 784 if (pVM->h waccm.s.fNestedPaging && pVM->hwaccm.s.vmx.fVPID)785 pVM->h waccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBBoth;786 else if (pVM->h waccm.s.fNestedPaging)787 pVM->h waccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBEPT;788 else if (pVM->h waccm.s.vmx.fVPID)789 pVM->h waccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBVPID;784 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVPID) 785 pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBBoth; 786 else if (pVM->hm.s.fNestedPaging) 787 pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBEPT; 788 else if (pVM->hm.s.vmx.fVPID) 789 pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBVPID; 790 790 else 791 pVM->h waccm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBDummy;791 pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBDummy; 792 792 793 793 vmx_end: … … 808 808 { 809 809 unsigned ulBit; 810 uint8_t *pMSRBitmap = (uint8_t *)pVCpu->h waccm.s.vmx.pMSRBitmap;810 uint8_t *pMSRBitmap = (uint8_t *)pVCpu->hm.s.vmx.pMSRBitmap; 811 811 812 812 /* … … 867 867 868 868 #ifdef VBOX_WITH_STATISTICS 869 STAM_COUNTER_INC(&pVCpu->h waccm.s.paStatInjectedIrqsR0[iGate & MASK_INJECT_IRQ_STAT]);869 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[iGate & MASK_INJECT_IRQ_STAT]); 870 870 #endif 871 871 … … 892 892 893 893 if ( CPUMIsGuestInRealModeEx(pCtx) 894 && pVM->h waccm.s.vmx.pRealModeTSS)894 && pVM->hm.s.vmx.pRealModeTSS) 895 895 { 896 896 RTGCPHYS GCPhysHandler; … … 967 967 pCtx->eflags.u &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC); 968 968 969 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_SEGMENT_REGS;969 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS; 970 970 return VINF_SUCCESS; 971 971 } … … 998 998 * Dispatch any pending interrupts (injected before, but a VM exit occurred prematurely). 
999 999 */ 1000 if (pVCpu->h waccm.s.Event.fPending)1001 { 1002 Log(("CPU%d: Reinjecting event %RX64 %08x at %RGv cr2=%RX64\n", pVCpu->idCpu, pVCpu->h waccm.s.Event.intInfo,1003 pVCpu->h waccm.s.Event.errCode, (RTGCPTR)pCtx->rip, pCtx->cr2));1004 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatIntReinject);1005 rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, pVCpu->h waccm.s.Event.intInfo, 0, pVCpu->hwaccm.s.Event.errCode);1000 if (pVCpu->hm.s.Event.fPending) 1001 { 1002 Log(("CPU%d: Reinjecting event %RX64 %08x at %RGv cr2=%RX64\n", pVCpu->idCpu, pVCpu->hm.s.Event.intInfo, 1003 pVCpu->hm.s.Event.errCode, (RTGCPTR)pCtx->rip, pCtx->cr2)); 1004 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntReinject); 1005 rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, pVCpu->hm.s.Event.intInfo, 0, pVCpu->hm.s.Event.errCode); 1006 1006 AssertRC(rc); 1007 1007 1008 pVCpu->h waccm.s.Event.fPending = false;1008 pVCpu->hm.s.Event.fPending = false; 1009 1009 return VINF_SUCCESS; 1010 1010 } … … 1040 1040 if (!(pCtx->eflags.u32 & X86_EFL_IF)) 1041 1041 { 1042 if (!(pVCpu->h waccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT))1042 if (!(pVCpu->hm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT)) 1043 1043 { 1044 1044 LogFlow(("Enable irq window exit!\n")); 1045 pVCpu->h waccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT;1046 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);1045 pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT; 1046 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 1047 1047 AssertRC(rc); 1048 1048 } … … 1065 1065 /* Can only happen in rare cases where a pending interrupt is cleared behind our back */ 1066 1066 Assert(!VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC))); 1067 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatSwitchGuestIrq);1067 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq); 1068 1068 /* Just continue */ 1069 1069 } … … 1142 1142 intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); 1143 1143 1144 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatIntInject);1144 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject); 1145 1145 rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, intInfo, 0, errCode); 1146 1146 AssertRC(rc); … … 1166 1166 * Host CPU Context. 1167 1167 */ 1168 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_HOST_CONTEXT)1168 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT) 1169 1169 { 1170 1170 RTIDTR idtr; … … 1185 1185 if (VMX_IS_64BIT_HOST_MODE()) 1186 1186 { 1187 cr3 = h waccmR0Get64bitCR3();1187 cr3 = hmR0Get64bitCR3(); 1188 1188 rc |= VMXWriteVMCS64(VMX_VMCS_HOST_CR3, cr3); 1189 1189 } … … 1250 1250 { 1251 1251 X86XDTR64 gdtr64, idtr64; 1252 h waccmR0Get64bitGDTRandIDTR(&gdtr64, &idtr64);1252 hmR0Get64bitGDTRandIDTR(&gdtr64, &idtr64); 1253 1253 rc = VMXWriteVMCS64(VMX_VMCS_HOST_GDTR_BASE, gdtr64.uAddr); 1254 1254 rc |= VMXWriteVMCS64(VMX_VMCS_HOST_IDTR_BASE, gdtr64.uAddr); … … 1356 1356 * the world switch back to the host. 
1357 1357 */ 1358 PVMXMSR pMsr = (PVMXMSR)pVCpu->h waccm.s.vmx.pHostMSR;1358 PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pHostMSR; 1359 1359 unsigned idxMsr = 0; 1360 1360 … … 1404 1404 # endif 1405 1405 1406 if (pVCpu->h waccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)1406 if (pVCpu->hm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 1407 1407 { 1408 1408 pMsr->u32IndexMSR = MSR_K8_TSC_AUX; … … 1418 1418 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */ 1419 1419 1420 pVCpu->h waccm.s.fContextUseFlags &= ~HWACCM_CHANGED_HOST_CONTEXT;1420 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT; 1421 1421 } 1422 1422 return rc; … … 1515 1515 */ 1516 1516 /** @todo NP state won't change so maybe we should build the initial trap mask up front? */ 1517 if (!pVM->h waccm.s.fNestedPaging)1517 if (!pVM->hm.s.fNestedPaging) 1518 1518 u32TrapMask |= RT_BIT(X86_XCPT_PF); 1519 1519 … … 1531 1531 /** @todo Despite the claim to intercept everything, with NP we do not intercept #PF. Should we? */ 1532 1532 if ( CPUMIsGuestInRealModeEx(pCtx) 1533 && pVM->h waccm.s.vmx.pRealModeTSS)1533 && pVM->hm.s.vmx.pRealModeTSS) 1534 1534 { 1535 1535 u32TrapMask |= RT_BIT(X86_XCPT_DE) … … 1572 1572 X86EFLAGS eflags; 1573 1573 1574 Assert(!(pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_ALL_GUEST));1574 Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST)); 1575 1575 1576 1576 /* … … 1592 1592 */ 1593 1593 if ( CPUMIsGuestInRealModeEx(pCtx) 1594 && pVM->h waccm.s.vmx.pRealModeTSS)1595 { 1596 pVCpu->h waccm.s.vmx.RealMode.eflags = eflags;1594 && pVM->hm.s.vmx.pRealModeTSS) 1595 { 1596 pVCpu->hm.s.vmx.RealMode.eflags = eflags; 1597 1597 1598 1598 eflags.Bits.u1VM = 1; … … 1623 1623 * Set required bits to one and zero according to the MSR capabilities. 1624 1624 */ 1625 val = pVM->h waccm.s.vmx.msr.vmx_entry.n.disallowed0;1625 val = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0; 1626 1626 1627 1627 /* … … 1638 1638 * Mask away the bits that the CPU doesn't support. 1639 1639 */ 1640 val &= pVM->h waccm.s.vmx.msr.vmx_entry.n.allowed1;1640 val &= pVM->hm.s.vmx.msr.vmx_entry.n.allowed1; 1641 1641 rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, val); 1642 1642 AssertRC(rc); … … 1646 1646 * Set required bits to one and zero according to the MSR capabilities. 1647 1647 */ 1648 val = pVM->h waccm.s.vmx.msr.vmx_exit.n.disallowed0;1648 val = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0; 1649 1649 1650 1650 /* … … 1664 1664 Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64)); 1665 1665 #endif 1666 val &= pVM->h waccm.s.vmx.msr.vmx_exit.n.allowed1;1666 val &= pVM->hm.s.vmx.msr.vmx_exit.n.allowed1; 1667 1667 1668 1668 /* … … 1675 1675 * Guest CPU context: ES, CS, SS, DS, FS, GS. 1676 1676 */ 1677 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)1678 { 1679 if (pVM->h waccm.s.vmx.pRealModeTSS)1677 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS) 1678 { 1679 if (pVM->hm.s.vmx.pRealModeTSS) 1680 1680 { 1681 1681 PGMMODE enmGuestMode = PGMGetGuestMode(pVCpu); 1682 if (pVCpu->h waccm.s.vmx.enmLastSeenGuestMode != enmGuestMode)1682 if (pVCpu->hm.s.vmx.enmLastSeenGuestMode != enmGuestMode) 1683 1683 { 1684 1684 /* 1685 1685 * Correct weird requirements for switching to protected mode. 
1686 1686 */ 1687 if ( pVCpu->h waccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL1687 if ( pVCpu->hm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL 1688 1688 && enmGuestMode >= PGMMODE_PROTECTED) 1689 1689 { … … 1708 1708 pCtx->ss.Attr.n.u2Dpl = 0; 1709 1709 } 1710 pVCpu->h waccm.s.vmx.enmLastSeenGuestMode = enmGuestMode;1710 pVCpu->hm.s.vmx.enmLastSeenGuestMode = enmGuestMode; 1711 1711 } 1712 1712 else if ( CPUMIsGuestInRealModeEx(pCtx) … … 1741 1741 * Guest CPU context: LDTR. 1742 1742 */ 1743 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)1743 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR) 1744 1744 { 1745 1745 if (pCtx->ldtr.Sel == 0) … … 1764 1764 * Guest CPU context: TR. 1765 1765 */ 1766 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)1766 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR) 1767 1767 { 1768 1768 /* … … 1771 1771 */ 1772 1772 if ( CPUMIsGuestInRealModeEx(pCtx) 1773 && pVM->h waccm.s.vmx.pRealModeTSS)1773 && pVM->hm.s.vmx.pRealModeTSS) 1774 1774 { 1775 1775 RTGCPHYS GCPhys; 1776 1776 1777 1777 /* We convert it here every time as PCI regions could be reconfigured. */ 1778 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->h waccm.s.vmx.pRealModeTSS, &GCPhys);1778 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys); 1779 1779 AssertRC(rc); 1780 1780 1781 1781 rc = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_TR, 0); 1782 rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_TR_LIMIT, H WACCM_VTX_TSS_SIZE);1782 rc |= VMXWriteVMCS(VMX_VMCS32_GUEST_TR_LIMIT, HM_VTX_TSS_SIZE); 1783 1783 rc |= VMXWriteVMCS64(VMX_VMCS64_GUEST_TR_BASE, GCPhys /* phys = virt in this mode */); 1784 1784 … … 1817 1817 * Guest CPU context: GDTR. 1818 1818 */ 1819 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)1819 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR) 1820 1820 { 1821 1821 rc = VMXWriteVMCS(VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt); … … 1827 1827 * Guest CPU context: IDTR. 1828 1828 */ 1829 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)1829 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR) 1830 1830 { 1831 1831 rc = VMXWriteVMCS(VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt); … … 1837 1837 * Sysenter MSRs. 1838 1838 */ 1839 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_MSR)1839 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_MSR) 1840 1840 { 1841 1841 rc = VMXWriteVMCS(VMX_VMCS32_GUEST_SYSENTER_CS, pCtx->SysEnter.cs); … … 1848 1848 * Guest CPU context: Control registers. 1849 1849 */ 1850 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)1850 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0) 1851 1851 { 1852 1852 val = pCtx->cr0; … … 1867 1867 } 1868 1868 /* Protected mode & paging are always enabled; we use them for emulating real and protected mode without paging too. */ 1869 if (!pVM->h waccm.s.vmx.fUnrestrictedGuest)1869 if (!pVM->hm.s.vmx.fUnrestrictedGuest) 1870 1870 val |= X86_CR0_PE | X86_CR0_PG; 1871 1871 1872 if (pVM->h waccm.s.fNestedPaging)1872 if (pVM->hm.s.fNestedPaging) 1873 1873 { 1874 1874 if (CPUMIsGuestInPagedProtectedModeEx(pCtx)) 1875 1875 { 1876 1876 /* Disable CR3 read/write monitoring as we don't need it for EPT. 
*/ 1877 pVCpu->h waccm.s.vmx.proc_ctls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT1877 pVCpu->hm.s.vmx.proc_ctls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT 1878 1878 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT); 1879 1879 } … … 1881 1881 { 1882 1882 /* Reenable CR3 read/write monitoring as our identity mapped page table is active. */ 1883 pVCpu->h waccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT1883 pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT 1884 1884 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT; 1885 1885 } 1886 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);1886 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 1887 1887 AssertRC(rc); 1888 1888 } … … 1915 1915 val |= X86_CR0_TS | X86_CR0_ET | X86_CR0_MP; 1916 1916 1917 pVCpu->h waccm.s.vmx.cr0_mask = val;1917 pVCpu->hm.s.vmx.cr0_mask = val; 1918 1918 1919 1919 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_CR0_MASK, val); … … 1922 1922 } 1923 1923 1924 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)1924 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4) 1925 1925 { 1926 1926 rc = VMXWriteVMCS(VMX_VMCS_CTRL_CR4_READ_SHADOW, pCtx->cr4); 1927 1927 Log2(("Guest CR4-shadow %08x\n", pCtx->cr4)); 1928 1928 /* Set the required bits in cr4 too (currently X86_CR4_VMXE). */ 1929 val = pCtx->cr4 | (uint32_t)pVM->h waccm.s.vmx.msr.vmx_cr4_fixed0;1930 1931 if (!pVM->h waccm.s.fNestedPaging)1932 { 1933 switch (pVCpu->h waccm.s.enmShadowMode)1929 val = pCtx->cr4 | (uint32_t)pVM->hm.s.vmx.msr.vmx_cr4_fixed0; 1930 1931 if (!pVM->hm.s.fNestedPaging) 1932 { 1933 switch (pVCpu->hm.s.enmShadowMode) 1934 1934 { 1935 1935 case PGMMODE_REAL: /* Real mode -> emulated using v86 mode */ … … 1959 1959 } 1960 1960 else if ( !CPUMIsGuestInPagedProtectedModeEx(pCtx) 1961 && !pVM->h waccm.s.vmx.fUnrestrictedGuest)1961 && !pVM->hm.s.vmx.fUnrestrictedGuest) 1962 1962 { 1963 1963 /* We use 4 MB pages in our identity mapping page table for real and protected mode without paging. */ … … 1971 1971 */ 1972 1972 if ( CPUMIsGuestInRealModeEx(pCtx) 1973 && pVM->h waccm.s.vmx.pRealModeTSS)1973 && pVM->hm.s.vmx.pRealModeTSS) 1974 1974 { 1975 1975 val &= ~X86_CR4_VME; … … 1988 1988 | X86_CR4_PSE 1989 1989 | X86_CR4_VMXE; 1990 pVCpu->h waccm.s.vmx.cr4_mask = val;1990 pVCpu->hm.s.vmx.cr4_mask = val; 1991 1991 1992 1992 rc |= VMXWriteVMCS(VMX_VMCS_CTRL_CR4_MASK, val); … … 1997 1997 #if 0 1998 1998 /* Enable single stepping if requested and CPU supports it. 
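The cr0_mask/cr4_mask values stored above define which CR0/CR4 bits the hypervisor owns: guest accesses to masked bits are intercepted and guest reads of them are served from the read shadow written alongside, while unmasked bits pass straight through to the real register. The same relation is used further down on VM-exit when the guest-visible value is reconstructed. In one line, with illustrative names:

#include <stdint.h>

/* Guest-visible CRx = shadow where we own the bit, hardware value elsewhere. */
static uint64_t GuestVisibleCrX(uint64_t uReadShadow, uint64_t uRealCrX, uint64_t fOwnerMask)
{
    return (uReadShadow & fOwnerMask) | (uRealCrX & ~fOwnerMask);
}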
*/ 1999 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)1999 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG) 2000 2000 if (DBGFIsStepping(pVCpu)) 2001 2001 { 2002 pVCpu->h waccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;2003 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);2002 pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG; 2003 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 2004 2004 AssertRC(rc); 2005 2005 } 2006 2006 #endif 2007 2007 2008 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)2009 { 2010 if (pVM->h waccm.s.fNestedPaging)2008 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3) 2009 { 2010 if (pVM->hm.s.fNestedPaging) 2011 2011 { 2012 2012 Assert(PGMGetHyperCR3(pVCpu)); 2013 pVCpu->h waccm.s.vmx.GCPhysEPTP = PGMGetHyperCR3(pVCpu);2014 2015 Assert(!(pVCpu->h waccm.s.vmx.GCPhysEPTP & 0xfff));2013 pVCpu->hm.s.vmx.GCPhysEPTP = PGMGetHyperCR3(pVCpu); 2014 2015 Assert(!(pVCpu->hm.s.vmx.GCPhysEPTP & 0xfff)); 2016 2016 /** @todo Check the IA32_VMX_EPT_VPID_CAP MSR for other supported memory types. */ 2017 pVCpu->h waccm.s.vmx.GCPhysEPTP |= VMX_EPT_MEMTYPE_WB2017 pVCpu->hm.s.vmx.GCPhysEPTP |= VMX_EPT_MEMTYPE_WB 2018 2018 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT); 2019 2019 2020 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_EPTP_FULL, pVCpu->h waccm.s.vmx.GCPhysEPTP);2020 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.GCPhysEPTP); 2021 2021 AssertRC(rc); 2022 2022 2023 2023 if ( !CPUMIsGuestInPagedProtectedModeEx(pCtx) 2024 && !pVM->h waccm.s.vmx.fUnrestrictedGuest)2024 && !pVM->hm.s.vmx.fUnrestrictedGuest) 2025 2025 { 2026 2026 RTGCPHYS GCPhys; 2027 2027 2028 2028 /* We convert it here every time as PCI regions could be reconfigured. */ 2029 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->h waccm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);2030 AssertMsgRC(rc, ("pNonPagingModeEPTPageTable = %RGv\n", pVM->h waccm.s.vmx.pNonPagingModeEPTPageTable));2029 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys); 2030 AssertMsgRC(rc, ("pNonPagingModeEPTPageTable = %RGv\n", pVM->hm.s.vmx.pNonPagingModeEPTPageTable)); 2031 2031 2032 2032 /* … … 2058 2058 * Guest CPU context: Debug registers. 2059 2059 */ 2060 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)2060 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG) 2061 2061 { 2062 2062 pCtx->dr[6] |= X86_DR6_INIT_VAL; /* set all reserved bits to 1. */ … … 2094 2094 && !DBGFIsStepping(pVCpu)) 2095 2095 { 2096 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatDRxArmed);2096 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed); 2097 2097 2098 2098 /* Disable DRx move intercepts. 
*/ 2099 pVCpu->h waccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;2100 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);2099 pVCpu->hm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT; 2100 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 2101 2101 AssertRC(rc); 2102 2102 … … 2123 2123 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE; 2124 2124 #elif HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 2125 pVCpu->h waccm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;2125 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64; 2126 2126 #else 2127 2127 # ifdef VBOX_WITH_HYBRID_32BIT_KERNEL 2128 if (!pVM->h waccm.s.fAllow64BitGuests)2128 if (!pVM->hm.s.fAllow64BitGuests) 2129 2129 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE; 2130 2130 # endif 2131 pVCpu->h waccm.s.vmx.pfnStartVM = VMXR0StartVM64;2132 #endif 2133 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_MSR)2131 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64; 2132 #endif 2133 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_MSR) 2134 2134 { 2135 2135 /* Update these as wrmsr might have changed them. */ … … 2142 2142 else 2143 2143 { 2144 pVCpu->h waccm.s.vmx.pfnStartVM = VMXR0StartVM32;2144 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32; 2145 2145 } 2146 2146 … … 2152 2152 * during VM-entry and restored into the VM-exit store area during VM-exit. 2153 2153 */ 2154 PVMXMSR pMsr = (PVMXMSR)pVCpu->h waccm.s.vmx.pGuestMSR;2154 PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pGuestMSR; 2155 2155 unsigned idxMsr = 0; 2156 2156 … … 2196 2196 } 2197 2197 2198 if ( pVCpu->h waccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP2198 if ( pVCpu->hm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP 2199 2199 && (u32GstExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)) 2200 2200 { … … 2206 2206 } 2207 2207 2208 pVCpu->h waccm.s.vmx.cCachedMSRs = idxMsr;2208 pVCpu->hm.s.vmx.cCachedMSRs = idxMsr; 2209 2209 2210 2210 rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT, idxMsr); … … 2216 2216 2217 2217 bool fOffsettedTsc; 2218 if (pVM->h waccm.s.vmx.fUsePreemptTimer)2219 { 2220 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->h waccm.s.vmx.u64TSCOffset);2218 if (pVM->hm.s.vmx.fUsePreemptTimer) 2219 { 2220 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->hm.s.vmx.u64TSCOffset); 2221 2221 2222 2222 /* Make sure the returned values have sane upper and lower boundaries. 
*/ … … 2226 2226 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */ 2227 2227 2228 cTicksToDeadline >>= pVM->h waccm.s.vmx.cPreemptTimerShift;2228 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift; 2229 2229 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16); 2230 2230 rc = VMXWriteVMCS(VMX_VMCS32_GUEST_PREEMPTION_TIMER_VALUE, cPreemptionTickCount); … … 2232 2232 } 2233 2233 else 2234 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->h waccm.s.vmx.u64TSCOffset);2234 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset); 2235 2235 2236 2236 if (fOffsettedTsc) 2237 2237 { 2238 2238 uint64_t u64CurTSC = ASMReadTSC(); 2239 if (u64CurTSC + pVCpu->h waccm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))2239 if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu)) 2240 2240 { 2241 2241 /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */ 2242 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_TSC_OFFSET_FULL, pVCpu->h waccm.s.vmx.u64TSCOffset);2242 rc = VMXWriteVMCS64(VMX_VMCS_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); 2243 2243 AssertRC(rc); 2244 2244 2245 pVCpu->h waccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;2246 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);2245 pVCpu->hm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT; 2246 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 2247 2247 AssertRC(rc); 2248 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatTSCOffset);2248 STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCOffset); 2249 2249 } 2250 2250 else … … 2252 2252 /* Fall back to rdtsc, rdtscp emulation as we would otherwise pass decreasing tsc values to the guest. 
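The TSC handling above reduces to one safety test: hardware TSC offsetting may only stay enabled while the offsetted host TSC cannot appear to run backwards past the last TSC value the guest has already observed; otherwise RDTSC/RDTSCP exiting is switched on and the TSC is emulated. A minimal restatement:

#include <stdbool.h>
#include <stdint.h>

/* True if VMX TSC offsetting is safe, false if RDTSC must be intercepted instead. */
static bool CanUseTscOffsetting(uint64_t uHostTsc, uint64_t uTscOffset, uint64_t uLastSeenGuestTsc)
{
    return uHostTsc + uTscOffset >= uLastSeenGuestTsc;
}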
*/ 2253 2253 LogFlow(("TSC %RX64 offset %RX64 time=%RX64 last=%RX64 (diff=%RX64, virt_tsc=%RX64)\n", u64CurTSC, 2254 pVCpu->h waccm.s.vmx.u64TSCOffset, u64CurTSC + pVCpu->hwaccm.s.vmx.u64TSCOffset,2255 TMCpuTickGetLastSeen(pVCpu), TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pVCpu->h waccm.s.vmx.u64TSCOffset,2254 pVCpu->hm.s.vmx.u64TSCOffset, u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset, 2255 TMCpuTickGetLastSeen(pVCpu), TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pVCpu->hm.s.vmx.u64TSCOffset, 2256 2256 TMCpuTickGet(pVCpu))); 2257 pVCpu->h waccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;2258 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);2257 pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT; 2258 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 2259 2259 AssertRC(rc); 2260 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatTSCInterceptOverFlow);2260 STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCInterceptOverFlow); 2261 2261 } 2262 2262 } 2263 2263 else 2264 2264 { 2265 pVCpu->h waccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;2266 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);2265 pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT; 2266 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 2267 2267 AssertRC(rc); 2268 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatTSCIntercept);2268 STAM_COUNTER_INC(&pVCpu->hm.s.StatTSCIntercept); 2269 2269 } 2270 2270 2271 2271 /* Done with the major changes */ 2272 pVCpu->h waccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;2272 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_ALL_GUEST; 2273 2273 2274 2274 /* Minimal guest state update (ESP, EIP, EFLAGS mostly) */ … … 2318 2318 VMXReadCachedVMCS(VMX_VMCS_CTRL_CR0_READ_SHADOW, &valShadow); 2319 2319 VMXReadCachedVMCS(VMX_VMCS64_GUEST_CR0, &val); 2320 val = (valShadow & pVCpu->h waccm.s.vmx.cr0_mask) | (val & ~pVCpu->hwaccm.s.vmx.cr0_mask);2320 val = (valShadow & pVCpu->hm.s.vmx.cr0_mask) | (val & ~pVCpu->hm.s.vmx.cr0_mask); 2321 2321 CPUMSetGuestCR0(pVCpu, val); 2322 2322 2323 2323 VMXReadCachedVMCS(VMX_VMCS_CTRL_CR4_READ_SHADOW, &valShadow); 2324 2324 VMXReadCachedVMCS(VMX_VMCS64_GUEST_CR4, &val); 2325 val = (valShadow & pVCpu->h waccm.s.vmx.cr4_mask) | (val & ~pVCpu->hwaccm.s.vmx.cr4_mask);2325 val = (valShadow & pVCpu->hm.s.vmx.cr4_mask) | (val & ~pVCpu->hm.s.vmx.cr4_mask); 2326 2326 CPUMSetGuestCR4(pVCpu, val); 2327 2327 … … 2330 2330 * the nested paging case where CR3 & CR4 can be changed by the guest. 2331 2331 */ 2332 if ( pVM->h waccm.s.fNestedPaging2332 if ( pVM->hm.s.fNestedPaging 2333 2333 && CPUMIsGuestInPagedProtectedModeEx(pCtx)) /** @todo check if we will always catch mode switches and such... */ 2334 2334 { 2335 PVMCSCACHE pCache = &pVCpu->h waccm.s.vmx.VMCSCache;2335 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache; 2336 2336 2337 2337 /* Can be updated behind our back in the nested paging case. */ … … 2384 2384 /* Real mode emulation using v86 mode. */ 2385 2385 if ( CPUMIsGuestInRealModeEx(pCtx) 2386 && pVM->h waccm.s.vmx.pRealModeTSS)2386 && pVM->hm.s.vmx.pRealModeTSS) 2387 2387 { 2388 2388 /* Hide our emulation flags */ … … 2390 2390 2391 2391 /* Restore original IOPL setting as we always use 0. 
*/ 2392 pCtx->eflags.Bits.u2IOPL = pVCpu->h waccm.s.vmx.RealMode.eflags.Bits.u2IOPL;2392 pCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.eflags.Bits.u2IOPL; 2393 2393 2394 2394 /* Force a TR resync every time in case we switch modes. */ 2395 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_TR;2395 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_TR; 2396 2396 } 2397 2397 else … … 2405 2405 * Save the possibly changed MSRs that we automatically restore and save during a world switch. 2406 2406 */ 2407 for (unsigned i = 0; i < pVCpu->h waccm.s.vmx.cCachedMSRs; i++)2408 { 2409 PVMXMSR pMsr = (PVMXMSR)pVCpu->h waccm.s.vmx.pGuestMSR;2407 for (unsigned i = 0; i < pVCpu->hm.s.vmx.cCachedMSRs; i++) 2408 { 2409 PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pGuestMSR; 2410 2410 pMsr += i; 2411 2411 … … 2458 2458 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH); 2459 2459 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN); 2460 pVCpu->h waccm.s.TlbShootdown.cPages = 0;2460 pVCpu->hm.s.TlbShootdown.cPages = 0; 2461 2461 return; 2462 2462 } … … 2473 2473 PHMGLOBLCPUINFO pCpu; 2474 2474 2475 Assert(pVM->h waccm.s.fNestedPaging && pVM->hwaccm.s.vmx.fVPID);2476 2477 pCpu = H WACCMR0GetCurrentCpu();2475 Assert(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVPID); 2476 2477 pCpu = HMR0GetCurrentCpu(); 2478 2478 2479 2479 /* … … 2484 2484 */ 2485 2485 bool fNewASID = false; 2486 if ( pVCpu->h waccm.s.idLastCpu != pCpu->idCpu2487 || pVCpu->h waccm.s.cTLBFlushes != pCpu->cTLBFlushes)2488 { 2489 pVCpu->h waccm.s.fForceTLBFlush = true;2486 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu 2487 || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes) 2488 { 2489 pVCpu->hm.s.fForceTLBFlush = true; 2490 2490 fNewASID = true; 2491 2491 } … … 2495 2495 */ 2496 2496 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH)) 2497 pVCpu->h waccm.s.fForceTLBFlush = true;2498 2499 pVCpu->h waccm.s.idLastCpu = pCpu->idCpu;2500 2501 if (pVCpu->h waccm.s.fForceTLBFlush)2497 pVCpu->hm.s.fForceTLBFlush = true; 2498 2499 pVCpu->hm.s.idLastCpu = pCpu->idCpu; 2500 2501 if (pVCpu->hm.s.fForceTLBFlush) 2502 2502 { 2503 2503 if (fNewASID) 2504 2504 { 2505 2505 ++pCpu->uCurrentASID; 2506 if (pCpu->uCurrentASID >= pVM->h waccm.s.uMaxASID)2506 if (pCpu->uCurrentASID >= pVM->hm.s.uMaxASID) 2507 2507 { 2508 2508 pCpu->uCurrentASID = 1; /* start at 1; host uses 0 */ … … 2511 2511 } 2512 2512 2513 pVCpu->h waccm.s.uCurrentASID = pCpu->uCurrentASID;2513 pVCpu->hm.s.uCurrentASID = pCpu->uCurrentASID; 2514 2514 if (pCpu->fFlushASIDBeforeUse) 2515 2515 { 2516 hmR0VmxFlushVPID(pVM, pVCpu, pVM->h waccm.s.vmx.enmFlushVPID, 0 /* GCPtr */);2516 hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVPID, 0 /* GCPtr */); 2517 2517 #ifdef VBOX_WITH_STATISTICS 2518 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatFlushASID);2518 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushASID); 2519 2519 #endif 2520 2520 } … … 2522 2522 else 2523 2523 { 2524 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT)2524 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT) 2525 2525 hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_SINGLE_CONTEXT, 0 /* GCPtr */); 2526 2526 else 2527 hmR0VmxFlushEPT(pVM, pVCpu, pVM->h waccm.s.vmx.enmFlushEPT);2527 hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT); 2528 2528 2529 2529 #ifdef VBOX_WITH_STATISTICS … … 2532 2532 * as ASID flushes too, better than including them under StatFlushTLBWorldSwitch. 
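The VPID/ASID bookkeeping in the tagged-TLB setup above can be summarised as: a vCPU needs a fresh ASID, and therefore a flush, whenever it lands on a different host CPU or that CPU's flush generation has changed; when the per-CPU ASID counter wraps it restarts at 1 (ASID 0 belongs to the host) and the generation is bumped so every previously issued ASID becomes stale. A simplified standalone model with made-up structure names:

#include <stdbool.h>
#include <stdint.h>

typedef struct HOSTCPUSTATE { uint32_t uCurrentAsid; uint32_t cFlushGen; } HOSTCPUSTATE;
typedef struct VCPUSTATE    { uint32_t idLastCpu; uint32_t cFlushGen; uint32_t uAsid; } VCPUSTATE;

/* Returns true when this world switch requires a TLB flush. */
static bool UpdateAsid(VCPUSTATE *pVCpu, HOSTCPUSTATE *pCpu, uint32_t idCpu, uint32_t cMaxAsids)
{
    bool fFlush = pVCpu->idLastCpu != idCpu            /* migrated to another host CPU */
               || pVCpu->cFlushGen != pCpu->cFlushGen; /* this CPU recycled its ASIDs  */

    pVCpu->idLastCpu = idCpu;
    if (fFlush)
    {
        if (++pCpu->uCurrentAsid >= cMaxAsids)
        {
            pCpu->uCurrentAsid = 1;   /* ASID 0 is reserved for the host */
            pCpu->cFlushGen++;        /* invalidates every ASID handed out before */
        }
        pVCpu->uAsid     = pCpu->uCurrentAsid;
        pVCpu->cFlushGen = pCpu->cFlushGen;
    }
    return fFlush;
}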
2533 2533 */ 2534 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatFlushASID);2535 #endif 2536 } 2537 2538 pVCpu->h waccm.s.cTLBFlushes = pCpu->cTLBFlushes;2539 pVCpu->h waccm.s.fForceTLBFlush = false;2534 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushASID); 2535 #endif 2536 } 2537 2538 pVCpu->hm.s.cTLBFlushes = pCpu->cTLBFlushes; 2539 pVCpu->hm.s.fForceTLBFlush = false; 2540 2540 } 2541 2541 else 2542 2542 { 2543 AssertMsg(pVCpu->h waccm.s.uCurrentASID && pCpu->uCurrentASID,2544 ("h waccm->uCurrentASID=%lu hwaccm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n",2545 pVCpu->h waccm.s.uCurrentASID, pVCpu->hwaccm.s.cTLBFlushes,2543 AssertMsg(pVCpu->hm.s.uCurrentASID && pCpu->uCurrentASID, 2544 ("hm->uCurrentASID=%lu hm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n", 2545 pVCpu->hm.s.uCurrentASID, pVCpu->hm.s.cTLBFlushes, 2546 2546 pCpu->uCurrentASID, pCpu->cTLBFlushes)); 2547 2547 2548 2548 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should 2549 * not be executed. See h waccmQueueInvlPage() where it is commented2549 * not be executed. See hmQueueInvlPage() where it is commented 2550 2550 * out. Support individual entry flushing someday. */ 2551 2551 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN)) 2552 2552 { 2553 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatTlbShootdown);2553 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown); 2554 2554 2555 2555 /* … … 2557 2557 * as supported by the CPU. 2558 2558 */ 2559 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)2559 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR) 2560 2560 { 2561 for (unsigned i = 0; i < pVCpu->h waccm.s.TlbShootdown.cPages; i++)2562 hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->h waccm.s.TlbShootdown.aPages[i]);2561 for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++) 2562 hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]); 2563 2563 } 2564 2564 else 2565 hmR0VmxFlushEPT(pVM, pVCpu, pVM->h waccm.s.vmx.enmFlushEPT);2565 hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT); 2566 2566 } 2567 2567 else 2568 2568 { 2569 2569 #ifdef VBOX_WITH_STATISTICS 2570 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatNoFlushTLBWorldSwitch);2571 #endif 2572 } 2573 } 2574 pVCpu->h waccm.s.TlbShootdown.cPages = 0;2570 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTLBWorldSwitch); 2571 #endif 2572 } 2573 } 2574 pVCpu->hm.s.TlbShootdown.cPages = 0; 2575 2575 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN); 2576 2576 2577 AssertMsg(pVCpu->h waccm.s.cTLBFlushes == pCpu->cTLBFlushes,2578 ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->h waccm.s.cTLBFlushes, pCpu->cTLBFlushes));2579 AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->h waccm.s.uMaxASID,2577 AssertMsg(pVCpu->hm.s.cTLBFlushes == pCpu->cTLBFlushes, 2578 ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes)); 2579 AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hm.s.uMaxASID, 2580 2580 ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID)); 2581 AssertMsg(pVCpu->h waccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID,2582 ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->h waccm.s.uCurrentASID));2581 AssertMsg(pVCpu->hm.s.uCurrentASID >= 1 && pVCpu->hm.s.uCurrentASID < pVM->hm.s.uMaxASID, 2582 ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentASID)); 2583 2583 2584 2584 /* Update 
VMCS with the VPID. */ 2585 int rc = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->h waccm.s.uCurrentASID);2585 int rc = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentASID); 2586 2586 AssertRC(rc); 2587 2587 } … … 2599 2599 PHMGLOBLCPUINFO pCpu; 2600 2600 2601 Assert(pVM->h waccm.s.fNestedPaging);2602 Assert(!pVM->h waccm.s.vmx.fVPID);2603 2604 pCpu = H WACCMR0GetCurrentCpu();2601 Assert(pVM->hm.s.fNestedPaging); 2602 Assert(!pVM->hm.s.vmx.fVPID); 2603 2604 pCpu = HMR0GetCurrentCpu(); 2605 2605 2606 2606 /* … … 2609 2609 * A change in the TLB flush count implies the host Cpu is online after a suspend/resume. 2610 2610 */ 2611 if ( pVCpu->h waccm.s.idLastCpu != pCpu->idCpu2612 || pVCpu->h waccm.s.cTLBFlushes != pCpu->cTLBFlushes)2613 { 2614 pVCpu->h waccm.s.fForceTLBFlush = true;2611 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu 2612 || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes) 2613 { 2614 pVCpu->hm.s.fForceTLBFlush = true; 2615 2615 } 2616 2616 … … 2619 2619 */ 2620 2620 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH)) 2621 pVCpu->h waccm.s.fForceTLBFlush = true;2622 2623 pVCpu->h waccm.s.idLastCpu = pCpu->idCpu;2624 pVCpu->h waccm.s.cTLBFlushes = pCpu->cTLBFlushes;2625 2626 if (pVCpu->h waccm.s.fForceTLBFlush)2627 hmR0VmxFlushEPT(pVM, pVCpu, pVM->h waccm.s.vmx.enmFlushEPT);2621 pVCpu->hm.s.fForceTLBFlush = true; 2622 2623 pVCpu->hm.s.idLastCpu = pCpu->idCpu; 2624 pVCpu->hm.s.cTLBFlushes = pCpu->cTLBFlushes; 2625 2626 if (pVCpu->hm.s.fForceTLBFlush) 2627 hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT); 2628 2628 else 2629 2629 { 2630 2630 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should 2631 * not be executed. See h waccmQueueInvlPage() where it is commented2631 * not be executed. See hmQueueInvlPage() where it is commented 2632 2632 * out. Support individual entry flushing someday. */ 2633 2633 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN)) … … 2636 2636 * We cannot flush individual entries without VPID support. Flush using EPT. 2637 2637 */ 2638 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatTlbShootdown);2639 hmR0VmxFlushEPT(pVM, pVCpu, pVM->h waccm.s.vmx.enmFlushEPT);2640 } 2641 } 2642 pVCpu->h waccm.s.TlbShootdown.cPages= 0;2638 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown); 2639 hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT); 2640 } 2641 } 2642 pVCpu->hm.s.TlbShootdown.cPages= 0; 2643 2643 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN); 2644 2644 2645 2645 #ifdef VBOX_WITH_STATISTICS 2646 if (pVCpu->h waccm.s.fForceTLBFlush)2647 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatFlushTLBWorldSwitch);2646 if (pVCpu->hm.s.fForceTLBFlush) 2647 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTLBWorldSwitch); 2648 2648 else 2649 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatNoFlushTLBWorldSwitch);2649 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTLBWorldSwitch); 2650 2650 #endif 2651 2651 } … … 2663 2663 PHMGLOBLCPUINFO pCpu; 2664 2664 2665 Assert(pVM->h waccm.s.vmx.fVPID);2666 Assert(!pVM->h waccm.s.fNestedPaging);2667 2668 pCpu = H WACCMR0GetCurrentCpu();2665 Assert(pVM->hm.s.vmx.fVPID); 2666 Assert(!pVM->hm.s.fNestedPaging); 2667 2668 pCpu = HMR0GetCurrentCpu(); 2669 2669 2670 2670 /* … … 2674 2674 * or the host Cpu is online after a suspend/resume, so we cannot reuse the current ASID anymore. 
2675 2675 */ 2676 if ( pVCpu->h waccm.s.idLastCpu != pCpu->idCpu2677 || pVCpu->h waccm.s.cTLBFlushes != pCpu->cTLBFlushes)2676 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu 2677 || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes) 2678 2678 { 2679 2679 /* Force a TLB flush on VM entry. */ 2680 pVCpu->h waccm.s.fForceTLBFlush = true;2680 pVCpu->hm.s.fForceTLBFlush = true; 2681 2681 } 2682 2682 … … 2685 2685 */ 2686 2686 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH)) 2687 pVCpu->h waccm.s.fForceTLBFlush = true;2688 2689 pVCpu->h waccm.s.idLastCpu = pCpu->idCpu;2690 2691 if (pVCpu->h waccm.s.fForceTLBFlush)2687 pVCpu->hm.s.fForceTLBFlush = true; 2688 2689 pVCpu->hm.s.idLastCpu = pCpu->idCpu; 2690 2691 if (pVCpu->hm.s.fForceTLBFlush) 2692 2692 { 2693 2693 ++pCpu->uCurrentASID; 2694 if (pCpu->uCurrentASID >= pVM->h waccm.s.uMaxASID)2694 if (pCpu->uCurrentASID >= pVM->hm.s.uMaxASID) 2695 2695 { 2696 2696 pCpu->uCurrentASID = 1; /* start at 1; host uses 0 */ … … 2699 2699 } 2700 2700 else 2701 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatFlushASID);2702 2703 pVCpu->h waccm.s.fForceTLBFlush = false;2704 pVCpu->h waccm.s.cTLBFlushes = pCpu->cTLBFlushes;2705 pVCpu->h waccm.s.uCurrentASID = pCpu->uCurrentASID;2701 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushASID); 2702 2703 pVCpu->hm.s.fForceTLBFlush = false; 2704 pVCpu->hm.s.cTLBFlushes = pCpu->cTLBFlushes; 2705 pVCpu->hm.s.uCurrentASID = pCpu->uCurrentASID; 2706 2706 if (pCpu->fFlushASIDBeforeUse) 2707 hmR0VmxFlushVPID(pVM, pVCpu, pVM->h waccm.s.vmx.enmFlushVPID, 0 /* GCPtr */);2707 hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVPID, 0 /* GCPtr */); 2708 2708 } 2709 2709 else 2710 2710 { 2711 AssertMsg(pVCpu->h waccm.s.uCurrentASID && pCpu->uCurrentASID,2712 ("h waccm->uCurrentASID=%lu hwaccm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n",2713 pVCpu->h waccm.s.uCurrentASID, pVCpu->hwaccm.s.cTLBFlushes,2711 AssertMsg(pVCpu->hm.s.uCurrentASID && pCpu->uCurrentASID, 2712 ("hm->uCurrentASID=%lu hm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n", 2713 pVCpu->hm.s.uCurrentASID, pVCpu->hm.s.cTLBFlushes, 2714 2714 pCpu->uCurrentASID, pCpu->cTLBFlushes)); 2715 2715 2716 2716 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should 2717 * not be executed. See h waccmQueueInvlPage() where it is commented2717 * not be executed. See hmQueueInvlPage() where it is commented 2718 2718 * out. Support individual entry flushing someday. */ 2719 2719 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN)) … … 2723 2723 * as supported by the CPU. 
2724 2724 */ 2725 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)2725 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR) 2726 2726 { 2727 for (unsigned i = 0; i < pVCpu->h waccm.s.TlbShootdown.cPages; i++)2728 hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->h waccm.s.TlbShootdown.aPages[i]);2727 for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++) 2728 hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]); 2729 2729 } 2730 2730 else 2731 hmR0VmxFlushVPID(pVM, pVCpu, pVM->h waccm.s.vmx.enmFlushVPID, 0 /* GCPtr */);2732 } 2733 } 2734 pVCpu->h waccm.s.TlbShootdown.cPages = 0;2731 hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVPID, 0 /* GCPtr */); 2732 } 2733 } 2734 pVCpu->hm.s.TlbShootdown.cPages = 0; 2735 2735 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN); 2736 2736 2737 AssertMsg(pVCpu->h waccm.s.cTLBFlushes == pCpu->cTLBFlushes,2738 ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->h waccm.s.cTLBFlushes, pCpu->cTLBFlushes));2739 AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->h waccm.s.uMaxASID,2737 AssertMsg(pVCpu->hm.s.cTLBFlushes == pCpu->cTLBFlushes, 2738 ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes)); 2739 AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hm.s.uMaxASID, 2740 2740 ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID)); 2741 AssertMsg(pVCpu->h waccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID,2742 ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->h waccm.s.uCurrentASID));2743 2744 int rc = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->h waccm.s.uCurrentASID);2741 AssertMsg(pVCpu->hm.s.uCurrentASID >= 1 && pVCpu->hm.s.uCurrentASID < pVM->hm.s.uMaxASID, 2742 ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentASID)); 2743 2744 int rc = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentASID); 2745 2745 AssertRC(rc); 2746 2746 2747 2747 # ifdef VBOX_WITH_STATISTICS 2748 if (pVCpu->h waccm.s.fForceTLBFlush)2749 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatFlushTLBWorldSwitch);2748 if (pVCpu->hm.s.fForceTLBFlush) 2749 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTLBWorldSwitch); 2750 2750 else 2751 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatNoFlushTLBWorldSwitch);2751 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTLBWorldSwitch); 2752 2752 # endif 2753 2753 } … … 2764 2764 VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 2765 2765 { 2766 STAM_PROFILE_ADV_START(&pVCpu->h waccm.s.StatEntry, x);2767 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->h waccm.s.StatExit1);2768 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->h waccm.s.StatExit2);2766 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x); 2767 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1); 2768 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2); 2769 2769 2770 2770 VBOXSTRICTRC rc = VINF_SUCCESS; … … 2789 2789 #endif 2790 2790 2791 Assert(!(pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)2792 || (pVCpu->h waccm.s.vmx.pbVAPIC && pVM->hwaccm.s.vmx.pAPIC));2791 Assert(!(pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) 2792 || (pVCpu->hm.s.vmx.pbVAPIC && pVM->hm.s.vmx.pAPIC)); 2793 2793 2794 2794 /* … … 2796 2796 */ 2797 2797 if ( CPUMIsGuestInLongModeEx(pCtx) 2798 || ( (( pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & 
VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)2799 || pVM->h waccm.s.fTRPPatchingAllowed)2800 && pVM->h waccm.s.fHasIoApic)2798 || ( (( pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) 2799 || pVM->hm.s.fTRPPatchingAllowed) 2800 && pVM->hm.s.fHasIoApic) 2801 2801 ) 2802 2802 { … … 2807 2807 2808 2808 /* This is not ideal, but if we don't clear the event injection in the VMCS right here, 2809 * we may end up injecting some stale event into a VM, including injecting an event that 2809 * we may end up injecting some stale event into a VM, including injecting an event that 2810 2810 * originated before a VM reset *after* the VM has been reset. See @bugref{6220}. 2811 2811 */ … … 2821 2821 2822 2822 /* allowed zero */ 2823 if ((val2 & pVM->h waccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0) != pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0)2823 if ((val2 & pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0) 2824 2824 Log(("Invalid VMX_VMCS_CTRL_PIN_EXEC_CONTROLS: zero\n")); 2825 2825 2826 2826 /* allowed one */ 2827 if ((val2 & ~pVM->h waccm.s.vmx.msr.vmx_pin_ctls.n.allowed1) != 0)2827 if ((val2 & ~pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1) != 0) 2828 2828 Log(("Invalid VMX_VMCS_CTRL_PIN_EXEC_CONTROLS: one\n")); 2829 2829 … … 2835 2835 * Must be set according to the MSR, but can be cleared if nested paging is used. 2836 2836 */ 2837 if (pVM->h waccm.s.fNestedPaging)2837 if (pVM->hm.s.fNestedPaging) 2838 2838 { 2839 2839 val2 |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT … … 2843 2843 2844 2844 /* allowed zero */ 2845 if ((val2 & pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0) != pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0)2845 if ((val2 & pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0) 2846 2846 Log(("Invalid VMX_VMCS_CTRL_PROC_EXEC_CONTROLS: zero\n")); 2847 2847 2848 2848 /* allowed one */ 2849 if ((val2 & ~pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1) != 0)2849 if ((val2 & ~pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1) != 0) 2850 2850 Log(("Invalid VMX_VMCS_CTRL_PROC_EXEC_CONTROLS: one\n")); 2851 2851 … … 2855 2855 2856 2856 /* allowed zero */ 2857 if ((val2 & pVM->h waccm.s.vmx.msr.vmx_entry.n.disallowed0) != pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0)2857 if ((val2 & pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0) 2858 2858 Log(("Invalid VMX_VMCS_CTRL_ENTRY_CONTROLS: zero\n")); 2859 2859 2860 2860 /* allowed one */ 2861 if ((val2 & ~pVM->h waccm.s.vmx.msr.vmx_entry.n.allowed1) != 0)2861 if ((val2 & ~pVM->hm.s.vmx.msr.vmx_entry.n.allowed1) != 0) 2862 2862 Log(("Invalid VMX_VMCS_CTRL_ENTRY_CONTROLS: one\n")); 2863 2863 … … 2867 2867 2868 2868 /* allowed zero */ 2869 if ((val2 & pVM->h waccm.s.vmx.msr.vmx_exit.n.disallowed0) != pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0)2869 if ((val2 & pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0) 2870 2870 Log(("Invalid VMX_VMCS_CTRL_EXIT_CONTROLS: zero\n")); 2871 2871 2872 2872 /* allowed one */ 2873 if ((val2 & ~pVM->h waccm.s.vmx.msr.vmx_exit.n.allowed1) != 0)2873 if ((val2 & ~pVM->hm.s.vmx.msr.vmx_exit.n.allowed1) != 0) 2874 2874 Log(("Invalid VMX_VMCS_CTRL_EXIT_CONTROLS: one\n")); 2875 2875 } … … 2878 2878 2879 2879 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 2880 pVCpu->h waccm.s.vmx.VMCSCache.u64TimeEntry = RTTimeNanoTS();2880 pVCpu->hm.s.vmx.VMCSCache.u64TimeEntry = RTTimeNanoTS(); 2881 2881 #endif 2882 2882 … … 2885 2885 */ 2886 2886 ResumeExecution: 
2887 if (!STAM_REL_PROFILE_ADV_IS_RUNNING(&pVCpu->h waccm.s.StatEntry))2888 STAM_REL_PROFILE_ADV_STOP_START(&pVCpu->h waccm.s.StatExit2, &pVCpu->hwaccm.s.StatEntry, x);2889 AssertMsg(pVCpu->h waccm.s.idEnteredCpu == RTMpCpuId(),2887 if (!STAM_REL_PROFILE_ADV_IS_RUNNING(&pVCpu->hm.s.StatEntry)) 2888 STAM_REL_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit2, &pVCpu->hm.s.StatEntry, x); 2889 AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), 2890 2890 ("Expected %d, I'm %d; cResume=%d exitReason=%RGv exitQualification=%RGv\n", 2891 (int)pVCpu->h waccm.s.idEnteredCpu, (int)RTMpCpuId(), cResume, exitReason, exitQualification));2892 Assert(!H WACCMR0SuspendPending());2891 (int)pVCpu->hm.s.idEnteredCpu, (int)RTMpCpuId(), cResume, exitReason, exitQualification)); 2892 Assert(!HMR0SuspendPending()); 2893 2893 /* Not allowed to switch modes without reloading the host state (32->64 switcher)!! */ 2894 2894 Assert(fWasInLongMode == CPUMIsGuestInLongModeEx(pCtx)); … … 2897 2897 * Safety precaution; looping for too long here can have a very bad effect on the host. 2898 2898 */ 2899 if (RT_UNLIKELY(++cResume > pVM->h waccm.s.cMaxResumeLoops))2900 { 2901 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitMaxResume);2899 if (RT_UNLIKELY(++cResume > pVM->hm.s.cMaxResumeLoops)) 2900 { 2901 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume); 2902 2902 rc = VINF_EM_RAW_INTERRUPT; 2903 2903 goto end; … … 2947 2947 * Check for pending actions that force us to go back to ring-3. 2948 2948 */ 2949 if ( VM_FF_ISPENDING(pVM, VM_FF_H WACCM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)2950 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_H WACCM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_REQUEST))2949 if ( VM_FF_ISPENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA) 2950 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_REQUEST)) 2951 2951 { 2952 2952 /* Check if a sync operation is pending. */ … … 2967 2967 #endif 2968 2968 { 2969 if ( VM_FF_ISPENDING(pVM, VM_FF_H WACCM_TO_R3_MASK)2970 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_H WACCM_TO_R3_MASK))2969 if ( VM_FF_ISPENDING(pVM, VM_FF_HM_TO_R3_MASK) 2970 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK)) 2971 2971 { 2972 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatSwitchToR3);2972 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchToR3); 2973 2973 rc = RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3; 2974 2974 goto end; … … 3013 3013 if (RTThreadPreemptIsPending(NIL_RTTHREAD)) 3014 3014 { 3015 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitPreemptPending);3015 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptPending); 3016 3016 rc = VINF_EM_RAW_INTERRUPT; 3017 3017 goto end; … … 3045 3045 AssertRC(rc2); 3046 3046 /* The TPR can be found at offset 0x80 in the APIC mmio page. 
*/ 3047 pVCpu->h waccm.s.vmx.pbVAPIC[0x80] = u8LastTPR;3047 pVCpu->hm.s.vmx.pbVAPIC[0x80] = u8LastTPR; 3048 3048 3049 3049 /* … … 3059 3059 AssertRC(VBOXSTRICTRC_VAL(rc)); 3060 3060 3061 if (pVM->h waccm.s.fTPRPatchingActive)3061 if (pVM->hm.s.fTPRPatchingActive) 3062 3062 { 3063 3063 Assert(!CPUMIsGuestInLongModeEx(pCtx)); … … 3083 3083 3084 3084 #ifdef LOG_ENABLED 3085 if ( pVM->h waccm.s.fNestedPaging3086 || pVM->h waccm.s.vmx.fVPID)3087 { 3088 PHMGLOBLCPUINFO pCpu = H WACCMR0GetCurrentCpu();3089 if (pVCpu->h waccm.s.idLastCpu != pCpu->idCpu)3090 { 3091 LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->h waccm.s.idLastCpu,3085 if ( pVM->hm.s.fNestedPaging 3086 || pVM->hm.s.vmx.fVPID) 3087 { 3088 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu(); 3089 if (pVCpu->hm.s.idLastCpu != pCpu->idCpu) 3090 { 3091 LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hm.s.idLastCpu, 3092 3092 pCpu->idCpu)); 3093 3093 } 3094 else if (pVCpu->h waccm.s.cTLBFlushes != pCpu->cTLBFlushes)3095 { 3096 LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->h waccm.s.cTLBFlushes,3094 else if (pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes) 3095 { 3096 LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hm.s.cTLBFlushes, 3097 3097 pCpu->cTLBFlushes)); 3098 3098 } … … 3119 3119 * Save the host state first. 3120 3120 */ 3121 if (pVCpu->h waccm.s.fContextUseFlags & HWACCM_CHANGED_HOST_CONTEXT)3121 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT) 3122 3122 { 3123 3123 rc = VMXR0SaveHostState(pVM, pVCpu); … … 3132 3132 * Load the guest state. 3133 3133 */ 3134 if (!pVCpu->h waccm.s.fContextUseFlags)3134 if (!pVCpu->hm.s.fContextUseFlags) 3135 3135 { 3136 3136 VMXR0LoadMinimalGuestState(pVM, pVCpu, pCtx); 3137 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatLoadMinimal);3137 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal); 3138 3138 } 3139 3139 else … … 3145 3145 goto end; 3146 3146 } 3147 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatLoadFull);3147 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull); 3148 3148 } 3149 3149 … … 3163 3163 3164 3164 /* Set TLB flush state as checked until we return from the world switch. */ 3165 ASMAtomicWriteBool(&pVCpu->h waccm.s.fCheckedTLBFlush, true);3165 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); 3166 3166 /* Deal with tagged TLB setup and invalidation. */ 3167 pVM->h waccm.s.vmx.pfnSetupTaggedTLB(pVM, pVCpu);3167 pVM->hm.s.vmx.pfnSetupTaggedTLB(pVM, pVCpu); 3168 3168 3169 3169 /* … … 3180 3180 3181 3181 /* All done! Let's start VM execution. */ 3182 STAM_PROFILE_ADV_STOP_START(&pVCpu->h waccm.s.StatEntry, &pVCpu->hwaccm.s.StatInGC, x);3182 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x); 3183 3183 Assert(idCpuCheck == RTMpCpuId()); 3184 3184 3185 3185 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 3186 pVCpu->h waccm.s.vmx.VMCSCache.cResume = cResume;3187 pVCpu->h waccm.s.vmx.VMCSCache.u64TimeSwitch = RTTimeNanoTS();3186 pVCpu->hm.s.vmx.VMCSCache.cResume = cResume; 3187 pVCpu->hm.s.vmx.VMCSCache.u64TimeSwitch = RTTimeNanoTS(); 3188 3188 #endif 3189 3189 … … 3191 3191 * Save the current TPR value in the LSTAR MSR so our patches can access it. 
3192 3192 */ 3193 if (pVM->h waccm.s.fTPRPatchingActive)3194 { 3195 Assert(pVM->h waccm.s.fTPRPatchingActive);3193 if (pVM->hm.s.fTPRPatchingActive) 3194 { 3195 Assert(pVM->hm.s.fTPRPatchingActive); 3196 3196 u64OldLSTAR = ASMRdMsr(MSR_K8_LSTAR); 3197 3197 ASMWrMsr(MSR_K8_LSTAR, u8LastTPR); … … 3205 3205 * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}. 3206 3206 */ 3207 if ( (pVCpu->h waccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)3208 && !(pVCpu->h waccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))3209 { 3210 pVCpu->h waccm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX);3207 if ( (pVCpu->hm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 3208 && !(pVCpu->hm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)) 3209 { 3210 pVCpu->hm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX); 3211 3211 uint64_t u64GuestTSCAux = 0; 3212 3212 rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTSCAux); … … 3217 3217 3218 3218 #ifdef VBOX_WITH_KERNEL_USING_XMM 3219 rc = h waccmR0VMXStartVMWrapXMM(pVCpu->hwaccm.s.fResumeVM, pCtx, &pVCpu->hwaccm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hwaccm.s.vmx.pfnStartVM);3219 rc = hmR0VMXStartVMWrapXMM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM); 3220 3220 #else 3221 rc = pVCpu->h waccm.s.vmx.pfnStartVM(pVCpu->hwaccm.s.fResumeVM, pCtx, &pVCpu->hwaccm.s.vmx.VMCSCache, pVM, pVCpu);3222 #endif 3223 ASMAtomicWriteBool(&pVCpu->h waccm.s.fCheckedTLBFlush, false);3224 ASMAtomicIncU32(&pVCpu->h waccm.s.cWorldSwitchExits);3221 rc = pVCpu->hm.s.vmx.pfnStartVM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu); 3222 #endif 3223 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); 3224 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); 3225 3225 3226 3226 /* Possibly the last TSC value seen by the guest (too high) (only when we're in TSC offset mode). */ 3227 if (!(pVCpu->h waccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))3227 if (!(pVCpu->hm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT)) 3228 3228 { 3229 3229 #ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 3230 3230 /* Restore host's TSC_AUX. */ 3231 if (pVCpu->h waccm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)3232 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->h waccm.s.u64HostTSCAux);3231 if (pVCpu->hm.s.vmx.proc_ctls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 3232 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTSCAux); 3233 3233 #endif 3234 3234 3235 3235 TMCpuTickSetLastSeen(pVCpu, 3236 ASMReadTSC() + pVCpu->h waccm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);3236 ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */); 3237 3237 } 3238 3238 … … 3244 3244 * Restore the host LSTAR MSR if the guest could have changed it. 
3245 3245 */ 3246 if (pVM->h waccm.s.fTPRPatchingActive)3247 { 3248 Assert(pVM->h waccm.s.fTPRPatchingActive);3249 pVCpu->h waccm.s.vmx.pbVAPIC[0x80] = pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);3246 if (pVM->hm.s.fTPRPatchingActive) 3247 { 3248 Assert(pVM->hm.s.fTPRPatchingActive); 3249 pVCpu->hm.s.vmx.pbVAPIC[0x80] = pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR); 3250 3250 ASMWrMsr(MSR_K8_LSTAR, u64OldLSTAR); 3251 3251 } 3252 3252 3253 STAM_PROFILE_ADV_STOP_START(&pVCpu->h waccm.s.StatInGC, &pVCpu->hwaccm.s.StatExit1, x);3253 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x); 3254 3254 ASMSetFlags(uOldEFlags); 3255 3255 #ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION … … 3257 3257 #endif 3258 3258 3259 AssertMsg(!pVCpu->h waccm.s.vmx.VMCSCache.Write.cValidEntries, ("pVCpu->hwaccm.s.vmx.VMCSCache.Write.cValidEntries=%d\n",3260 pVCpu->h waccm.s.vmx.VMCSCache.Write.cValidEntries));3259 AssertMsg(!pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries, ("pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries=%d\n", 3260 pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries)); 3261 3261 3262 3262 /* In case we execute a goto ResumeExecution later on. */ 3263 pVCpu->h waccm.s.fResumeVM = true;3264 pVCpu->h waccm.s.fForceTLBFlush = false;3263 pVCpu->hm.s.fResumeVM = true; 3264 pVCpu->hm.s.fForceTLBFlush = false; 3265 3265 3266 3266 /* … … 3281 3281 /* Investigate why there was a VM-exit. */ 3282 3282 rc2 = VMXReadCachedVMCS(VMX_VMCS32_RO_EXIT_REASON, &exitReason); 3283 STAM_COUNTER_INC(&pVCpu->h waccm.s.paStatExitReasonR0[exitReason & MASK_EXITREASON_STAT]);3283 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[exitReason & MASK_EXITREASON_STAT]); 3284 3284 3285 3285 exitReason &= 0xffff; /* bit 0-15 contain the exit code. */ … … 3311 3311 rc2 = VMXReadCachedVMCS(VMX_VMCS32_RO_IDT_INFO, &val); 3312 3312 AssertRC(rc2); 3313 pVCpu->h waccm.s.Event.intInfo = VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(val);3314 if ( VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->h waccm.s.Event.intInfo)3313 pVCpu->hm.s.Event.intInfo = VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(val); 3314 if ( VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->hm.s.Event.intInfo) 3315 3315 /* Ignore 'int xx' as they'll be restarted anyway. */ 3316 && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->h waccm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SW3316 && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SW 3317 3317 /* Ignore software exceptions (such as int3) as they'll reoccur when we restart the instruction anyway. */ 3318 && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->h waccm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT)3319 { 3320 Assert(!pVCpu->h waccm.s.Event.fPending);3321 pVCpu->h waccm.s.Event.fPending = true;3318 && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT) 3319 { 3320 Assert(!pVCpu->hm.s.Event.fPending); 3321 pVCpu->hm.s.Event.fPending = true; 3322 3322 /* Error code present? 
*/ 3323 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVCpu->h waccm.s.Event.intInfo))3323 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.intInfo)) 3324 3324 { 3325 3325 rc2 = VMXReadCachedVMCS(VMX_VMCS32_RO_IDT_ERRCODE, &val); 3326 3326 AssertRC(rc2); 3327 pVCpu->h waccm.s.Event.errCode = val;3327 pVCpu->hm.s.Event.errCode = val; 3328 3328 Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv pending error=%RX64\n", 3329 pVCpu->h waccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification, val));3329 pVCpu->hm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification, val)); 3330 3330 } 3331 3331 else 3332 3332 { 3333 Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n", pVCpu->h waccm.s.Event.intInfo,3333 Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n", pVCpu->hm.s.Event.intInfo, 3334 3334 (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification)); 3335 pVCpu->h waccm.s.Event.errCode = 0;3335 pVCpu->hm.s.Event.errCode = 0; 3336 3336 } 3337 3337 } 3338 3338 #ifdef VBOX_STRICT 3339 else if ( VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->h waccm.s.Event.intInfo)3339 else if ( VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->hm.s.Event.intInfo) 3340 3340 /* Ignore software exceptions (such as int3) as they're reoccur when we restart the instruction anyway. */ 3341 && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->h waccm.s.Event.intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT)3341 && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT) 3342 3342 { 3343 3343 Log(("Ignore pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n", 3344 pVCpu->h waccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification));3344 pVCpu->hm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification)); 3345 3345 } 3346 3346 3347 3347 if (exitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE) 3348 H WACCMDumpRegs(pVM, pVCpu, pCtx);3348 HMDumpRegs(pVM, pVCpu, pCtx); 3349 3349 #endif 3350 3350 … … 3359 3359 */ 3360 3360 if ( fSetupTPRCaching 3361 && u8LastTPR != pVCpu->h waccm.s.vmx.pbVAPIC[0x80])3362 { 3363 rc2 = PDMApicSetTPR(pVCpu, pVCpu->h waccm.s.vmx.pbVAPIC[0x80]);3361 && u8LastTPR != pVCpu->hm.s.vmx.pbVAPIC[0x80]) 3362 { 3363 rc2 = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVAPIC[0x80]); 3364 3364 AssertRC(rc2); 3365 3365 } … … 3369 3369 exitReason, (uint64_t)exitQualification, pCtx->cs.Sel, pCtx->rip, (uint64_t)intInfo); 3370 3370 #endif 3371 STAM_PROFILE_ADV_STOP_START(&pVCpu->h waccm.s.StatExit1, &pVCpu->hwaccm.s.StatExit2, x);3371 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x); 3372 3372 3373 3373 /* Some cases don't need a complete resync of the guest CPU state; handle them here. */ … … 3392 3392 break; 3393 3393 } 3394 STAM_PROFILE_ADV_START(&pVCpu->h waccm.s.StatExit2Sub3, y3);3394 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExit2Sub3, y3); 3395 3395 switch (VMX_EXIT_INTERRUPTION_INFO_TYPE(intInfo)) 3396 3396 { … … 3423 3423 Assert(CPUMIsGuestFPUStateActive(pVCpu)); 3424 3424 3425 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitShadowNM);3425 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM); 3426 3426 3427 3427 /* Continue execution. 
*/ 3428 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;3429 3430 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3428 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0; 3429 3430 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3431 3431 goto ResumeExecution; 3432 3432 } 3433 3433 3434 3434 Log(("Forward #NM fault to the guest\n")); 3435 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestNM);3435 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM); 3436 3436 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), 3437 3437 cbInstr, 0); 3438 3438 AssertRC(rc2); 3439 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3439 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3440 3440 goto ResumeExecution; 3441 3441 } … … 3444 3444 { 3445 3445 #ifdef VBOX_ALWAYS_TRAP_PF 3446 if (pVM->h waccm.s.fNestedPaging)3446 if (pVM->hm.s.fNestedPaging) 3447 3447 { 3448 3448 /* … … 3454 3454 Assert(CPUMIsGuestInPagedProtectedModeEx(pCtx)); 3455 3455 3456 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestPF);3456 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF); 3457 3457 3458 3458 /* Now we must update CR2. */ … … 3462 3462 AssertRC(rc2); 3463 3463 3464 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3464 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3465 3465 goto ResumeExecution; 3466 3466 } 3467 3467 #else 3468 Assert(!pVM->h waccm.s.fNestedPaging);3469 #endif 3470 3471 #ifdef VBOX_H WACCM_WITH_GUEST_PATCHING3468 Assert(!pVM->hm.s.fNestedPaging); 3469 #endif 3470 3471 #ifdef VBOX_HM_WITH_GUEST_PATCHING 3472 3472 /* Shortcut for APIC TPR reads and writes; 32 bits guests only */ 3473 if ( pVM->h waccm.s.fTRPPatchingAllowed3474 && pVM->h waccm.s.pGuestPatchMem3473 if ( pVM->hm.s.fTRPPatchingAllowed 3474 && pVM->hm.s.pGuestPatchMem 3475 3475 && (exitQualification & 0xfff) == 0x080 3476 3476 && !(errCode & X86_TRAP_PF_P) /* not present */ 3477 3477 && CPUMGetGuestCPL(pVCpu) == 0 3478 3478 && !CPUMIsGuestInLongModeEx(pCtx) 3479 && pVM->h waccm.s.cPatches < RT_ELEMENTS(pVM->hwaccm.s.aPatches))3479 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches)) 3480 3480 { 3481 3481 RTGCPHYS GCPhysApicBase, GCPhys; … … 3488 3488 { 3489 3489 /* Only attempt to patch the instruction once. */ 3490 PH WACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);3490 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip); 3491 3491 if (!pPatch) 3492 3492 { 3493 rc = VINF_EM_H WACCM_PATCH_TPR_INSTR;3493 rc = VINF_EM_HM_PATCH_TPR_INSTR; 3494 3494 break; 3495 3495 } … … 3508 3508 && !(errCode & X86_TRAP_PF_P) /* not present */ 3509 3509 && fSetupTPRCaching 3510 && (pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))3510 && (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)) 3511 3511 { 3512 3512 RTGCPHYS GCPhysApicBase, GCPhys; … … 3519 3519 { 3520 3520 Log(("Enable VT-x virtual APIC access filtering\n")); 3521 rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->h waccm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P);3521 rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->hm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P); 3522 3522 AssertRC(rc2); 3523 3523 } … … 3531 3531 { /* We've successfully synced our shadow pages, so let's just continue execution. 
*/ 3532 3532 Log2(("Shadow page fault at %RGv cr2=%RGv error code %x\n", (RTGCPTR)pCtx->rip, exitQualification ,errCode)); 3533 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitShadowPF);3533 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF); 3534 3534 3535 3535 TRPMResetTrap(pVCpu); 3536 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3536 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3537 3537 goto ResumeExecution; 3538 3538 } … … 3544 3544 Log2(("Forward page fault to the guest\n")); 3545 3545 3546 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestPF);3546 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF); 3547 3547 /* The error code might have been changed. */ 3548 3548 errCode = TRPMGetErrorCode(pVCpu); … … 3556 3556 AssertRC(rc2); 3557 3557 3558 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3558 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3559 3559 goto ResumeExecution; 3560 3560 } … … 3564 3564 #endif 3565 3565 /* Need to go back to the recompiler to emulate the instruction. */ 3566 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitShadowPFEM);3566 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM); 3567 3567 TRPMResetTrap(pVCpu); 3568 3568 break; … … 3571 3571 case X86_XCPT_MF: /* Floating point exception. */ 3572 3572 { 3573 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestMF);3573 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF); 3574 3574 if (!(pCtx->cr0 & X86_CR0_NE)) 3575 3575 { … … 3584 3584 AssertRC(rc2); 3585 3585 3586 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3586 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3587 3587 goto ResumeExecution; 3588 3588 } … … 3602 3602 * 63:15 Reserved (0) 3603 3603 */ 3604 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestDB);3604 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB); 3605 3605 3606 3606 /* Note that we don't support guest and host-initiated debugging at the same time. */ … … 3636 3636 AssertRC(rc2); 3637 3637 3638 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3638 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3639 3639 goto ResumeExecution; 3640 3640 } … … 3646 3646 case X86_XCPT_BP: /* Breakpoint. 
*/ 3647 3647 { 3648 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestBP);3648 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP); 3649 3649 rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 3650 3650 if (rc == VINF_EM_RAW_GUEST_TRAP) … … 3654 3654 cbInstr, errCode); 3655 3655 AssertRC(rc2); 3656 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3656 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3657 3657 goto ResumeExecution; 3658 3658 } 3659 3659 if (rc == VINF_SUCCESS) 3660 3660 { 3661 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3661 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3662 3662 goto ResumeExecution; 3663 3663 } … … 3669 3669 { 3670 3670 uint32_t cbOp; 3671 PDISCPUSTATE pDis = &pVCpu->h waccm.s.DisState;3672 3673 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestGP);3671 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState; 3672 3673 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP); 3674 3674 #ifdef VBOX_STRICT 3675 3675 if ( !CPUMIsGuestInRealModeEx(pCtx) 3676 || !pVM->h waccm.s.vmx.pRealModeTSS)3676 || !pVM->hm.s.vmx.pRealModeTSS) 3677 3677 { 3678 3678 Log(("Trap %x at %04X:%RGv errorCode=%RGv\n", vector, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, errCode)); … … 3680 3680 cbInstr, errCode); 3681 3681 AssertRC(rc2); 3682 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3682 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3683 3683 goto ResumeExecution; 3684 3684 } … … 3699 3699 case OP_CLI: 3700 3700 pCtx->eflags.Bits.u1IF = 0; 3701 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitCli);3701 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli); 3702 3702 break; 3703 3703 … … 3709 3709 VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI); 3710 3710 AssertRC(rc2); 3711 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitSti);3711 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti); 3712 3712 break; 3713 3713 … … 3716 3716 rc = VINF_EM_HALT; 3717 3717 pCtx->rip += pDis->cbInstr; 3718 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitHlt);3718 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt); 3719 3719 break; 3720 3720 … … 3758 3758 pCtx->esp &= uMask; 3759 3759 3760 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitPopf);3760 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf); 3761 3761 break; 3762 3762 } … … 3801 3801 pCtx->esp -= cbParm; 3802 3802 pCtx->esp &= uMask; 3803 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitPushf);3803 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf); 3804 3804 break; 3805 3805 } … … 3839 3839 LogFlow(("iret to %04x:%x\n", pCtx->cs.Sel, pCtx->ip)); 3840 3840 fUpdateRIP = false; 3841 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitIret);3841 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret); 3842 3842 break; 3843 3843 } … … 3855 3855 AssertRC(VBOXSTRICTRC_VAL(rc)); 3856 3856 fUpdateRIP = false; 3857 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitInt);3857 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt); 3858 3858 break; 3859 3859 } … … 3873 3873 AssertRC(VBOXSTRICTRC_VAL(rc)); 3874 3874 fUpdateRIP = false; 3875 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitInt);3875 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt); 3876 3876 } 3877 3877 break; … … 3890 3890 AssertRC(VBOXSTRICTRC_VAL(rc)); 3891 3891 fUpdateRIP = false; 3892 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitInt);3892 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt); 3893 3893 break; 3894 3894 } … … 3909 3909 * whole context to be done with it. 3910 3910 */ 3911 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;3911 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL; 3912 3912 3913 3913 /* Only resume if successful. 
*/ 3914 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3914 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3915 3915 goto ResumeExecution; 3916 3916 } … … 3933 3933 switch (vector) 3934 3934 { 3935 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestDE); break;3936 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestUD); break;3937 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestSS); break;3938 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestNP); break;3939 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestXF); break;3935 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE); break; 3936 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD); break; 3937 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS); break; 3938 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP); break; 3939 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF); break; 3940 3940 } 3941 3941 … … 3945 3945 AssertRC(rc2); 3946 3946 3947 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3947 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3948 3948 goto ResumeExecution; 3949 3949 } 3950 3950 #endif 3951 3951 default: 3952 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitGuestXcpUnk);3952 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk); 3953 3953 if ( CPUMIsGuestInRealModeEx(pCtx) 3954 && pVM->h waccm.s.vmx.pRealModeTSS)3954 && pVM->hm.s.vmx.pRealModeTSS) 3955 3955 { 3956 3956 Log(("Real Mode Trap %x at %04x:%04X error code %x\n", vector, pCtx->cs.Sel, pCtx->eip, errCode)); … … 3966 3966 } 3967 3967 3968 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3968 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3969 3969 goto ResumeExecution; 3970 3970 } … … 3982 3982 } 3983 3983 3984 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub3, y3);3984 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3); 3985 3985 break; 3986 3986 } … … 3994 3994 RTGCPHYS GCPhys; 3995 3995 3996 Assert(pVM->h waccm.s.fNestedPaging);3996 Assert(pVM->hm.s.fNestedPaging); 3997 3997 3998 3998 rc2 = VMXReadVMCS64(VMX_VMCS_EXIT_PHYS_ADDR_FULL, &GCPhys); … … 4017 4017 && GCPhys > 0x1000000 /* to skip VGA frame buffer accesses */ 4018 4018 && fSetupTPRCaching 4019 && (pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))4019 && (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)) 4020 4020 { 4021 4021 RTGCPHYS GCPhysApicBase; … … 4025 4025 { 4026 4026 Log(("Enable VT-x virtual APIC access filtering\n")); 4027 rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->h waccm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P);4027 rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->hm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P); 4028 4028 AssertRC(rc2); 4029 4029 } … … 4049 4049 /* We've successfully synced our shadow pages, so let's just continue execution. 
*/ 4050 4050 Log2(("Shadow page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, exitQualification , errCode)); 4051 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitReasonNPF);4051 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNPF); 4052 4052 4053 4053 TRPMResetTrap(pVCpu); … … 4068 4068 RTGCPHYS GCPhys; 4069 4069 4070 Assert(pVM->h waccm.s.fNestedPaging);4070 Assert(pVM->hm.s.fNestedPaging); 4071 4071 4072 4072 rc2 = VMXReadVMCS64(VMX_VMCS_EXIT_PHYS_ADDR_FULL, &GCPhys); … … 4078 4078 && GCPhys > 0x1000000 /* to skip VGA frame buffer accesses */ 4079 4079 && fSetupTPRCaching 4080 && (pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))4080 && (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)) 4081 4081 { 4082 4082 RTGCPHYS GCPhysApicBase; … … 4086 4086 { 4087 4087 Log(("Enable VT-x virtual APIC access filtering\n")); 4088 rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->h waccm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P);4088 rc2 = IOMMMIOMapMMIOHCPage(pVM, GCPhysApicBase, pVM->hm.s.vmx.pAPICPhys, X86_PTE_RW | X86_PTE_P); 4089 4089 AssertRC(rc2); 4090 4090 } … … 4116 4116 LogFlow(("VMX_EXIT_IRQ_WINDOW %RGv pending=%d IF=%d\n", (RTGCPTR)pCtx->rip, 4117 4117 VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)), pCtx->eflags.Bits.u1IF)); 4118 pVCpu->h waccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT;4119 rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);4118 pVCpu->hm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT; 4119 rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 4120 4120 AssertRC(rc2); 4121 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitIrqWindow);4121 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIrqWindow); 4122 4122 goto ResumeExecution; /* we check for pending guest interrupts there */ 4123 4123 4124 4124 case VMX_EXIT_WBINVD: /* 54 Guest software attempted to execute WBINVD. (conditional) */ 4125 4125 case VMX_EXIT_INVD: /* 13 Guest software attempted to execute INVD. (unconditional) */ 4126 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitInvd);4126 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd); 4127 4127 /* Skip instruction and continue directly. 
*/ 4128 4128 pCtx->rip += cbInstr; … … 4133 4133 { 4134 4134 Log2(("VMX: Cpuid %x\n", pCtx->eax)); 4135 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitCpuid);4135 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid); 4136 4136 rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 4137 4137 if (rc == VINF_SUCCESS) … … 4150 4150 { 4151 4151 Log2(("VMX: Rdpmc %x\n", pCtx->ecx)); 4152 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitRdpmc);4152 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc); 4153 4153 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 4154 4154 if (rc == VINF_SUCCESS) … … 4166 4166 { 4167 4167 Log2(("VMX: Rdtsc\n")); 4168 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitRdtsc);4168 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc); 4169 4169 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 4170 4170 if (rc == VINF_SUCCESS) … … 4182 4182 { 4183 4183 Log2(("VMX: Rdtscp\n")); 4184 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitRdtscp);4184 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp); 4185 4185 rc = EMInterpretRdtscp(pVM, pVCpu, pCtx); 4186 4186 if (rc == VINF_SUCCESS) … … 4198 4198 { 4199 4199 Log2(("VMX: invlpg\n")); 4200 Assert(!pVM->h waccm.s.fNestedPaging);4201 4202 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitInvlpg);4200 Assert(!pVM->hm.s.fNestedPaging); 4201 4202 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg); 4203 4203 rc = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pCtx), exitQualification); 4204 4204 if (rc == VINF_SUCCESS) … … 4216 4216 Log2(("VMX: monitor\n")); 4217 4217 4218 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitMonitor);4218 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor); 4219 4219 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 4220 4220 if (rc == VINF_SUCCESS) … … 4230 4230 case VMX_EXIT_WRMSR: /* 32 WRMSR. Guest software attempted to execute WRMSR. */ 4231 4231 /* When an interrupt is pending, we'll let MSR_K8_LSTAR writes fault in our TPR patch code. */ 4232 if ( pVM->h waccm.s.fTPRPatchingActive4232 if ( pVM->hm.s.fTPRPatchingActive 4233 4233 && pCtx->ecx == MSR_K8_LSTAR) 4234 4234 { … … 4249 4249 goto ResumeExecution; 4250 4250 } 4251 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_MSR;4251 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_MSR; 4252 4252 /* no break */ 4253 4253 case VMX_EXIT_RDMSR: /* 31 RDMSR. Guest software attempted to execute RDMSR. */ 4254 4254 { 4255 STAM_COUNTER_INC((exitReason == VMX_EXIT_RDMSR) ? &pVCpu->h waccm.s.StatExitRdmsr : &pVCpu->hwaccm.s.StatExitWrmsr);4255 STAM_COUNTER_INC((exitReason == VMX_EXIT_RDMSR) ? &pVCpu->hm.s.StatExitRdmsr : &pVCpu->hm.s.StatExitWrmsr); 4256 4256 4257 4257 /* … … 4274 4274 case VMX_EXIT_CRX_MOVE: /* 28 Control-register accesses. 
*/ 4275 4275 { 4276 STAM_PROFILE_ADV_START(&pVCpu->h waccm.s.StatExit2Sub2, y2);4276 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExit2Sub2, y2); 4277 4277 4278 4278 switch (VMX_EXIT_QUALIFICATION_CRX_ACCESS(exitQualification)) … … 4281 4281 { 4282 4282 Log2(("VMX: %RGv mov cr%d, x\n", (RTGCPTR)pCtx->rip, VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification))); 4283 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);4283 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]); 4284 4284 rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx), 4285 4285 VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification), … … 4288 4288 { 4289 4289 case 0: 4290 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_GUEST_CR3;4290 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_CR3; 4291 4291 break; 4292 4292 case 2: 4293 4293 break; 4294 4294 case 3: 4295 Assert(!pVM->h waccm.s.fNestedPaging || !CPUMIsGuestInPagedProtectedModeEx(pCtx));4296 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;4295 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestInPagedProtectedModeEx(pCtx)); 4296 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3; 4297 4297 break; 4298 4298 case 4: 4299 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;4299 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4; 4300 4300 break; 4301 4301 case 8: 4302 4302 /* CR8 contains the APIC TPR */ 4303 Assert(!(pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed14303 Assert(!(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 4304 4304 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)); 4305 4305 break; … … 4315 4315 { 4316 4316 Log2(("VMX: mov x, crx\n")); 4317 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);4318 4319 Assert( !pVM->h waccm.s.fNestedPaging4317 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]); 4318 4319 Assert( !pVM->hm.s.fNestedPaging 4320 4320 || !CPUMIsGuestInPagedProtectedModeEx(pCtx) 4321 4321 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != DISCREG_CR3); … … 4323 4323 /* CR8 reads only cause an exit when the TPR shadow feature isn't present. 
*/ 4324 4324 Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != 8 4325 || !(pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));4325 || !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)); 4326 4326 4327 4327 rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx), … … 4334 4334 { 4335 4335 Log2(("VMX: clts\n")); 4336 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitCLTS);4336 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCLTS); 4337 4337 rc = EMInterpretCLTS(pVM, pVCpu); 4338 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;4338 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0; 4339 4339 break; 4340 4340 } … … 4343 4343 { 4344 4344 Log2(("VMX: lmsw %x\n", VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification))); 4345 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitLMSW);4345 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLMSW); 4346 4346 rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification)); 4347 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;4347 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0; 4348 4348 break; 4349 4349 } … … 4357 4357 { 4358 4358 /* Only resume if successful. */ 4359 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub2, y2);4359 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub2, y2); 4360 4360 goto ResumeExecution; 4361 4361 } 4362 4362 Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3); 4363 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub2, y2);4363 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub2, y2); 4364 4364 break; 4365 4365 } … … 4371 4371 { 4372 4372 /* Disable DRx move intercepts. */ 4373 pVCpu->h waccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;4374 rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);4373 pVCpu->hm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT; 4374 rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 4375 4375 AssertRC(rc2); 4376 4376 … … 4390 4390 4391 4391 #ifdef VBOX_WITH_STATISTICS 4392 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatDRxContextSwitch);4392 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch); 4393 4393 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(exitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE) 4394 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitDRxWrite);4394 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite); 4395 4395 else 4396 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitDRxRead);4396 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead); 4397 4397 #endif 4398 4398 … … 4406 4406 Log2(("VMX: mov DRx%d, genreg%d\n", VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification), 4407 4407 VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification))); 4408 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitDRxWrite);4408 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite); 4409 4409 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx), 4410 4410 VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification), 4411 4411 VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification)); 4412 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;4412 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG; 4413 4413 Log2(("DR7=%08x\n", pCtx->dr[7])); 4414 4414 } … … 4416 4416 { 4417 4417 Log2(("VMX: mov x, DRx\n")); 4418 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitDRxRead);4418 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead); 4419 4419 rc = 
EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx), 4420 4420 VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification), … … 4437 4437 case VMX_EXIT_PORT_IO: /* 30 I/O instruction. */ 4438 4438 { 4439 STAM_PROFILE_ADV_START(&pVCpu->h waccm.s.StatExit2Sub1, y1);4439 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExit2Sub1, y1); 4440 4440 uint32_t uPort; 4441 4441 uint32_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(exitQualification); … … 4451 4451 { 4452 4452 rc = fIOWrite ? VINF_IOM_R3_IOPORT_WRITE : VINF_IOM_R3_IOPORT_READ; 4453 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub1, y1);4453 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1); 4454 4454 break; 4455 4455 } … … 4459 4459 { 4460 4460 /* ins/outs */ 4461 PDISCPUSTATE pDis = &pVCpu->h waccm.s.DisState;4461 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState; 4462 4462 4463 4463 /* Disassemble manually to deal with segment prefixes. */ … … 4470 4470 { 4471 4471 Log2(("IOMInterpretOUTSEx %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, uPort, cbSize)); 4472 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitIOStringWrite);4472 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite); 4473 4473 rc = IOMInterpretOUTSEx(pVM, CPUMCTX2CORE(pCtx), uPort, pDis->fPrefix, (DISCPUMODE)pDis->uAddrMode, cbSize); 4474 4474 } … … 4476 4476 { 4477 4477 Log2(("IOMInterpretINSEx %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, uPort, cbSize)); 4478 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitIOStringRead);4478 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead); 4479 4479 rc = IOMInterpretINSEx(pVM, CPUMCTX2CORE(pCtx), uPort, pDis->fPrefix, (DISCPUMODE)pDis->uAddrMode, cbSize); 4480 4480 } … … 4492 4492 if (fIOWrite) 4493 4493 { 4494 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitIOWrite);4494 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite); 4495 4495 rc = IOMIOPortWrite(pVM, uPort, pCtx->eax & uAndVal, cbSize); 4496 4496 if (rc == VINF_IOM_R3_IOPORT_WRITE) 4497 H WACCMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pCtx->rip + cbInstr, uPort, uAndVal, cbSize);4497 HMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pCtx->rip + cbInstr, uPort, uAndVal, cbSize); 4498 4498 } 4499 4499 else … … 4501 4501 uint32_t u32Val = 0; 4502 4502 4503 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitIORead);4503 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead); 4504 4504 rc = IOMIOPortRead(pVM, uPort, &u32Val, cbSize); 4505 4505 if (IOM_SUCCESS(rc)) … … 4510 4510 else 4511 4511 if (rc == VINF_IOM_R3_IOPORT_READ) 4512 H WACCMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pCtx->rip + cbInstr, uPort, uAndVal, cbSize);4512 HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pCtx->rip + cbInstr, uPort, uAndVal, cbSize); 4513 4513 } 4514 4514 } … … 4527 4527 if (pCtx->dr[7] & X86_DR7_ENABLED_MASK) 4528 4528 { 4529 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatDRxIOCheck);4529 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIOCheck); 4530 4530 for (unsigned i = 0; i < 4; i++) 4531 4531 { … … 4575 4575 AssertRC(rc2); 4576 4576 4577 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub1, y1);4577 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1); 4578 4578 goto ResumeExecution; 4579 4579 } 4580 4580 } 4581 4581 } 4582 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub1, y1);4582 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1); 4583 4583 goto ResumeExecution; 4584 4584 } 4585 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub1, y1);4585 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1); 4586 4586 break; 4587 4587 } … … 4600 4600 } 4601 4601 #endif 4602 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2Sub1, y1);4602 
STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1); 4603 4603 break; 4604 4604 } … … 4686 4686 Log(("VMX_EXIT_TASK_SWITCH: exit=%RX64\n", exitQualification)); 4687 4687 if ( (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(exitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT) 4688 && pVCpu->h waccm.s.Event.fPending)4688 && pVCpu->hm.s.Event.fPending) 4689 4689 { 4690 4690 /* Caused by an injected interrupt. */ 4691 pVCpu->h waccm.s.Event.fPending = false;4692 4693 Log(("VMX_EXIT_TASK_SWITCH: reassert trap %d\n", VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVCpu->h waccm.s.Event.intInfo)));4694 Assert(!VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVCpu->h waccm.s.Event.intInfo));4695 rc2 = TRPMAssertTrap(pVCpu, VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVCpu->h waccm.s.Event.intInfo), TRPM_HARDWARE_INT);4691 pVCpu->hm.s.Event.fPending = false; 4692 4693 Log(("VMX_EXIT_TASK_SWITCH: reassert trap %d\n", VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVCpu->hm.s.Event.intInfo))); 4694 Assert(!VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.intInfo)); 4695 rc2 = TRPMAssertTrap(pVCpu, VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVCpu->hm.s.Event.intInfo), TRPM_HARDWARE_INT); 4696 4696 AssertRC(rc2); 4697 4697 } … … 4702 4702 case VMX_EXIT_HLT: /* 12 Guest software attempted to execute HLT. */ 4703 4703 /* Check if external interrupts are pending; if so, don't switch back. */ 4704 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitHlt);4704 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt); 4705 4705 pCtx->rip++; /* skip hlt */ 4706 4706 if (EMShouldContinueAfterHalt(pVCpu, pCtx)) … … 4712 4712 case VMX_EXIT_MWAIT: /* 36 Guest software executed MWAIT. */ 4713 4713 Log2(("VMX: mwait\n")); 4714 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitMwait);4714 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait); 4715 4715 rc = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 4716 4716 if ( rc == VINF_EM_HALT … … 4737 4737 case VMX_EXIT_MTF: /* 37 Exit due to Monitor Trap Flag. */ 4738 4738 LogFlow(("VMX_EXIT_MTF at %RGv\n", (RTGCPTR)pCtx->rip)); 4739 pVCpu->h waccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;4740 rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);4739 pVCpu->hm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG; 4740 rc2 = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 4741 4741 AssertRC(rc2); 4742 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatExitMTF);4742 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMTF); 4743 4743 #if 0 4744 4744 DBGFDoneStepping(pVCpu); … … 4872 4872 && !VMX_EXIT_INTERRUPTION_INFO_VALID(intInfo)) 4873 4873 { 4874 STAM_COUNTER_INC(&pVCpu->h waccm.s.StatPendingHostIrq);4874 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq); 4875 4875 /* On the next entry we'll only sync the host context. */ 4876 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;4876 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT; 4877 4877 } 4878 4878 else … … 4881 4881 /** @todo we can do better than this */ 4882 4882 /* Not in the VINF_PGM_CHANGE_MODE though! */ 4883 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;4883 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL; 4884 4884 } 4885 4885 … … 4890 4890 { 4891 4891 /* Try to extract more information about what might have gone wrong here. 
*/ 4892 VMXGetActivateVMCS(&pVCpu->h waccm.s.vmx.lasterror.u64VMCSPhys);4893 pVCpu->h waccm.s.vmx.lasterror.ulVMCSRevision = *(uint32_t *)pVCpu->hwaccm.s.vmx.pvVMCS;4894 pVCpu->h waccm.s.vmx.lasterror.idEnteredCpu = pVCpu->hwaccm.s.idEnteredCpu;4895 pVCpu->h waccm.s.vmx.lasterror.idCurrentCpu = RTMpCpuId();4892 VMXGetActivateVMCS(&pVCpu->hm.s.vmx.lasterror.u64VMCSPhys); 4893 pVCpu->hm.s.vmx.lasterror.ulVMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVMCS; 4894 pVCpu->hm.s.vmx.lasterror.idEnteredCpu = pVCpu->hm.s.idEnteredCpu; 4895 pVCpu->hm.s.vmx.lasterror.idCurrentCpu = RTMpCpuId(); 4896 4896 } 4897 4897 … … 4905 4905 #endif 4906 4906 4907 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit2, x);4908 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatExit1, x);4909 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatEntry, x);4907 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x); 4908 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x); 4909 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x); 4910 4910 Log2(("X")); 4911 4911 return VBOXSTRICTRC_TODO(rc); … … 4923 4923 VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu) 4924 4924 { 4925 Assert(pVM->h waccm.s.vmx.fSupported);4925 Assert(pVM->hm.s.vmx.fSupported); 4926 4926 NOREF(pCpu); 4927 4927 … … 4934 4934 4935 4935 /* Activate the VMCS. */ 4936 int rc = VMXActivateVMCS(pVCpu->h waccm.s.vmx.HCPhysVMCS);4936 int rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVMCS); 4937 4937 if (RT_FAILURE(rc)) 4938 4938 return rc; 4939 4939 4940 pVCpu->h waccm.s.fResumeVM = false;4940 pVCpu->hm.s.fResumeVM = false; 4941 4941 return VINF_SUCCESS; 4942 4942 } … … 4953 4953 VMMR0DECL(int) VMXR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 4954 4954 { 4955 Assert(pVM->h waccm.s.vmx.fSupported);4955 Assert(pVM->hm.s.vmx.fSupported); 4956 4956 4957 4957 #ifdef DEBUG … … 4959 4959 { 4960 4960 CPUMR0LoadHostDebugState(pVM, pVCpu); 4961 Assert(pVCpu->h waccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);4961 Assert(pVCpu->hm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT); 4962 4962 } 4963 4963 else … … 4972 4972 4973 4973 /* Enable DRx move intercepts again. */ 4974 pVCpu->h waccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;4975 int rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->h waccm.s.vmx.proc_ctls);4974 pVCpu->hm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT; 4975 int rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.proc_ctls); 4976 4976 AssertRC(rc); 4977 4977 4978 4978 /* Resync the debug registers the next time. */ 4979 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;4979 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG; 4980 4980 } 4981 4981 else 4982 Assert(pVCpu->h waccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);4982 Assert(pVCpu->hm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT); 4983 4983 4984 4984 /* … … 4986 4986 * VMCS data back to memory. 4987 4987 */ 4988 int rc = VMXClearVMCS(pVCpu->h waccm.s.vmx.HCPhysVMCS);4988 int rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVMCS); 4989 4989 AssertRC(rc); 4990 4990 … … 5006 5006 5007 5007 LogFlow(("hmR0VmxFlushEPT %d\n", enmFlush)); 5008 Assert(pVM->h waccm.s.fNestedPaging);5009 descriptor[0] = pVCpu->h waccm.s.vmx.GCPhysEPTP;5008 Assert(pVM->hm.s.fNestedPaging); 5009 descriptor[0] = pVCpu->hm.s.vmx.GCPhysEPTP; 5010 5010 descriptor[1] = 0; /* MBZ. Intel spec. 
33.3 VMX Instructions */ 5011 5011 int rc = VMXR0InvEPT(enmFlush, &descriptor[0]); 5012 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %x %RGv failed with %d\n", enmFlush, pVCpu->h waccm.s.vmx.GCPhysEPTP, rc));5012 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %x %RGv failed with %d\n", enmFlush, pVCpu->hm.s.vmx.GCPhysEPTP, rc)); 5013 5013 } 5014 5014 … … 5029 5029 uint64_t descriptor[2]; 5030 5030 5031 Assert(pVM->h waccm.s.vmx.fVPID);5031 Assert(pVM->hm.s.vmx.fVPID); 5032 5032 if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS) 5033 5033 { … … 5038 5038 { 5039 5039 AssertPtr(pVCpu); 5040 AssertMsg(pVCpu->h waccm.s.uCurrentASID != 0, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hwaccm.s.uCurrentASID));5041 AssertMsg(pVCpu->h waccm.s.uCurrentASID <= UINT16_MAX, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hwaccm.s.uCurrentASID));5042 descriptor[0] = pVCpu->h waccm.s.uCurrentASID;5040 AssertMsg(pVCpu->hm.s.uCurrentASID != 0, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hm.s.uCurrentASID)); 5041 AssertMsg(pVCpu->hm.s.uCurrentASID <= UINT16_MAX, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hm.s.uCurrentASID)); 5042 descriptor[0] = pVCpu->hm.s.uCurrentASID; 5043 5043 descriptor[1] = GCPtr; 5044 5044 } 5045 5045 int rc = VMXR0InvVPID(enmFlush, &descriptor[0]); NOREF(rc); 5046 5046 AssertMsg(rc == VINF_SUCCESS, 5047 ("VMXR0InvVPID %x %x %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->h waccm.s.uCurrentASID : 0, GCPtr, rc));5047 ("VMXR0InvVPID %x %x %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentASID : 0, GCPtr, rc)); 5048 5048 } 5049 5049 … … 5073 5073 * function maybe called in a loop with individual addresses. 5074 5074 */ 5075 if (pVM->h waccm.s.vmx.fVPID)5075 if (pVM->hm.s.vmx.fVPID) 5076 5076 { 5077 5077 /* If we can flush just this page do it, otherwise flush as little as possible. 
*/ 5078 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)5078 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR) 5079 5079 hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt); 5080 5080 else 5081 5081 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH); 5082 5082 } 5083 else if (pVM->h waccm.s.fNestedPaging)5083 else if (pVM->hm.s.fNestedPaging) 5084 5084 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH); 5085 5085 } … … 5147 5147 Log(("Current stack %08x\n", &rc2)); 5148 5148 5149 pVCpu->h waccm.s.vmx.lasterror.ulInstrError = instrError;5150 pVCpu->h waccm.s.vmx.lasterror.ulExitReason = exitReason;5149 pVCpu->hm.s.vmx.lasterror.ulInstrError = instrError; 5150 pVCpu->hm.s.vmx.lasterror.ulExitReason = exitReason; 5151 5151 5152 5152 #ifdef VBOX_STRICT … … 5183 5183 { 5184 5184 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 5185 H WACCMR0DumpDescriptor(pDesc, val, "CS: ");5185 HMR0DumpDescriptor(pDesc, val, "CS: "); 5186 5186 } 5187 5187 … … 5191 5191 { 5192 5192 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 5193 H WACCMR0DumpDescriptor(pDesc, val, "DS: ");5193 HMR0DumpDescriptor(pDesc, val, "DS: "); 5194 5194 } 5195 5195 … … 5199 5199 { 5200 5200 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 5201 H WACCMR0DumpDescriptor(pDesc, val, "ES: ");5201 HMR0DumpDescriptor(pDesc, val, "ES: "); 5202 5202 } 5203 5203 … … 5207 5207 { 5208 5208 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 5209 H WACCMR0DumpDescriptor(pDesc, val, "FS: ");5209 HMR0DumpDescriptor(pDesc, val, "FS: "); 5210 5210 } 5211 5211 … … 5215 5215 { 5216 5216 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 5217 H WACCMR0DumpDescriptor(pDesc, val, "GS: ");5217 HMR0DumpDescriptor(pDesc, val, "GS: "); 5218 5218 } 5219 5219 … … 5223 5223 { 5224 5224 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 5225 H WACCMR0DumpDescriptor(pDesc, val, "SS: ");5225 HMR0DumpDescriptor(pDesc, val, "SS: "); 5226 5226 } 5227 5227 … … 5231 5231 { 5232 5232 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK)); 5233 H WACCMR0DumpDescriptor(pDesc, val, "TR: ");5233 HMR0DumpDescriptor(pDesc, val, "TR: "); 5234 5234 } 5235 5235 … … 5292 5292 int rc; 5293 5293 5294 pCpu = H WACCMR0GetCurrentCpu();5294 pCpu = HMR0GetCurrentCpu(); 5295 5295 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0); 5296 5296 … … 5298 5298 pCache->uPos = 1; 5299 5299 pCache->interPD = PGMGetInterPaeCR3(pVM); 5300 pCache->pSwitcher = (uint64_t)pVM->h waccm.s.pfnHost32ToGuest64R0;5300 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0; 5301 5301 #endif 5302 5302 … … 5313 5313 aParam[0] = (uint32_t)(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */ 5314 5314 aParam[1] = (uint32_t)(HCPhysCpuPage >> 32); /* Param 1: VMXON physical address - Hi. */ 5315 aParam[2] = (uint32_t)(pVCpu->h waccm.s.vmx.HCPhysVMCS); /* Param 2: VMCS physical address - Lo. */5316 aParam[3] = (uint32_t)(pVCpu->h waccm.s.vmx.HCPhysVMCS >> 32); /* Param 2: VMCS physical address - Hi. */5317 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].h waccm.s.vmx.VMCSCache);5315 aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVMCS); /* Param 2: VMCS physical address - Lo. */ 5316 aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVMCS >> 32); /* Param 2: VMCS physical address - Hi. 
*/ 5317 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache); 5318 5318 aParam[5] = 0; 5319 5319 5320 5320 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 5321 pCtx->dr[4] = pVM->h waccm.s.vmx.pScratchPhys + 16 + 8;5322 *(uint32_t *)(pVM->h waccm.s.vmx.pScratch + 16 + 8) = 1;5323 #endif 5324 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->h waccm.s.pfnVMXGCStartVM64, 6, &aParam[0]);5321 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8; 5322 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1; 5323 #endif 5324 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnVMXGCStartVM64, 6, &aParam[0]); 5325 5325 5326 5326 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 5327 Assert(*(uint32_t *)(pVM->h waccm.s.vmx.pScratch + 16 + 8) == 5);5327 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5); 5328 5328 Assert(pCtx->dr[4] == 10); 5329 *(uint32_t *)(pVM->h waccm.s.vmx.pScratch + 16 + 8) = 0xff;5329 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff; 5330 5330 #endif 5331 5331 5332 5332 #ifdef DEBUG 5333 5333 AssertMsg(pCache->TestIn.HCPhysCpuPage== HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage)); 5334 AssertMsg(pCache->TestIn.HCPhysVMCS == pVCpu->h waccm.s.vmx.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS,5335 pVCpu->h waccm.s.vmx.HCPhysVMCS));5334 AssertMsg(pCache->TestIn.HCPhysVMCS == pVCpu->hm.s.vmx.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS, 5335 pVCpu->hm.s.vmx.HCPhysVMCS)); 5336 5336 AssertMsg(pCache->TestIn.HCPhysVMCS == pCache->TestOut.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS, 5337 5337 pCache->TestOut.HCPhysVMCS)); 5338 5338 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache, 5339 5339 pCache->TestOut.pCache)); 5340 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].h waccm.s.vmx.VMCSCache),5341 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].h waccm.s.vmx.VMCSCache)));5340 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache), 5341 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache))); 5342 5342 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx, 5343 5343 pCache->TestOut.pCtx)); … … 5466 5466 RTHCUINTREG uOldEFlags; 5467 5467 5468 AssertReturn(pVM->h waccm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);5468 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER); 5469 5469 Assert(pfnHandler); 5470 Assert(pVCpu->h waccm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hwaccm.s.vmx.VMCSCache.Write.aField));5471 Assert(pVCpu->h waccm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hwaccm.s.vmx.VMCSCache.Read.aField));5470 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField)); 5471 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField)); 5472 5472 5473 5473 #ifdef VBOX_STRICT 5474 for (unsigned i=0;i<pVCpu->h waccm.s.vmx.VMCSCache.Write.cValidEntries;i++)5475 Assert(hmR0VmxIsValidWriteField(pVCpu->h waccm.s.vmx.VMCSCache.Write.aField[i]));5476 5477 for (unsigned i=0;i<pVCpu->h waccm.s.vmx.VMCSCache.Read.cValidEntries;i++)5478 Assert(hmR0VmxIsValidReadField(pVCpu->h waccm.s.vmx.VMCSCache.Read.aField[i]));5474 for (unsigned i=0;i<pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries;i++) 5475 
Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i])); 5476 5477 for (unsigned i=0;i<pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries;i++) 5478 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i])); 5479 5479 #endif 5480 5480 … … 5487 5487 #endif 5488 5488 5489 pCpu = H WACCMR0GetCurrentCpu();5489 pCpu = HMR0GetCurrentCpu(); 5490 5490 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0); 5491 5491 5492 5492 /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */ 5493 VMXClearVMCS(pVCpu->h waccm.s.vmx.HCPhysVMCS);5493 VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVMCS); 5494 5494 5495 5495 /* Leave VMX Root Mode. */ … … 5503 5503 CPUMPushHyper(pVCpu, paParam[i]); 5504 5504 5505 STAM_PROFILE_ADV_START(&pVCpu->h waccm.s.StatWorldSwitch3264, z);5505 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z); 5506 5506 5507 5507 /* Call switcher. */ 5508 rc = pVM->h waccm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));5509 STAM_PROFILE_ADV_STOP(&pVCpu->h waccm.s.StatWorldSwitch3264, z);5508 rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum)); 5509 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z); 5510 5510 5511 5511 /* Make sure the VMX instructions don't cause #UD faults. */ … … 5521 5521 } 5522 5522 5523 rc2 = VMXActivateVMCS(pVCpu->h waccm.s.vmx.HCPhysVMCS);5523 rc2 = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVMCS); 5524 5524 AssertRC(rc2); 5525 5525 Assert(!(ASMGetFlags() & X86_EFL_IF)); … … 5609 5609 VMMR0DECL(int) VMXWriteCachedVMCSEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val) 5610 5610 { 5611 PVMCSCACHE pCache = &pVCpu->h waccm.s.vmx.VMCSCache;5611 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache; 5612 5612 5613 5613 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1, -
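The VMXWriteCachedVMCSEx and VMXReadCachedVMCSEx hunks above operate on the per-VCPU VMCS cache that stages field accesses in ordinary memory around VM-entry. For readers unfamiliar with the pattern, a minimal write-batching cache of this kind is sketched below; the names, the entry limit and the duplicate-overwrite behaviour are all invented for illustration and do not reflect the actual VMCSCACHE structure.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical write cache, invented for illustration.  Writes to
     * virtualized fields are queued here and pushed to the hardware in one
     * go before the next VM-entry. */
    #define CACHE_MAX_ENTRIES 64

    typedef struct
    {
        uint32_t cValid;                        /* number of queued writes  */
        uint32_t aField[CACHE_MAX_ENTRIES];     /* field encodings          */
        uint64_t aVal[CACHE_MAX_ENTRIES];       /* values to write          */
    } WRITECACHE;

    /* Queue a write; a later write to the same field overwrites the earlier one. */
    static int cacheQueueWrite(WRITECACHE *pCache, uint32_t idxField, uint64_t u64Val)
    {
        for (uint32_t i = 0; i < pCache->cValid; i++)
            if (pCache->aField[i] == idxField)
            {
                pCache->aVal[i] = u64Val;
                return 0;
            }
        if (pCache->cValid >= CACHE_MAX_ENTRIES)
            return -1;                          /* full: caller must flush first */
        pCache->aField[pCache->cValid] = idxField;
        pCache->aVal[pCache->cValid]   = u64Val;
        pCache->cValid++;
        return 0;
    }

    /* Flush every queued write through a caller-supplied VMWRITE-like primitive. */
    static void cacheFlush(WRITECACHE *pCache, void (*pfnWrite)(uint32_t, uint64_t))
    {
        for (uint32_t i = 0; i < pCache->cValid; i++)
            pfnWrite(pCache->aField[i], pCache->aVal[i]);
        pCache->cValid = 0;
    }

    static void printWrite(uint32_t idxField, uint64_t u64Val)
    {
        printf("vmwrite field=%#x val=%#llx\n", (unsigned)idxField, (unsigned long long)u64Val);
    }

    int main(void)
    {
        WRITECACHE Cache = { 0 };
        cacheQueueWrite(&Cache, 0x6802 /* example encoding */, 0x1000);
        cacheQueueWrite(&Cache, 0x6802, 0x2000);   /* overwrites the first entry */
        cacheFlush(&Cache, printWrite);
        return 0;
    }

In the hunks above the cache address travels with the 32-to-64 switcher call (aParam[4]), which is presumably why guest-state fields are staged in memory first rather than written to the VMCS one at a time.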
trunk/src/VBox/VMM/VMMR0/HWVMXR0.h
r43307 r43387 24 24 #include <VBox/vmm/stam.h> 25 25 #include <VBox/dis.h> 26 #include <VBox/vmm/h waccm.h>26 #include <VBox/vmm/hm.h> 27 27 #include <VBox/vmm/pgm.h> 28 #include <VBox/vmm/h wacc_vmx.h>28 #include <VBox/vmm/hm_vmx.h> 29 29 30 30 RT_C_DECLS_BEGIN … … 220 220 else \ 221 221 if ( CPUMIsGuestInRealModeEx(pCtx) \ 222 && !pVM->h waccm.s.vmx.fUnrestrictedGuest) \222 && !pVM->hm.s.vmx.fUnrestrictedGuest) \ 223 223 { \ 224 224 /* Must override this or else VT-x will fail with invalid guest state errors. */ \ … … 291 291 { 292 292 Assert(idxCache <= VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX); 293 *pVal = pVCpu->h waccm.s.vmx.VMCSCache.Read.aFieldVal[idxCache];293 *pVal = pVCpu->hm.s.vmx.VMCSCache.Read.aFieldVal[idxCache]; 294 294 return VINF_SUCCESS; 295 295 } -
trunk/src/VBox/VMM/VMMR0/PDMR0Device.cpp
r42222 r43387 28 28 #include <VBox/vmm/vmm.h> 29 29 #include <VBox/vmm/patm.h> 30 #include <VBox/vmm/h waccm.h>30 #include <VBox/vmm/hm.h> 31 31 32 32 #include <VBox/log.h> … … 374 374 PDMDEV_ASSERT_DEVINS(pDevIns); 375 375 LogFlow(("pdmR0DevHlp_GetVM: caller='%p'/%d\n", pDevIns, pDevIns->iInstance)); 376 return H WACCMCanEmulateIoBlock(VMMGetCpu(pDevIns->Internal.s.pVMR0));376 return HMCanEmulateIoBlock(VMMGetCpu(pDevIns->Internal.s.pVMR0)); 377 377 } 378 378 -
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r43379 r43387 37 37 #include <VBox/vmm/gmm.h> 38 38 #include <VBox/intnet.h> 39 #include <VBox/vmm/h waccm.h>39 #include <VBox/vmm/hm.h> 40 40 #include <VBox/param.h> 41 41 #include <VBox/err.h> … … 117 117 118 118 /* 119 * Initialize the VMM, GVMM, GMM, H WACCM, PGM (Darwin) and INTNET.119 * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET. 120 120 */ 121 121 int rc = vmmInitFormatTypes(); … … 128 128 if (RT_SUCCESS(rc)) 129 129 { 130 rc = H WACCMR0Init();130 rc = HMR0Init(); 131 131 if (RT_SUCCESS(rc)) 132 132 { … … 188 188 else 189 189 LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc)); 190 H WACCMR0Term();190 HMR0Term(); 191 191 } 192 192 else 193 LogRel(("ModuleInit: H WACCMR0Init -> %Rrc\n", rc));193 LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc)); 194 194 GMMR0Term(); 195 195 } … … 231 231 232 232 /* 233 * PGM (Darwin), H WACCM and PciRaw global cleanup.233 * PGM (Darwin), HM and PciRaw global cleanup. 234 234 */ 235 235 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE … … 240 240 #endif 241 241 PGMDeregisterStringFormatTypes(); 242 H WACCMR0Term();242 HMR0Term(); 243 243 #ifdef VBOX_WITH_TRIPLE_FAULT_HACK 244 244 vmmR0TripleFaultHackTerm(); … … 342 342 { 343 343 /* 344 * Init H WACCM, CPUM and PGM (Darwin only).345 */ 346 rc = H WACCMR0InitVM(pVM);344 * Init HM, CPUM and PGM (Darwin only). 345 */ 346 rc = HMR0InitVM(pVM); 347 347 if (RT_SUCCESS(rc)) 348 348 { … … 370 370 PciRawR0TermVM(pVM); 371 371 #endif 372 H WACCMR0TermVM(pVM);372 HMR0TermVM(pVM); 373 373 } 374 374 } … … 410 410 PGMR0DynMapTermVM(pVM); 411 411 #endif 412 H WACCMR0TermVM(pVM);412 HMR0TermVM(pVM); 413 413 } 414 414 … … 603 603 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest); 604 604 break; 605 case VINF_EM_H WACCM_PATCH_TPR_INSTR:605 case VINF_EM_HM_PATCH_TPR_INSTR: 606 606 STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR); 607 607 break; … … 662 662 /* Some safety precautions first. */ 663 663 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 664 if (RT_LIKELY( !pVM->vmm.s.fSwitcherDisabled /* h waccm */664 if (RT_LIKELY( !pVM->vmm.s.fSwitcherDisabled /* hm */ 665 665 && pVM->cCpus == 1 /* !smp */ 666 666 && PGMGetHyperCR3(pVCpu))) … … 683 683 /* We might need to disable VT-x if the active switcher turns off paging. */ 684 684 bool fVTxDisabled; 685 int rc = H WACCMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);685 int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled); 686 686 if (RT_SUCCESS(rc)) 687 687 { … … 705 705 706 706 /* Re-enable VT-x if previously turned off. */ 707 H WACCMR0LeaveSwitcher(pVM, fVTxDisabled);707 HMR0LeaveSwitcher(pVM, fVTxDisabled); 708 708 709 709 if ( rc == VINF_EM_RAW_INTERRUPT … … 770 770 #endif 771 771 int rc; 772 if (!H WACCMR0SuspendPending())772 if (!HMR0SuspendPending()) 773 773 { 774 rc = H WACCMR0Enter(pVM, pVCpu);774 rc = HMR0Enter(pVM, pVCpu); 775 775 if (RT_SUCCESS(rc)) 776 776 { 777 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, H WACCMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */778 int rc2 = H WACCMR0Leave(pVM, pVCpu);777 rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu); /* this may resume code. */ 778 int rc2 = HMR0Leave(pVM, pVCpu); 779 779 AssertRC(rc2); 780 780 } … … 962 962 963 963 /* 964 * Attempt to enable h waccmode and check the current setting.964 * Attempt to enable hm mode and check the current setting. 
965 965 */ 966 966 case VMMR0_DO_HWACC_ENABLE: 967 return H WACCMR0EnableAllCpus(pVM);967 return HMR0EnableAllCpus(pVM); 968 968 969 969 /* … … 971 971 */ 972 972 case VMMR0_DO_HWACC_SETUP_VM: 973 return H WACCMR0SetupVM(pVM);973 return HMR0SetupVM(pVM); 974 974 975 975 /* … … 981 981 bool fVTxDisabled; 982 982 983 /* Safety precaution as H WACCM can disable the switcher. */983 /* Safety precaution as HM can disable the switcher. */ 984 984 Assert(!pVM->vmm.s.fSwitcherDisabled); 985 985 if (RT_UNLIKELY(pVM->vmm.s.fSwitcherDisabled)) … … 999 999 1000 1000 /* We might need to disable VT-x if the active switcher turns off paging. */ 1001 rc = H WACCMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);1001 rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled); 1002 1002 if (RT_FAILURE(rc)) 1003 1003 return rc; … … 1006 1006 1007 1007 /* Re-enable VT-x if previously turned off. */ 1008 H WACCMR0LeaveSwitcher(pVM, fVTxDisabled);1008 HMR0LeaveSwitcher(pVM, fVTxDisabled); 1009 1009 1010 1010 /** @todo dispatch interrupts? */ … … 1284 1284 if (idCpu == NIL_VMCPUID) 1285 1285 return VERR_INVALID_CPU_ID; 1286 return H WACCMR0TestSwitcher3264(pVM);1286 return HMR0TestSwitcher3264(pVM); 1287 1287 #endif 1288 1288 default: -
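The VMMR0.cpp hunk above renames the ring-0 execution bracket: HMR0Enter, then HMR0RunGuestCode under a vmmR0CallRing3SetJmp guard, then HMR0Leave, with HMR0SuspendPending checked first. The sketch below only illustrates that enter/run/leave discipline; every name, type and return code in it is invented for illustration and is not the actual VMM ring-0 API.

    #include <stdio.h>
    #include <stdbool.h>

    /* Everything here is hypothetical and exists only to show the bracket shape. */
    typedef int STATUS;
    #define STATUS_OK         0
    #define STATUS_SUSPENDED  1

    typedef struct { int idCpu; } CPUCTX;

    static bool   hostSuspendPending(void)   { return false; }                /* stub */
    static STATUS hwEnter(CPUCTX *pCtx)      { (void)pCtx; return STATUS_OK; } /* set up per-CPU state */
    static STATUS hwRunGuest(CPUCTX *pCtx)   { (void)pCtx; return STATUS_OK; } /* inner run loop */
    static STATUS hwLeave(CPUCTX *pCtx)      { (void)pCtx; return STATUS_OK; } /* undo hwEnter */

    /* The enter/run/leave bracket: hwLeave() runs even when the guest loop
     * returns early for ring-3 work, so per-CPU state is never left behind. */
    static STATUS runGuestBracket(CPUCTX *pCtx)
    {
        if (hostSuspendPending())
            return STATUS_SUSPENDED;   /* don't enter VT-x/AMD-V across a host suspend */

        STATUS rc = hwEnter(pCtx);
        if (rc == STATUS_OK)
        {
            rc = hwRunGuest(pCtx);
            STATUS rc2 = hwLeave(pCtx);
            if (rc == STATUS_OK)
                rc = rc2;
        }
        return rc;
    }

    int main(void)
    {
        CPUCTX Ctx = { 0 };
        printf("bracket rc=%d\n", runGuestBracket(&Ctx));
        return 0;
    }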
trunk/src/VBox/VMM/VMMR3/CPUM.cpp
r42894 r43387 44 44 #include <VBox/vmm/dbgf.h> 45 45 #include <VBox/vmm/patm.h> 46 #include <VBox/vmm/h waccm.h>46 #include <VBox/vmm/hm.h> 47 47 #include <VBox/vmm/ssm.h> 48 48 #include "CPUMInternal.h" … … 1943 1943 * intercept CPUID instructions for user mode applications. 1944 1944 */ 1945 if (!H WACCMIsEnabled(pVM))1945 if (!HMIsEnabled(pVM)) 1946 1946 { 1947 1947 /* CPUID(0) */ … … 2510 2510 { 2511 2511 PVMCPU pVCpu = &pVM->aCpus[iCpu]; 2512 bool const fValid = H WACCMIsEnabled(pVM)2512 bool const fValid = HMIsEnabled(pVM) 2513 2513 || ( uVersion > CPUM_SAVED_STATE_VERSION_VER3_2 2514 2514 && !(pVCpu->cpum.s.fChanged & CPUM_CHANGED_HIDDEN_SEL_REGS_INVALID)); … … 3943 3943 pState->pvPageGC = GCPtr & PAGE_BASE_GC_MASK; 3944 3944 if ( MMHyperIsInsideArea(pState->pVM, pState->pvPageGC) 3945 && !H WACCMIsEnabled(pState->pVM))3945 && !HMIsEnabled(pState->pVM)) 3946 3946 { 3947 3947 pState->pvPageR3 = MMHyperRCToR3(pState->pVM, (RTRCPTR)pState->pvPageGC); -
trunk/src/VBox/VMM/VMMR3/DBGF.cpp
r42833 r43387 77 77 #endif 78 78 #include <VBox/vmm/em.h> 79 #include <VBox/vmm/h waccm.h>79 #include <VBox/vmm/hm.h> 80 80 #include "DBGFInternal.h" 81 81 #include <VBox/vmm/vm.h> … … 253 253 int cWait = 10; 254 254 # else 255 int cWait = H WACCMIsEnabled(pVM)255 int cWait = HMIsEnabled(pVM) 256 256 && ( enmEvent == DBGFEVENT_ASSERTION_HYPER 257 257 || enmEvent == DBGFEVENT_FATAL_ERROR) -
trunk/src/VBox/VMM/VMMR3/DBGFMem.cpp
r42165 r43387 24 24 #include <VBox/vmm/pgm.h> 25 25 #include <VBox/vmm/selm.h> 26 #include <VBox/vmm/h waccm.h>26 #include <VBox/vmm/hm.h> 27 27 #include "DBGFInternal.h" 28 28 #include <VBox/vmm/vm.h> … … 421 421 else 422 422 { 423 if (H WACCMIsEnabled(pVM))423 if (HMIsEnabled(pVM)) 424 424 rc = VERR_INVALID_STATE; 425 425 else -
trunk/src/VBox/VMM/VMMR3/EM.cpp
r42698 r43387 26 26 * 27 27 * The interpreted execution is only used to avoid switching between 28 * raw-mode/h waccm and the recompiler when fielding virtualization traps/faults.28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults. 29 29 * The interpretation is thus implemented as part of EM. 30 30 * … … 56 56 #include <VBox/vmm/pdmcritsect.h> 57 57 #include <VBox/vmm/pdmqueue.h> 58 #include <VBox/vmm/h waccm.h>58 #include <VBox/vmm/hm.h> 59 59 #include <VBox/vmm/patm.h> 60 60 #ifdef IEM_VERIFICATION_MODE … … 80 80 *******************************************************************************/ 81 81 #if 0 /* Disabled till after 2.1.0 when we've time to test it. */ 82 #define EM_NOTIFY_H WACCM82 #define EM_NOTIFY_HM 83 83 #endif 84 84 … … 632 632 633 633 /* 634 * Force rescheduling if in RAW, H WACCM or REM.634 * Force rescheduling if in RAW, HM or REM. 635 635 */ 636 636 return pVCpu->em.s.enmState == EMSTATE_RAW … … 1187 1187 1188 1188 X86EFLAGS EFlags = pCtx->eflags; 1189 if (H WACCMIsEnabled(pVM))1189 if (HMIsEnabled(pVM)) 1190 1190 { 1191 1191 /* … … 1196 1196 */ 1197 1197 if ( EMIsHwVirtExecutionEnabled(pVM) 1198 && H WACCMR3CanExecuteGuest(pVM, pCtx))1198 && HMR3CanExecuteGuest(pVM, pCtx)) 1199 1199 return EMSTATE_HWACC; 1200 1200 … … 1689 1689 && !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */ 1690 1690 && PATMAreInterruptsEnabled(pVM) 1691 && !H WACCMR3IsEventPending(pVCpu))1691 && !HMR3IsEventPending(pVCpu)) 1692 1692 { 1693 1693 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI); -
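The EM.cpp hunk above touches the scheduling decision that sends the guest either to hardware-assisted execution (EMSTATE_HWACC, when HMIsEnabled and HMR3CanExecuteGuest both agree) or back to raw mode / the recompiler. A loose, hypothetical sketch of that three-way choice, with invented names and fields:

    #include <stdio.h>

    /* Hypothetical execution-mode selection, loosely modelled on the decision
     * visible in the hunk above.  All names and fields are invented. */
    typedef enum { EXEC_HW, EXEC_RAW, EXEC_REM } EXECMODE;

    typedef struct
    {
        int fHwVirtEnabled;   /* is VT-x/AMD-V in use for this VM?           */
        int fCtxOkForHw;      /* can the current guest context run under it? */
        int fCtxOkForRaw;     /* can it run in raw (ring-1) mode instead?    */
    } SCHEDINPUT;

    static EXECMODE pickExecMode(const SCHEDINPUT *pIn)
    {
        if (pIn->fHwVirtEnabled)
            return pIn->fCtxOkForHw ? EXEC_HW : EXEC_REM;   /* HM or recompiler  */
        return pIn->fCtxOkForRaw ? EXEC_RAW : EXEC_REM;     /* raw or recompiler */
    }

    int main(void)
    {
        SCHEDINPUT In = { 1 /* hw virt on */, 1 /* ctx ok */, 0 };
        printf("mode=%d\n", (int)pickExecMode(&In));
        return 0;
    }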
trunk/src/VBox/VMM/VMMR3/EMHM.cpp
r43373 r43387 38 38 #include <VBox/vmm/pdmcritsect.h> 39 39 #include <VBox/vmm/pdmqueue.h> 40 #include <VBox/vmm/h waccm.h>40 #include <VBox/vmm/hm.h> 41 41 #include "EMInternal.h" 42 42 #include "internal/em.h" … … 55 55 *******************************************************************************/ 56 56 #if 0 /* Disabled till after 2.1.0 when we've time to test it. */ 57 #define EM_NOTIFY_H WACCM57 #define EM_NOTIFY_HM 58 58 #endif 59 59 … … 66 66 static int emR3HwaccmForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx); 67 67 68 #define EMHANDLERC_WITH_H WACCM68 #define EMHANDLERC_WITH_HM 69 69 #include "EMHandleRCTmpl.h" 70 70 … … 141 141 rc = emR3HwAccStep(pVM, pVCpu); 142 142 if ( rc != VINF_SUCCESS 143 || !H WACCMR3CanExecuteGuest(pVM, pVCpu->em.s.pCtx))143 || !HMR3CanExecuteGuest(pVM, pVCpu->em.s.pCtx)) 144 144 break; 145 145 } … … 216 216 if (RT_SUCCESS(rc)) 217 217 { 218 #ifdef EM_NOTIFY_H WACCM218 #ifdef EM_NOTIFY_HM 219 219 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC) 220 H WACCMR3NotifyEmulated(pVCpu);220 HMR3NotifyEmulated(pVCpu); 221 221 #endif 222 222 STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a); … … 246 246 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, a); 247 247 248 #ifdef EM_NOTIFY_H WACCM248 #ifdef EM_NOTIFY_HM 249 249 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC) 250 H WACCMR3NotifyEmulated(pVCpu);250 HMR3NotifyEmulated(pVCpu); 251 251 #endif 252 252 return rc; … … 288 288 289 289 /* Try to restart the io instruction that was refused in ring-0. */ 290 VBOXSTRICTRC rcStrict = H WACCMR3RestartPendingIOInstr(pVM, pVCpu, pCtx);290 VBOXSTRICTRC rcStrict = HMR3RestartPendingIOInstr(pVM, pVCpu, pCtx); 291 291 if (IOM_SUCCESS(rcStrict)) 292 292 { … … 474 474 STAM_COUNTER_INC(&pVCpu->em.s.StatHwAccExecuteEntry); 475 475 476 #ifdef EM_NOTIFY_H WACCM477 H WACCMR3NotifyScheduled(pVCpu);476 #ifdef EM_NOTIFY_HM 477 HMR3NotifyScheduled(pVCpu); 478 478 #endif 479 479 … … 486 486 487 487 /* Check if a forced reschedule is pending. */ 488 if (H WACCMR3IsRescheduleRequired(pVM, pCtx))488 if (HMR3IsRescheduleRequired(pVM, pCtx)) 489 489 { 490 490 rc = VINF_EM_RESCHEDULE; … … 495 495 * Process high priority pre-execution raw-mode FFs. 496 496 */ 497 VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS)); /* not relevant in H WACCM mode; shouldn't be set really. */497 VMCPU_FF_CLEAR(pVCpu, (VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_TSS)); /* not relevant in HM mode; shouldn't be set really. */ 498 498 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK) 499 499 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK)) -
trunk/src/VBox/VMM/VMMR3/HM.cpp
r43373 r43387 1 1 /* $Id$ */ 2 2 /** @file 3 * H WACCM - Intel/AMD VM Hardware Support Manager.3 * HM - Intel/AMD VM Hardware Support Manager. 4 4 */ 5 5 … … 19 19 * Header Files * 20 20 *******************************************************************************/ 21 #define LOG_GROUP LOG_GROUP_H WACCM21 #define LOG_GROUP LOG_GROUP_HM 22 22 #include <VBox/vmm/cpum.h> 23 23 #include <VBox/vmm/stam.h> … … 35 35 # include <VBox/vmm/rem.h> 36 36 #endif 37 #include <VBox/vmm/h wacc_vmx.h>38 #include <VBox/vmm/h wacc_svm.h>39 #include "H WACCMInternal.h"37 #include <VBox/vmm/hm_vmx.h> 38 #include <VBox/vmm/hm_svm.h> 39 #include "HMInternal.h" 40 40 #include <VBox/vmm/vm.h> 41 41 #include <VBox/err.h> … … 271 271 * Internal Functions * 272 272 *******************************************************************************/ 273 static DECLCALLBACK(int) h waccmR3Save(PVM pVM, PSSMHANDLE pSSM);274 static DECLCALLBACK(int) h waccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);275 static int h waccmR3InitCPU(PVM pVM);276 static int h waccmR3InitFinalizeR0(PVM pVM);277 static int h waccmR3TermCPU(PVM pVM);278 279 280 /** 281 * Initializes the H WACCM.273 static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM); 274 static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass); 275 static int hmR3InitCPU(PVM pVM); 276 static int hmR3InitFinalizeR0(PVM pVM); 277 static int hmR3TermCPU(PVM pVM); 278 279 280 /** 281 * Initializes the HM. 282 282 * 283 283 * @returns VBox status code. 284 284 * @param pVM Pointer to the VM. 285 285 */ 286 VMMR3DECL(int) H WACCMR3Init(PVM pVM)287 { 288 LogFlow(("H WACCMR3Init\n"));286 VMMR3DECL(int) HMR3Init(PVM pVM) 287 { 288 LogFlow(("HMR3Init\n")); 289 289 290 290 /* 291 291 * Assert alignment and sizes. 292 292 */ 293 AssertCompileMemberAlignment(VM, h waccm.s, 32);294 AssertCompile(sizeof(pVM->h waccm.s) <= sizeof(pVM->hwaccm.padding));293 AssertCompileMemberAlignment(VM, hm.s, 32); 294 AssertCompile(sizeof(pVM->hm.s) <= sizeof(pVM->hm.padding)); 295 295 296 296 /* Some structure checks. */ … … 315 315 * Register the saved state data unit. 316 316 */ 317 int rc = SSMR3RegisterInternal(pVM, "H WACCM", 0, HWACCM_SSM_VERSION, sizeof(HWACCM),317 int rc = SSMR3RegisterInternal(pVM, "HM", 0, HM_SSM_VERSION, sizeof(HM), 318 318 NULL, NULL, NULL, 319 NULL, h waccmR3Save, NULL,320 NULL, h waccmR3Load, NULL);319 NULL, hmR3Save, NULL, 320 NULL, hmR3Load, NULL); 321 321 if (RT_FAILURE(rc)) 322 322 return rc; 323 323 324 324 /* Misc initialisation. */ 325 pVM->h waccm.s.vmx.fSupported = false;326 pVM->h waccm.s.svm.fSupported = false;327 pVM->h waccm.s.vmx.fEnabled = false;328 pVM->h waccm.s.svm.fEnabled = false;329 330 pVM->h waccm.s.fNestedPaging = false;331 pVM->h waccm.s.fLargePages = false;325 pVM->hm.s.vmx.fSupported = false; 326 pVM->hm.s.svm.fSupported = false; 327 pVM->hm.s.vmx.fEnabled = false; 328 pVM->hm.s.svm.fEnabled = false; 329 330 pVM->hm.s.fNestedPaging = false; 331 pVM->hm.s.fLargePages = false; 332 332 333 333 /* Disabled by default. */ 334 pVM->fH WACCMEnabled = false;334 pVM->fHMEnabled = false; 335 335 336 336 /* … … 340 340 PCFGMNODE pHWVirtExt = CFGMR3GetChild(pRoot, "HWVirtExt/"); 341 341 /* Nested paging: disabled by default. 
*/ 342 rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableNestedPaging", &pVM->h waccm.s.fAllowNestedPaging, false);342 rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableNestedPaging", &pVM->hm.s.fAllowNestedPaging, false); 343 343 AssertRC(rc); 344 344 345 345 /* Large pages: disabled by default. */ 346 rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableLargePages", &pVM->h waccm.s.fLargePages, false);346 rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableLargePages", &pVM->hm.s.fLargePages, false); 347 347 AssertRC(rc); 348 348 349 349 /* VT-x VPID: disabled by default. */ 350 rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableVPID", &pVM->h waccm.s.vmx.fAllowVPID, false);350 rc = CFGMR3QueryBoolDef(pHWVirtExt, "EnableVPID", &pVM->hm.s.vmx.fAllowVPID, false); 351 351 AssertRC(rc); 352 352 353 /* H WACCM support must be explicitely enabled in the configuration file. */354 rc = CFGMR3QueryBoolDef(pHWVirtExt, "Enabled", &pVM->h waccm.s.fAllowed, false);353 /* HM support must be explicitely enabled in the configuration file. */ 354 rc = CFGMR3QueryBoolDef(pHWVirtExt, "Enabled", &pVM->hm.s.fAllowed, false); 355 355 AssertRC(rc); 356 356 357 357 /* TPR patching for 32 bits (Windows) guests with IO-APIC: disabled by default. */ 358 rc = CFGMR3QueryBoolDef(pHWVirtExt, "TPRPatchingEnabled", &pVM->h waccm.s.fTRPPatchingAllowed, false);358 rc = CFGMR3QueryBoolDef(pHWVirtExt, "TPRPatchingEnabled", &pVM->hm.s.fTRPPatchingAllowed, false); 359 359 AssertRC(rc); 360 360 361 361 #ifdef RT_OS_DARWIN 362 if (VMMIsHwVirtExtForced(pVM) != pVM->h waccm.s.fAllowed)362 if (VMMIsHwVirtExtForced(pVM) != pVM->hm.s.fAllowed) 363 363 #else 364 if (VMMIsHwVirtExtForced(pVM) && !pVM->h waccm.s.fAllowed)364 if (VMMIsHwVirtExtForced(pVM) && !pVM->hm.s.fAllowed) 365 365 #endif 366 366 { 367 367 AssertLogRelMsgFailed(("VMMIsHwVirtExtForced=%RTbool fAllowed=%RTbool\n", 368 VMMIsHwVirtExtForced(pVM), pVM->h waccm.s.fAllowed));369 return VERR_H WACCM_CONFIG_MISMATCH;368 VMMIsHwVirtExtForced(pVM), pVM->hm.s.fAllowed)); 369 return VERR_HM_CONFIG_MISMATCH; 370 370 } 371 371 372 372 if (VMMIsHwVirtExtForced(pVM)) 373 pVM->fH WACCMEnabled = true;373 pVM->fHMEnabled = true; 374 374 375 375 #if HC_ARCH_BITS == 32 … … 378 378 * (To use the default, don't set 64bitEnabled in CFGM.) 379 379 */ 380 rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->h waccm.s.fAllow64BitGuests, false);380 rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hm.s.fAllow64BitGuests, false); 381 381 AssertLogRelRCReturn(rc, rc); 382 if (pVM->h waccm.s.fAllow64BitGuests)382 if (pVM->hm.s.fAllow64BitGuests) 383 383 { 384 384 # ifdef RT_OS_DARWIN 385 385 if (!VMMIsHwVirtExtForced(pVM)) 386 386 # else 387 if (!pVM->h waccm.s.fAllowed)387 if (!pVM->hm.s.fAllowed) 388 388 # endif 389 389 return VM_SET_ERROR(pVM, VERR_INVALID_PARAMETER, "64-bit guest support was requested without also enabling HWVirtEx (VT-x/AMD-V)."); … … 394 394 * via VBoxInternal/HWVirtExt/64bitEnabled=0. (ConsoleImpl2.cpp doesn't set this to false for 64-bit.)* 395 395 */ 396 rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->h waccm.s.fAllow64BitGuests, true);396 rc = CFGMR3QueryBoolDef(pHWVirtExt, "64bitEnabled", &pVM->hm.s.fAllow64BitGuests, true); 397 397 AssertLogRelRCReturn(rc, rc); 398 398 #endif … … 405 405 * Default false for Mac OS X and Windows due to the higher risk of conflicts with other hypervisors. 
406 406 */ 407 rc = CFGMR3QueryBoolDef(pHWVirtExt, "Exclusive", &pVM->h waccm.s.fGlobalInit,407 rc = CFGMR3QueryBoolDef(pHWVirtExt, "Exclusive", &pVM->hm.s.fGlobalInit, 408 408 #if defined(RT_OS_DARWIN) || defined(RT_OS_WINDOWS) 409 409 false … … 414 414 415 415 /* Max number of resume loops. */ 416 rc = CFGMR3QueryU32Def(pHWVirtExt, "MaxResumeLoops", &pVM->h waccm.s.cMaxResumeLoops, 0 /* set by R0 later */);416 rc = CFGMR3QueryU32Def(pHWVirtExt, "MaxResumeLoops", &pVM->hm.s.cMaxResumeLoops, 0 /* set by R0 later */); 417 417 AssertRC(rc); 418 418 … … 422 422 423 423 /** 424 * Initializes the per-VCPU H WACCM.424 * Initializes the per-VCPU HM. 425 425 * 426 426 * @returns VBox status code. 427 427 * @param pVM Pointer to the VM. 428 428 */ 429 static int h waccmR3InitCPU(PVM pVM)430 { 431 LogFlow(("H WACCMR3InitCPU\n"));429 static int hmR3InitCPU(PVM pVM) 430 { 431 LogFlow(("HMR3InitCPU\n")); 432 432 433 433 for (VMCPUID i = 0; i < pVM->cCpus; i++) … … 435 435 PVMCPU pVCpu = &pVM->aCpus[i]; 436 436 437 pVCpu->h waccm.s.fActive = false;437 pVCpu->hm.s.fActive = false; 438 438 } 439 439 440 440 #ifdef VBOX_WITH_STATISTICS 441 STAM_REG(pVM, &pVM->h waccm.s.StatTPRPatchSuccess, STAMTYPE_COUNTER, "/HWACCM/TPR/Patch/Success", STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");442 STAM_REG(pVM, &pVM->h waccm.s.StatTPRPatchFailure, STAMTYPE_COUNTER, "/HWACCM/TPR/Patch/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");443 STAM_REG(pVM, &pVM->h waccm.s.StatTPRReplaceSuccess, STAMTYPE_COUNTER, "/HWACCM/TPR/Replace/Success",STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched.");444 STAM_REG(pVM, &pVM->h waccm.s.StatTPRReplaceFailure, STAMTYPE_COUNTER, "/HWACCM/TPR/Replace/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts.");441 STAM_REG(pVM, &pVM->hm.s.StatTPRPatchSuccess, STAMTYPE_COUNTER, "/HM/TPR/Patch/Success", STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched."); 442 STAM_REG(pVM, &pVM->hm.s.StatTPRPatchFailure, STAMTYPE_COUNTER, "/HM/TPR/Patch/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts."); 443 STAM_REG(pVM, &pVM->hm.s.StatTPRReplaceSuccess, STAMTYPE_COUNTER, "/HM/TPR/Replace/Success",STAMUNIT_OCCURENCES, "Number of times an instruction was successfully patched."); 444 STAM_REG(pVM, &pVM->hm.s.StatTPRReplaceFailure, STAMTYPE_COUNTER, "/HM/TPR/Replace/Failed", STAMUNIT_OCCURENCES, "Number of unsuccessful patch attempts."); 445 445 446 446 /* … … 452 452 int rc; 453 453 454 rc = STAMR3RegisterF(pVM, &pVCpu->h waccm.s.StatPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of RTMpPokeCpu",455 "/PROF/H WACCM/CPU%d/Poke", i);454 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of RTMpPokeCpu", 455 "/PROF/HM/CPU%d/Poke", i); 456 456 AssertRC(rc); 457 rc = STAMR3RegisterF(pVM, &pVCpu->h waccm.s.StatSpinPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of poke wait",458 "/PROF/H WACCM/CPU%d/PokeWait", i);457 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatSpinPoke, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of poke wait", 458 "/PROF/HM/CPU%d/PokeWait", i); 459 459 AssertRC(rc); 460 rc = STAMR3RegisterF(pVM, &pVCpu->h waccm.s.StatSpinPokeFailed, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of poke wait when RTMpPokeCpu fails",461 "/PROF/H WACCM/CPU%d/PokeWaitFailed", 
i);460 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatSpinPokeFailed, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of poke wait when RTMpPokeCpu fails", 461 "/PROF/HM/CPU%d/PokeWaitFailed", i); 462 462 AssertRC(rc); 463 rc = STAMR3RegisterF(pVM, &pVCpu->h waccm.s.StatEntry, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode entry",464 "/PROF/H WACCM/CPU%d/SwitchToGC", i);463 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatEntry, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode entry", 464 "/PROF/HM/CPU%d/SwitchToGC", i); 465 465 AssertRC(rc); 466 rc = STAMR3RegisterF(pVM, &pVCpu->h waccm.s.StatExit1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 1",467 "/PROF/H WACCM/CPU%d/SwitchFromGC_1", i);466 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 1", 467 "/PROF/HM/CPU%d/SwitchFromGC_1", i); 468 468 AssertRC(rc); 469 rc = STAMR3RegisterF(pVM, &pVCpu->h waccm.s.StatExit2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 2",470 "/PROF/H WACCM/CPU%d/SwitchFromGC_2", i);469 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of VMXR0RunGuestCode exit part 2", 470 "/PROF/HM/CPU%d/SwitchFromGC_2", i); 471 471 AssertRC(rc); 472 472 # if 1 /* temporary for tracking down darwin holdup. */ 473 rc = STAMR3RegisterF(pVM, &pVCpu->h waccm.s.StatExit2Sub1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - I/O",474 "/PROF/H WACCM/CPU%d/SwitchFromGC_2/Sub1", i);473 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit2Sub1, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - I/O", 474 "/PROF/HM/CPU%d/SwitchFromGC_2/Sub1", i); 475 475 AssertRC(rc); 476 rc = STAMR3RegisterF(pVM, &pVCpu->h waccm.s.StatExit2Sub2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - CRx RWs",477 "/PROF/H WACCM/CPU%d/SwitchFromGC_2/Sub2", i);476 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit2Sub2, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - CRx RWs", 477 "/PROF/HM/CPU%d/SwitchFromGC_2/Sub2", i); 478 478 AssertRC(rc); 479 rc = STAMR3RegisterF(pVM, &pVCpu->h waccm.s.StatExit2Sub3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - Exceptions",480 "/PROF/H WACCM/CPU%d/SwitchFromGC_2/Sub3", i);479 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExit2Sub3, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Temporary - Exceptions", 480 "/PROF/HM/CPU%d/SwitchFromGC_2/Sub3", i); 481 481 AssertRC(rc); 482 482 # endif 483 rc = STAMR3RegisterF(pVM, &pVCpu->h waccm.s.StatInGC, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of vmlaunch",484 "/PROF/H WACCM/CPU%d/InGC", i);483 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatInGC, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of vmlaunch", 484 "/PROF/HM/CPU%d/InGC", i); 485 485 AssertRC(rc); 486 486 487 487 # if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 488 rc = STAMR3RegisterF(pVM, &pVCpu->h waccm.s.StatWorldSwitch3264, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of the 32/64 switcher",489 "/PROF/H 
WACCM/CPU%d/Switcher3264", i);488 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatWorldSwitch3264, STAMTYPE_PROFILE, STAMVISIBILITY_USED, STAMUNIT_TICKS_PER_CALL, "Profiling of the 32/64 switcher", 489 "/PROF/HM/CPU%d/Switcher3264", i); 490 490 AssertRC(rc); 491 491 # endif 492 492 493 # define H WACCM_REG_COUNTER(a, b) \493 # define HM_REG_COUNTER(a, b) \ 494 494 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Profiling of vmlaunch", b, i); \ 495 495 AssertRC(rc); 496 496 497 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitShadowNM, "/HWACCM/CPU%d/Exit/Trap/Shw/#NM");498 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestNM, "/HWACCM/CPU%d/Exit/Trap/Gst/#NM");499 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitShadowPF, "/HWACCM/CPU%d/Exit/Trap/Shw/#PF");500 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitShadowPFEM, "/HWACCM/CPU%d/Exit/Trap/Shw/#PF-EM");501 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestPF, "/HWACCM/CPU%d/Exit/Trap/Gst/#PF");502 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestUD, "/HWACCM/CPU%d/Exit/Trap/Gst/#UD");503 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestSS, "/HWACCM/CPU%d/Exit/Trap/Gst/#SS");504 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestNP, "/HWACCM/CPU%d/Exit/Trap/Gst/#NP");505 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestGP, "/HWACCM/CPU%d/Exit/Trap/Gst/#GP");506 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestMF, "/HWACCM/CPU%d/Exit/Trap/Gst/#MF");507 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestDE, "/HWACCM/CPU%d/Exit/Trap/Gst/#DE");508 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestDB, "/HWACCM/CPU%d/Exit/Trap/Gst/#DB");509 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestBP, "/HWACCM/CPU%d/Exit/Trap/Gst/#BP");510 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestXF, "/HWACCM/CPU%d/Exit/Trap/Gst/#XF");511 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitGuestXcpUnk, "/HWACCM/CPU%d/Exit/Trap/Gst/Other");512 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInvlpg, "/HWACCM/CPU%d/Exit/Instr/Invlpg");513 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInvd, "/HWACCM/CPU%d/Exit/Instr/Invd");514 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCpuid, "/HWACCM/CPU%d/Exit/Instr/Cpuid");515 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdtsc, "/HWACCM/CPU%d/Exit/Instr/Rdtsc");516 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdtscp, "/HWACCM/CPU%d/Exit/Instr/Rdtscp");517 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdpmc, "/HWACCM/CPU%d/Exit/Instr/Rdpmc");518 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitRdmsr, "/HWACCM/CPU%d/Exit/Instr/Rdmsr");519 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitWrmsr, "/HWACCM/CPU%d/Exit/Instr/Wrmsr");520 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMwait, "/HWACCM/CPU%d/Exit/Instr/Mwait");521 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMonitor, "/HWACCM/CPU%d/Exit/Instr/Monitor");522 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitDRxWrite, "/HWACCM/CPU%d/Exit/Instr/DR/Write");523 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitDRxRead, "/HWACCM/CPU%d/Exit/Instr/DR/Read");524 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCLTS, "/HWACCM/CPU%d/Exit/Instr/CLTS");525 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitLMSW, "/HWACCM/CPU%d/Exit/Instr/LMSW");526 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitCli, "/HWACCM/CPU%d/Exit/Instr/Cli");527 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitSti, "/HWACCM/CPU%d/Exit/Instr/Sti");528 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPushf, "/HWACCM/CPU%d/Exit/Instr/Pushf");529 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPopf, "/HWACCM/CPU%d/Exit/Instr/Popf");530 H 
WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIret, "/HWACCM/CPU%d/Exit/Instr/Iret");531 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitInt, "/HWACCM/CPU%d/Exit/Instr/Int");532 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitHlt, "/HWACCM/CPU%d/Exit/Instr/Hlt");533 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOWrite, "/HWACCM/CPU%d/Exit/IO/Write");534 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIORead, "/HWACCM/CPU%d/Exit/IO/Read");535 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOStringWrite, "/HWACCM/CPU%d/Exit/IO/WriteString");536 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIOStringRead, "/HWACCM/CPU%d/Exit/IO/ReadString");537 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitIrqWindow, "/HWACCM/CPU%d/Exit/IrqWindow");538 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMaxResume, "/HWACCM/CPU%d/Exit/MaxResume");539 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitPreemptPending, "/HWACCM/CPU%d/Exit/PreemptPending");540 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatExitMTF, "/HWACCM/CPU%d/Exit/MonitorTrapFlag");541 542 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatSwitchGuestIrq, "/HWACCM/CPU%d/Switch/IrqPending");543 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatSwitchToR3, "/HWACCM/CPU%d/Switch/ToR3");544 545 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatIntInject, "/HWACCM/CPU%d/Irq/Inject");546 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatIntReinject, "/HWACCM/CPU%d/Irq/Reinject");547 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatPendingHostIrq, "/HWACCM/CPU%d/Irq/PendingOnHost");548 549 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPage, "/HWACCM/CPU%d/Flush/Page");550 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPageManual, "/HWACCM/CPU%d/Flush/Page/Virt");551 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPhysPageManual, "/HWACCM/CPU%d/Flush/Page/Phys");552 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLB, "/HWACCM/CPU%d/Flush/TLB");553 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBManual, "/HWACCM/CPU%d/Flush/TLB/Manual");554 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBCRxChange, "/HWACCM/CPU%d/Flush/TLB/CRx");555 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushPageInvlpg, "/HWACCM/CPU%d/Flush/Page/Invlpg");556 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBWorldSwitch, "/HWACCM/CPU%d/Flush/TLB/Switch");557 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatNoFlushTLBWorldSwitch, "/HWACCM/CPU%d/Flush/TLB/Skipped");558 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushASID, "/HWACCM/CPU%d/Flush/TLB/ASID");559 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBInvlpga, "/HWACCM/CPU%d/Flush/TLB/PhysInvl");560 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdown, "/HWACCM/CPU%d/Flush/Shootdown/Page");561 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdownFlush, "/HWACCM/CPU%d/Flush/Shootdown/TLB");562 563 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCOffset, "/HWACCM/CPU%d/TSC/Offset");564 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCIntercept, "/HWACCM/CPU%d/TSC/Intercept");565 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCInterceptOverFlow, "/HWACCM/CPU%d/TSC/InterceptOverflow");566 567 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxArmed, "/HWACCM/CPU%d/Debug/Armed");568 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxContextSwitch, "/HWACCM/CPU%d/Debug/ContextSwitch");569 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDRxIOCheck, "/HWACCM/CPU%d/Debug/IOCheck");570 571 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatLoadMinimal, "/HWACCM/CPU%d/Load/Minimal");572 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatLoadFull, "/HWACCM/CPU%d/Load/Full");497 HM_REG_COUNTER(&pVCpu->hm.s.StatExitShadowNM, "/HM/CPU%d/Exit/Trap/Shw/#NM"); 498 
HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestNM, "/HM/CPU%d/Exit/Trap/Gst/#NM"); 499 HM_REG_COUNTER(&pVCpu->hm.s.StatExitShadowPF, "/HM/CPU%d/Exit/Trap/Shw/#PF"); 500 HM_REG_COUNTER(&pVCpu->hm.s.StatExitShadowPFEM, "/HM/CPU%d/Exit/Trap/Shw/#PF-EM"); 501 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestPF, "/HM/CPU%d/Exit/Trap/Gst/#PF"); 502 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestUD, "/HM/CPU%d/Exit/Trap/Gst/#UD"); 503 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestSS, "/HM/CPU%d/Exit/Trap/Gst/#SS"); 504 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestNP, "/HM/CPU%d/Exit/Trap/Gst/#NP"); 505 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestGP, "/HM/CPU%d/Exit/Trap/Gst/#GP"); 506 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestMF, "/HM/CPU%d/Exit/Trap/Gst/#MF"); 507 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestDE, "/HM/CPU%d/Exit/Trap/Gst/#DE"); 508 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestDB, "/HM/CPU%d/Exit/Trap/Gst/#DB"); 509 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestBP, "/HM/CPU%d/Exit/Trap/Gst/#BP"); 510 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXF, "/HM/CPU%d/Exit/Trap/Gst/#XF"); 511 HM_REG_COUNTER(&pVCpu->hm.s.StatExitGuestXcpUnk, "/HM/CPU%d/Exit/Trap/Gst/Other"); 512 HM_REG_COUNTER(&pVCpu->hm.s.StatExitInvlpg, "/HM/CPU%d/Exit/Instr/Invlpg"); 513 HM_REG_COUNTER(&pVCpu->hm.s.StatExitInvd, "/HM/CPU%d/Exit/Instr/Invd"); 514 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCpuid, "/HM/CPU%d/Exit/Instr/Cpuid"); 515 HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdtsc, "/HM/CPU%d/Exit/Instr/Rdtsc"); 516 HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdtscp, "/HM/CPU%d/Exit/Instr/Rdtscp"); 517 HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdpmc, "/HM/CPU%d/Exit/Instr/Rdpmc"); 518 HM_REG_COUNTER(&pVCpu->hm.s.StatExitRdmsr, "/HM/CPU%d/Exit/Instr/Rdmsr"); 519 HM_REG_COUNTER(&pVCpu->hm.s.StatExitWrmsr, "/HM/CPU%d/Exit/Instr/Wrmsr"); 520 HM_REG_COUNTER(&pVCpu->hm.s.StatExitMwait, "/HM/CPU%d/Exit/Instr/Mwait"); 521 HM_REG_COUNTER(&pVCpu->hm.s.StatExitMonitor, "/HM/CPU%d/Exit/Instr/Monitor"); 522 HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxWrite, "/HM/CPU%d/Exit/Instr/DR/Write"); 523 HM_REG_COUNTER(&pVCpu->hm.s.StatExitDRxRead, "/HM/CPU%d/Exit/Instr/DR/Read"); 524 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCLTS, "/HM/CPU%d/Exit/Instr/CLTS"); 525 HM_REG_COUNTER(&pVCpu->hm.s.StatExitLMSW, "/HM/CPU%d/Exit/Instr/LMSW"); 526 HM_REG_COUNTER(&pVCpu->hm.s.StatExitCli, "/HM/CPU%d/Exit/Instr/Cli"); 527 HM_REG_COUNTER(&pVCpu->hm.s.StatExitSti, "/HM/CPU%d/Exit/Instr/Sti"); 528 HM_REG_COUNTER(&pVCpu->hm.s.StatExitPushf, "/HM/CPU%d/Exit/Instr/Pushf"); 529 HM_REG_COUNTER(&pVCpu->hm.s.StatExitPopf, "/HM/CPU%d/Exit/Instr/Popf"); 530 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIret, "/HM/CPU%d/Exit/Instr/Iret"); 531 HM_REG_COUNTER(&pVCpu->hm.s.StatExitInt, "/HM/CPU%d/Exit/Instr/Int"); 532 HM_REG_COUNTER(&pVCpu->hm.s.StatExitHlt, "/HM/CPU%d/Exit/Instr/Hlt"); 533 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOWrite, "/HM/CPU%d/Exit/IO/Write"); 534 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIORead, "/HM/CPU%d/Exit/IO/Read"); 535 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringWrite, "/HM/CPU%d/Exit/IO/WriteString"); 536 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIOStringRead, "/HM/CPU%d/Exit/IO/ReadString"); 537 HM_REG_COUNTER(&pVCpu->hm.s.StatExitIrqWindow, "/HM/CPU%d/Exit/IrqWindow"); 538 HM_REG_COUNTER(&pVCpu->hm.s.StatExitMaxResume, "/HM/CPU%d/Exit/MaxResume"); 539 HM_REG_COUNTER(&pVCpu->hm.s.StatExitPreemptPending, "/HM/CPU%d/Exit/PreemptPending"); 540 HM_REG_COUNTER(&pVCpu->hm.s.StatExitMTF, "/HM/CPU%d/Exit/MonitorTrapFlag"); 541 542 HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchGuestIrq, "/HM/CPU%d/Switch/IrqPending"); 543 
HM_REG_COUNTER(&pVCpu->hm.s.StatSwitchToR3, "/HM/CPU%d/Switch/ToR3"); 544 545 HM_REG_COUNTER(&pVCpu->hm.s.StatIntInject, "/HM/CPU%d/Irq/Inject"); 546 HM_REG_COUNTER(&pVCpu->hm.s.StatIntReinject, "/HM/CPU%d/Irq/Reinject"); 547 HM_REG_COUNTER(&pVCpu->hm.s.StatPendingHostIrq, "/HM/CPU%d/Irq/PendingOnHost"); 548 549 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushPage, "/HM/CPU%d/Flush/Page"); 550 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushPageManual, "/HM/CPU%d/Flush/Page/Virt"); 551 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushPhysPageManual, "/HM/CPU%d/Flush/Page/Phys"); 552 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTLB, "/HM/CPU%d/Flush/TLB"); 553 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTLBManual, "/HM/CPU%d/Flush/TLB/Manual"); 554 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTLBCRxChange, "/HM/CPU%d/Flush/TLB/CRx"); 555 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushPageInvlpg, "/HM/CPU%d/Flush/Page/Invlpg"); 556 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTLBWorldSwitch, "/HM/CPU%d/Flush/TLB/Switch"); 557 HM_REG_COUNTER(&pVCpu->hm.s.StatNoFlushTLBWorldSwitch, "/HM/CPU%d/Flush/TLB/Skipped"); 558 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushASID, "/HM/CPU%d/Flush/TLB/ASID"); 559 HM_REG_COUNTER(&pVCpu->hm.s.StatFlushTLBInvlpga, "/HM/CPU%d/Flush/TLB/PhysInvl"); 560 HM_REG_COUNTER(&pVCpu->hm.s.StatTlbShootdown, "/HM/CPU%d/Flush/Shootdown/Page"); 561 HM_REG_COUNTER(&pVCpu->hm.s.StatTlbShootdownFlush, "/HM/CPU%d/Flush/Shootdown/TLB"); 562 563 HM_REG_COUNTER(&pVCpu->hm.s.StatTSCOffset, "/HM/CPU%d/TSC/Offset"); 564 HM_REG_COUNTER(&pVCpu->hm.s.StatTSCIntercept, "/HM/CPU%d/TSC/Intercept"); 565 HM_REG_COUNTER(&pVCpu->hm.s.StatTSCInterceptOverFlow, "/HM/CPU%d/TSC/InterceptOverflow"); 566 567 HM_REG_COUNTER(&pVCpu->hm.s.StatDRxArmed, "/HM/CPU%d/Debug/Armed"); 568 HM_REG_COUNTER(&pVCpu->hm.s.StatDRxContextSwitch, "/HM/CPU%d/Debug/ContextSwitch"); 569 HM_REG_COUNTER(&pVCpu->hm.s.StatDRxIOCheck, "/HM/CPU%d/Debug/IOCheck"); 570 571 HM_REG_COUNTER(&pVCpu->hm.s.StatLoadMinimal, "/HM/CPU%d/Load/Minimal"); 572 HM_REG_COUNTER(&pVCpu->hm.s.StatLoadFull, "/HM/CPU%d/Load/Full"); 573 573 574 574 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 575 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFpu64SwitchBack, "/HWACCM/CPU%d/Switch64/Fpu");576 H WACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatDebug64SwitchBack, "/HWACCM/CPU%d/Switch64/Debug");575 HM_REG_COUNTER(&pVCpu->hm.s.StatFpu64SwitchBack, "/HM/CPU%d/Switch64/Fpu"); 576 HM_REG_COUNTER(&pVCpu->hm.s.StatDebug64SwitchBack, "/HM/CPU%d/Switch64/Debug"); 577 577 #endif 578 578 579 for (unsigned j = 0; j < RT_ELEMENTS(pVCpu->h waccm.s.StatExitCRxWrite); j++)579 for (unsigned j = 0; j < RT_ELEMENTS(pVCpu->hm.s.StatExitCRxWrite); j++) 580 580 { 581 rc = STAMR3RegisterF(pVM, &pVCpu->h waccm.s.StatExitCRxWrite[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Profiling of CRx writes",582 "/H WACCM/CPU%d/Exit/Instr/CR/Write/%x", i, j);581 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitCRxWrite[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Profiling of CRx writes", 582 "/HM/CPU%d/Exit/Instr/CR/Write/%x", i, j); 583 583 AssertRC(rc); 584 rc = STAMR3RegisterF(pVM, &pVCpu->h waccm.s.StatExitCRxRead[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Profiling of CRx reads",585 "/H WACCM/CPU%d/Exit/Instr/CR/Read/%x", i, j);584 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitCRxRead[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Profiling of CRx reads", 585 "/HM/CPU%d/Exit/Instr/CR/Read/%x", i, j); 586 586 AssertRC(rc); 587 587 } 588 588 
589 #undef H WACCM_REG_COUNTER590 591 pVCpu->h waccm.s.paStatExitReason = NULL;592 593 rc = MMHyperAlloc(pVM, MAX_EXITREASON_STAT*sizeof(*pVCpu->h waccm.s.paStatExitReason), 0, MM_TAG_HWACCM, (void **)&pVCpu->hwaccm.s.paStatExitReason);589 #undef HM_REG_COUNTER 590 591 pVCpu->hm.s.paStatExitReason = NULL; 592 593 rc = MMHyperAlloc(pVM, MAX_EXITREASON_STAT*sizeof(*pVCpu->hm.s.paStatExitReason), 0, MM_TAG_HM, (void **)&pVCpu->hm.s.paStatExitReason); 594 594 AssertRC(rc); 595 595 if (RT_SUCCESS(rc)) … … 600 600 if (papszDesc[j]) 601 601 { 602 rc = STAMR3RegisterF(pVM, &pVCpu->h waccm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,603 papszDesc[j], "/H WACCM/CPU%d/Exit/Reason/%02x", i, j);602 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatExitReason[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, 603 papszDesc[j], "/HM/CPU%d/Exit/Reason/%02x", i, j); 604 604 AssertRC(rc); 605 605 } 606 606 } 607 rc = STAMR3RegisterF(pVM, &pVCpu->h waccm.s.StatExitReasonNPF, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Nested page fault", "/HWACCM/CPU%d/Exit/Reason/#NPF", i);607 rc = STAMR3RegisterF(pVM, &pVCpu->hm.s.StatExitReasonNPF, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Nested page fault", "/HM/CPU%d/Exit/Reason/#NPF", i); 608 608 AssertRC(rc); 609 609 } 610 pVCpu->h waccm.s.paStatExitReasonR0 = MMHyperR3ToR0(pVM, pVCpu->hwaccm.s.paStatExitReason);610 pVCpu->hm.s.paStatExitReasonR0 = MMHyperR3ToR0(pVM, pVCpu->hm.s.paStatExitReason); 611 611 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE 612 Assert(pVCpu->h waccm.s.paStatExitReasonR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));612 Assert(pVCpu->hm.s.paStatExitReasonR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM)); 613 613 # else 614 Assert(pVCpu->h waccm.s.paStatExitReasonR0 != NIL_RTR0PTR);614 Assert(pVCpu->hm.s.paStatExitReasonR0 != NIL_RTR0PTR); 615 615 # endif 616 616 617 rc = MMHyperAlloc(pVM, sizeof(STAMCOUNTER) * 256, 8, MM_TAG_H WACCM, (void **)&pVCpu->hwaccm.s.paStatInjectedIrqs);617 rc = MMHyperAlloc(pVM, sizeof(STAMCOUNTER) * 256, 8, MM_TAG_HM, (void **)&pVCpu->hm.s.paStatInjectedIrqs); 618 618 AssertRCReturn(rc, rc); 619 pVCpu->h waccm.s.paStatInjectedIrqsR0 = MMHyperR3ToR0(pVM, pVCpu->hwaccm.s.paStatInjectedIrqs);619 pVCpu->hm.s.paStatInjectedIrqsR0 = MMHyperR3ToR0(pVM, pVCpu->hm.s.paStatInjectedIrqs); 620 620 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE 621 Assert(pVCpu->h waccm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM));621 Assert(pVCpu->hm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR || !VMMIsHwVirtExtForced(pVM)); 622 622 # else 623 Assert(pVCpu->h waccm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR);623 Assert(pVCpu->hm.s.paStatInjectedIrqsR0 != NIL_RTR0PTR); 624 624 # endif 625 625 for (unsigned j = 0; j < 255; j++) 626 STAMR3RegisterF(pVM, &pVCpu->h waccm.s.paStatInjectedIrqs[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Forwarded interrupts.",627 (j < 0x20) ? "/H WACCM/CPU%d/Interrupt/Trap/%02X" : "/HWACCM/CPU%d/Interrupt/IRQ/%02X", i, j);626 STAMR3RegisterF(pVM, &pVCpu->hm.s.paStatInjectedIrqs[j], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, "Forwarded interrupts.", 627 (j < 0x20) ? 
"/HM/CPU%d/Interrupt/Trap/%02X" : "/HM/CPU%d/Interrupt/IRQ/%02X", i, j); 628 628 629 629 } … … 636 636 PVMCPU pVCpu = &pVM->aCpus[i]; 637 637 638 PVMCSCACHE pCache = &pVCpu->h waccm.s.vmx.VMCSCache;638 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache; 639 639 strcpy((char *)pCache->aMagic, "VMCSCACHE Magic"); 640 640 pCache->uMagic = UINT64_C(0xDEADBEEFDEADBEEF); … … 652 652 * @param enmWhat The phase that completed. 653 653 */ 654 VMMR3_INT_DECL(int) H WACCMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)654 VMMR3_INT_DECL(int) HMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat) 655 655 { 656 656 switch (enmWhat) 657 657 { 658 658 case VMINITCOMPLETED_RING3: 659 return h waccmR3InitCPU(pVM);659 return hmR3InitCPU(pVM); 660 660 case VMINITCOMPLETED_RING0: 661 return h waccmR3InitFinalizeR0(pVM);661 return hmR3InitFinalizeR0(pVM); 662 662 default: 663 663 return VINF_SUCCESS; … … 671 671 * @param pVM Pointer to the VM. 672 672 */ 673 static void h waccmR3DisableRawMode(PVM pVM)673 static void hmR3DisableRawMode(PVM pVM) 674 674 { 675 675 /* Disable PATM & CSAM. */ … … 706 706 * @param pVM Pointer to the VM. 707 707 */ 708 static int h waccmR3InitFinalizeR0(PVM pVM)708 static int hmR3InitFinalizeR0(PVM pVM) 709 709 { 710 710 int rc; … … 714 714 * is already using AMD-V. 715 715 */ 716 if ( !pVM->h waccm.s.vmx.fSupported717 && !pVM->h waccm.s.svm.fSupported718 && pVM->h waccm.s.lLastError == VERR_SVM_IN_USE /* implies functional AMD-V */716 if ( !pVM->hm.s.vmx.fSupported 717 && !pVM->hm.s.svm.fSupported 718 && pVM->hm.s.lLastError == VERR_SVM_IN_USE /* implies functional AMD-V */ 719 719 && RTEnvExist("VBOX_HWVIRTEX_IGNORE_SVM_IN_USE")) 720 720 { 721 LogRel(("H WACCM: VBOX_HWVIRTEX_IGNORE_SVM_IN_USE active!\n"));722 pVM->h waccm.s.svm.fSupported = true;723 pVM->h waccm.s.svm.fIgnoreInUseError = true;721 LogRel(("HM: VBOX_HWVIRTEX_IGNORE_SVM_IN_USE active!\n")); 722 pVM->hm.s.svm.fSupported = true; 723 pVM->hm.s.svm.fIgnoreInUseError = true; 724 724 } 725 725 else 726 if ( !pVM->h waccm.s.vmx.fSupported727 && !pVM->h waccm.s.svm.fSupported)728 { 729 LogRel(("H WACCM: No VT-x or AMD-V CPU extension found. Reason %Rrc\n", pVM->hwaccm.s.lLastError));730 LogRel(("H WACCM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));726 if ( !pVM->hm.s.vmx.fSupported 727 && !pVM->hm.s.svm.fSupported) 728 { 729 LogRel(("HM: No VT-x or AMD-V CPU extension found. 
Reason %Rrc\n", pVM->hm.s.lLastError)); 730 LogRel(("HM: VMX MSR_IA32_FEATURE_CONTROL=%RX64\n", pVM->hm.s.vmx.msr.feature_ctrl)); 731 731 732 732 if (VMMIsHwVirtExtForced(pVM)) 733 733 { 734 switch (pVM->h waccm.s.lLastError)734 switch (pVM->hm.s.lLastError) 735 735 { 736 736 case VERR_VMX_NO_VMX: … … 745 745 return VM_SET_ERROR(pVM, VERR_SVM_DISABLED, "AMD-V is disabled in the BIOS."); 746 746 default: 747 return pVM->h waccm.s.lLastError;747 return pVM->hm.s.lLastError; 748 748 } 749 749 } … … 751 751 } 752 752 753 if (pVM->h waccm.s.vmx.fSupported)753 if (pVM->hm.s.vmx.fSupported) 754 754 { 755 755 rc = SUPR3QueryVTxSupported(); … … 757 757 { 758 758 #ifdef RT_OS_LINUX 759 LogRel(("H WACCM: The host kernel does not support VT-x -- Linux 2.6.13 or newer required!\n"));759 LogRel(("HM: The host kernel does not support VT-x -- Linux 2.6.13 or newer required!\n")); 760 760 #else 761 LogRel(("H WACCM: The host kernel does not support VT-x!\n"));761 LogRel(("HM: The host kernel does not support VT-x!\n")); 762 762 #endif 763 763 if ( pVM->cCpus > 1 … … 770 770 } 771 771 772 if (!pVM->h waccm.s.fAllowed)772 if (!pVM->hm.s.fAllowed) 773 773 return VINF_SUCCESS; /* nothing to do */ 774 774 … … 777 777 if (RT_FAILURE(rc)) 778 778 { 779 LogRel(("H WACCMR3InitFinalize: SUPR3CallVMMR0Ex VMMR0_DO_HWACC_ENABLE failed with %Rrc\n", rc));779 LogRel(("HMR3InitFinalize: SUPR3CallVMMR0Ex VMMR0_DO_HWACC_ENABLE failed with %Rrc\n", rc)); 780 780 return rc; 781 781 } 782 Assert(!pVM->fH WACCMEnabled || VMMIsHwVirtExtForced(pVM));783 784 pVM->h waccm.s.fHasIoApic = PDMHasIoApic(pVM);782 Assert(!pVM->fHMEnabled || VMMIsHwVirtExtForced(pVM)); 783 784 pVM->hm.s.fHasIoApic = PDMHasIoApic(pVM); 785 785 /* No TPR patching is required when the IO-APIC is not enabled for this VM. (Main should have taken care of this already) */ 786 if (!pVM->h waccm.s.fHasIoApic)787 { 788 Assert(!pVM->h waccm.s.fTRPPatchingAllowed); /* paranoia */789 pVM->h waccm.s.fTRPPatchingAllowed = false;786 if (!pVM->hm.s.fHasIoApic) 787 { 788 Assert(!pVM->hm.s.fTRPPatchingAllowed); /* paranoia */ 789 pVM->hm.s.fTRPPatchingAllowed = false; 790 790 } 791 791 792 792 bool fOldBuffered = RTLogRelSetBuffering(true /*fBuffered*/); 793 if (pVM->h waccm.s.vmx.fSupported)794 { 795 Log(("pVM->h waccm.s.vmx.fSupported = %d\n", pVM->hwaccm.s.vmx.fSupported));796 797 if ( pVM->h waccm.s.fInitialized == false798 && pVM->h waccm.s.vmx.msr.feature_ctrl != 0)793 if (pVM->hm.s.vmx.fSupported) 794 { 795 Log(("pVM->hm.s.vmx.fSupported = %d\n", pVM->hm.s.vmx.fSupported)); 796 797 if ( pVM->hm.s.fInitialized == false 798 && pVM->hm.s.vmx.msr.feature_ctrl != 0) 799 799 { 800 800 uint64_t val; 801 801 RTGCPHYS GCPhys = 0; 802 802 803 LogRel(("H WACCM: Host CR4=%08X\n", pVM->hwaccm.s.vmx.hostCR4));804 LogRel(("H WACCM: MSR_IA32_FEATURE_CONTROL = %RX64\n", pVM->hwaccm.s.vmx.msr.feature_ctrl));805 LogRel(("H WACCM: MSR_IA32_VMX_BASIC_INFO = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_basic_info));806 LogRel(("H WACCM: VMCS id = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));807 LogRel(("H WACCM: VMCS size = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));808 LogRel(("H WACCM: VMCS physical address limit = %s\n", MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(pVM->hwaccm.s.vmx.msr.vmx_basic_info) ? 
"< 4 GB" : "None"));809 LogRel(("H WACCM: VMCS memory type = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));810 LogRel(("H WACCM: Dual monitor treatment = %d\n", MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(pVM->hwaccm.s.vmx.msr.vmx_basic_info)));811 812 LogRel(("H WACCM: MSR_IA32_VMX_PINBASED_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.u));813 val = pVM->h waccm.s.vmx.msr.vmx_pin_ctls.n.allowed1;803 LogRel(("HM: Host CR4=%08X\n", pVM->hm.s.vmx.hostCR4)); 804 LogRel(("HM: MSR_IA32_FEATURE_CONTROL = %RX64\n", pVM->hm.s.vmx.msr.feature_ctrl)); 805 LogRel(("HM: MSR_IA32_VMX_BASIC_INFO = %RX64\n", pVM->hm.s.vmx.msr.vmx_basic_info)); 806 LogRel(("HM: VMCS id = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info))); 807 LogRel(("HM: VMCS size = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.vmx_basic_info))); 808 LogRel(("HM: VMCS physical address limit = %s\n", MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(pVM->hm.s.vmx.msr.vmx_basic_info) ? "< 4 GB" : "None")); 809 LogRel(("HM: VMCS memory type = %x\n", MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(pVM->hm.s.vmx.msr.vmx_basic_info))); 810 LogRel(("HM: Dual monitor treatment = %d\n", MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(pVM->hm.s.vmx.msr.vmx_basic_info))); 811 812 LogRel(("HM: MSR_IA32_VMX_PINBASED_CTLS = %RX64\n", pVM->hm.s.vmx.msr.vmx_pin_ctls.u)); 813 val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1; 814 814 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT) 815 LogRel(("H WACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT\n"));815 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT\n")); 816 816 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT) 817 LogRel(("H WACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT\n"));817 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT\n")); 818 818 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI) 819 LogRel(("H WACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI\n"));819 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI\n")); 820 820 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER) 821 LogRel(("H WACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER\n"));822 val = pVM->h waccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;821 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER\n")); 822 val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0; 823 823 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT) 824 LogRel(("H WACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT *must* be set\n"));824 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT *must* be set\n")); 825 825 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT) 826 LogRel(("H WACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT *must* be set\n"));826 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT *must* be set\n")); 827 827 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI) 828 LogRel(("H WACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI *must* be set\n"));828 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_VIRTUAL_NMI *must* be set\n")); 829 829 if (val & VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER) 830 LogRel(("H WACCM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER *must* be set\n"));831 832 LogRel(("H WACCM: MSR_IA32_VMX_PROCBASED_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.u));833 val = pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1;830 LogRel(("HM: VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER *must* be set\n")); 831 832 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS = %RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls.u)); 833 val = 
pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1; 834 834 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT) 835 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT\n"));835 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT\n")); 836 836 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET) 837 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET\n"));837 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET\n")); 838 838 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT) 839 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT\n"));839 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT\n")); 840 840 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT) 841 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT\n"));841 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT\n")); 842 842 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT) 843 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT\n"));843 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT\n")); 844 844 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT) 845 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT\n"));845 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT\n")); 846 846 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT) 847 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT\n"));847 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT\n")); 848 848 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT) 849 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT\n"));849 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT\n")); 850 850 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT) 851 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT\n"));851 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT\n")); 852 852 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT) 853 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT\n"));853 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT\n")); 854 854 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT) 855 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT\n"));855 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT\n")); 856 856 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW) 857 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW\n"));857 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW\n")); 858 858 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT) 859 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT\n"));859 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT\n")); 860 860 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT) 861 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT\n"));861 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT\n")); 862 862 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT) 863 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT\n"));863 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT\n")); 864 864 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS) 865 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS\n"));865 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS\n")); 866 866 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG) 867 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG\n"));867 LogRel(("HM: 
VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG\n")); 868 868 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS) 869 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS\n"));869 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS\n")); 870 870 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT) 871 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT\n"));871 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT\n")); 872 872 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT) 873 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT\n"));873 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT\n")); 874 874 if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL) 875 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL\n"));876 877 val = pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;875 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL\n")); 876 877 val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0; 878 878 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT) 879 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT *must* be set\n"));879 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT *must* be set\n")); 880 880 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET) 881 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET *must* be set\n"));881 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET *must* be set\n")); 882 882 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT) 883 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT *must* be set\n"));883 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT *must* be set\n")); 884 884 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT) 885 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT *must* be set\n"));885 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT *must* be set\n")); 886 886 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT) 887 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT *must* be set\n"));887 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT *must* be set\n")); 888 888 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT) 889 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT *must* be set\n"));889 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT *must* be set\n")); 890 890 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT) 891 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT *must* be set\n"));891 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT *must* be set\n")); 892 892 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT) 893 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT *must* be set\n"));893 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT *must* be set\n")); 894 894 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT) 895 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT *must* be set\n"));895 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT *must* be set\n")); 896 896 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT) 897 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT *must* be set\n"));897 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT *must* be set\n")); 898 898 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT) 899 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT *must* be set\n"));899 LogRel(("HM: 
VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT *must* be set\n")); 900 900 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW) 901 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW *must* be set\n"));901 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW *must* be set\n")); 902 902 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT) 903 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT *must* be set\n"));903 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_NMI_WINDOW_EXIT *must* be set\n")); 904 904 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT) 905 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT *must* be set\n"));905 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT *must* be set\n")); 906 906 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT) 907 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT *must* be set\n"));907 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT *must* be set\n")); 908 908 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS) 909 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS *must* be set\n"));909 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_IO_BITMAPS *must* be set\n")); 910 910 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG) 911 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG *must* be set\n"));911 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG *must* be set\n")); 912 912 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS) 913 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS *must* be set\n"));913 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS *must* be set\n")); 914 914 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT) 915 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT *must* be set\n"));915 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT *must* be set\n")); 916 916 if (val & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT) 917 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT *must* be set\n"));917 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_PAUSE_EXIT *must* be set\n")); 918 918 if (val & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL) 919 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL *must* be set\n"));920 921 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)919 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL *must* be set\n")); 920 921 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL) 922 922 { 923 LogRel(("H WACCM: MSR_IA32_VMX_PROCBASED_CTLS2 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.u));924 val = pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;923 LogRel(("HM: MSR_IA32_VMX_PROCBASED_CTLS2 = %RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls2.u)); 924 val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1; 925 925 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) 926 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC\n"));926 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC\n")); 927 927 if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT) 928 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC2_EPT\n"));928 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_EPT\n")); 929 929 if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT) 930 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT\n"));930 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT\n")); 931 931 
if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 932 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP\n"));932 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP\n")); 933 933 if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC) 934 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC2_X2APIC\n"));934 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_X2APIC\n")); 935 935 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID) 936 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC2_VPID\n"));936 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_VPID\n")); 937 937 if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT) 938 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT\n"));938 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT\n")); 939 939 if (val & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE) 940 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE\n"));940 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE\n")); 941 941 if (val & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT) 942 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT\n"));943 944 val = pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;942 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT\n")); 943 944 val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0; 945 945 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) 946 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC *must* be set\n"));946 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC *must* be set\n")); 947 947 if (val & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT) 948 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT *must* be set\n"));948 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_INSTR_EXIT *must* be set\n")); 949 949 if (val & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 950 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP *must* be set\n"));950 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP *must* be set\n")); 951 951 if (val & VMX_VMCS_CTRL_PROC_EXEC2_X2APIC) 952 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC2_X2APIC *must* be set\n"));952 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_X2APIC *must* be set\n")); 953 953 if (val & VMX_VMCS_CTRL_PROC_EXEC2_EPT) 954 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC2_EPT *must* be set\n"));954 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_EPT *must* be set\n")); 955 955 if (val & VMX_VMCS_CTRL_PROC_EXEC2_VPID) 956 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC2_VPID *must* be set\n"));956 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_VPID *must* be set\n")); 957 957 if (val & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT) 958 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT *must* be set\n"));958 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT *must* be set\n")); 959 959 if (val & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE) 960 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE *must* be set\n"));960 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE *must* be set\n")); 961 961 if (val & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT) 962 LogRel(("H WACCM: VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT *must* be set\n"));962 LogRel(("HM: VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT *must* be set\n")); 963 963 } 964 964 965 LogRel(("H WACCM: MSR_IA32_VMX_ENTRY_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_entry.u));966 val = pVM->h waccm.s.vmx.msr.vmx_entry.n.allowed1;965 LogRel(("HM: MSR_IA32_VMX_ENTRY_CTLS = %RX64\n", pVM->hm.s.vmx.msr.vmx_entry.u)); 966 val = pVM->hm.s.vmx.msr.vmx_entry.n.allowed1; 967 967 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG) 968 LogRel(("H WACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG\n"));968 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG\n")); 969 969 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE) 970 LogRel(("H WACCM: 
VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE\n"));970 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE\n")); 971 971 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM) 972 LogRel(("H WACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM\n"));972 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM\n")); 973 973 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON) 974 LogRel(("H WACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON\n"));974 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON\n")); 975 975 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR) 976 LogRel(("H WACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR\n"));976 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR\n")); 977 977 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR) 978 LogRel(("H WACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR\n"));978 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR\n")); 979 979 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR) 980 LogRel(("H WACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR\n"));981 val = pVM->h waccm.s.vmx.msr.vmx_entry.n.disallowed0;980 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR\n")); 981 val = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0; 982 982 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG) 983 LogRel(("H WACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG *must* be set\n"));983 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG *must* be set\n")); 984 984 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE) 985 LogRel(("H WACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE *must* be set\n"));985 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE *must* be set\n")); 986 986 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM) 987 LogRel(("H WACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM *must* be set\n"));987 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_ENTRY_SMM *must* be set\n")); 988 988 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON) 989 LogRel(("H WACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON *must* be set\n"));989 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_DEACTIVATE_DUALMON *must* be set\n")); 990 990 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR) 991 LogRel(("H WACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR *must* be set\n"));991 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PERF_MSR *must* be set\n")); 992 992 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR) 993 LogRel(("H WACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR *must* be set\n"));993 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_PAT_MSR *must* be set\n")); 994 994 if (val & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR) 995 LogRel(("H WACCM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR *must* be set\n"));996 997 LogRel(("H WACCM: MSR_IA32_VMX_EXIT_CTLS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_exit.u));998 val = pVM->h waccm.s.vmx.msr.vmx_exit.n.allowed1;995 LogRel(("HM: VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR *must* be set\n")); 996 997 LogRel(("HM: MSR_IA32_VMX_EXIT_CTLS = %RX64\n", pVM->hm.s.vmx.msr.vmx_exit.u)); 998 val = pVM->hm.s.vmx.msr.vmx_exit.n.allowed1; 999 999 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG) 1000 LogRel(("H WACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG\n"));1000 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG\n")); 1001 1001 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64) 1002 LogRel(("H WACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64\n"));1002 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64\n")); 1003 1003 if 
(val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ) 1004 LogRel(("H WACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ\n"));1004 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ\n")); 1005 1005 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR) 1006 LogRel(("H WACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR\n"));1006 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR\n")); 1007 1007 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR) 1008 LogRel(("H WACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR\n"));1008 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR\n")); 1009 1009 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR) 1010 LogRel(("H WACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR\n"));1010 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR\n")); 1011 1011 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR) 1012 LogRel(("H WACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR\n"));1012 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR\n")); 1013 1013 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER) 1014 LogRel(("H WACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER\n"));1015 val = pVM->h waccm.s.vmx.msr.vmx_exit.n.disallowed0;1014 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER\n")); 1015 val = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0; 1016 1016 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG) 1017 LogRel(("H WACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG *must* be set\n"));1017 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG *must* be set\n")); 1018 1018 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64) 1019 LogRel(("H WACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64 *must* be set\n"));1019 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64 *must* be set\n")); 1020 1020 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ) 1021 LogRel(("H WACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ *must* be set\n"));1021 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_ACK_EXTERNAL_IRQ *must* be set\n")); 1022 1022 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR) 1023 LogRel(("H WACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR *must* be set\n"));1023 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_PAT_MSR *must* be set\n")); 1024 1024 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR) 1025 LogRel(("H WACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR *must* be set\n"));1025 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_PAT_MSR *must* be set\n")); 1026 1026 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR) 1027 LogRel(("H WACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR *must* be set\n"));1027 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_GUEST_EFER_MSR *must* be set\n")); 1028 1028 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR) 1029 LogRel(("H WACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR *must* be set\n"));1029 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR *must* be set\n")); 1030 1030 if (val & VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER) 1031 LogRel(("H WACCM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER *must* be set\n"));1032 1033 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps)1031 LogRel(("HM: VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_VMX_PREEMPT_TIMER *must* be set\n")); 1032 1033 if (pVM->hm.s.vmx.msr.vmx_eptcaps) 1034 1034 { 1035 LogRel(("H WACCM: MSR_IA32_VMX_EPT_VPID_CAPS = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_eptcaps));1036 1037 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & 
MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY)1038 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY\n"));1039 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY)1040 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY\n"));1041 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY)1042 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY\n"));1043 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS)1044 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS\n"));1045 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS)1046 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS\n"));1047 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS)1048 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS\n"));1049 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS)1050 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS\n"));1051 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS)1052 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS\n"));1053 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_UC)1054 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_EMT_UC\n"));1055 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WC)1056 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WC\n"));1057 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WT)1058 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WT\n"));1059 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WP)1060 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WP\n"));1061 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WB)1062 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_EMT_WB\n"));1063 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_21_BITS)1064 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_SP_21_BITS\n"));1065 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_30_BITS)1066 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_SP_30_BITS\n"));1067 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_39_BITS)1068 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_SP_39_BITS\n"));1069 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_48_BITS)1070 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_SP_48_BITS\n"));1071 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT)1072 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT\n"));1073 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_SINGLE_CONTEXT)1074 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_SINGLE_CONTEXT\n"));1075 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL_CONTEXTS)1076 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL_CONTEXTS\n"));1077 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID)1078 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID\n"));1079 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR)1080 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR\n"));1081 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT)1082 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT\n"));1083 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS)1084 LogRel(("H WACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS\n"));1085 if (pVM->h waccm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT_RETAIN_GLOBALS)1086 LogRel(("H 
WACCM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT_RETAIN_GLOBALS\n"));1035 LogRel(("HM: MSR_IA32_VMX_EPT_VPID_CAPS = %RX64\n", pVM->hm.s.vmx.msr.vmx_eptcaps)); 1036 1037 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY) 1038 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_RWX_X_ONLY\n")); 1039 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY) 1040 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_RWX_W_ONLY\n")); 1041 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY) 1042 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_RWX_WX_ONLY\n")); 1043 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS) 1044 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_GAW_21_BITS\n")); 1045 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS) 1046 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_GAW_30_BITS\n")); 1047 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS) 1048 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_GAW_39_BITS\n")); 1049 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS) 1050 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_GAW_48_BITS\n")); 1051 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS) 1052 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_GAW_57_BITS\n")); 1053 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_UC) 1054 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_EMT_UC\n")); 1055 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WC) 1056 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_EMT_WC\n")); 1057 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WT) 1058 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_EMT_WT\n")); 1059 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WP) 1060 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_EMT_WP\n")); 1061 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_EMT_WB) 1062 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_EMT_WB\n")); 1063 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_21_BITS) 1064 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_SP_21_BITS\n")); 1065 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_30_BITS) 1066 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_SP_30_BITS\n")); 1067 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_39_BITS) 1068 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_SP_39_BITS\n")); 1069 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_SP_48_BITS) 1070 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_SP_48_BITS\n")); 1071 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT) 1072 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_INVEPT\n")); 1073 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_SINGLE_CONTEXT) 1074 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_SINGLE_CONTEXT\n")); 1075 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL_CONTEXTS) 1076 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL_CONTEXTS\n")); 1077 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID) 1078 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_INVVPID\n")); 1079 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR) 1080 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_INDIV_ADDR\n")); 1081 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT) 1082 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT\n")); 1083 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS) 1084 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS\n")); 1085 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT_RETAIN_GLOBALS) 
1086 LogRel(("HM: MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT_RETAIN_GLOBALS\n")); 1087 1087 } 1088 1088 1089 LogRel(("H WACCM: MSR_IA32_VMX_MISC = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_misc));1090 if (MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->h waccm.s.vmx.msr.vmx_misc) == pVM->hwaccm.s.vmx.cPreemptTimerShift)1091 LogRel(("H WACCM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hwaccm.s.vmx.msr.vmx_misc)));1089 LogRel(("HM: MSR_IA32_VMX_MISC = %RX64\n", pVM->hm.s.vmx.msr.vmx_misc)); 1090 if (MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.vmx_misc) == pVM->hm.s.vmx.cPreemptTimerShift) 1091 LogRel(("HM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x\n", MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.vmx_misc))); 1092 1092 else 1093 1093 { 1094 LogRel(("H WACCM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x - erratum detected, using %x instead\n",1095 MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->h waccm.s.vmx.msr.vmx_misc), pVM->hwaccm.s.vmx.cPreemptTimerShift));1094 LogRel(("HM: MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT %x - erratum detected, using %x instead\n", 1095 MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(pVM->hm.s.vmx.msr.vmx_misc), pVM->hm.s.vmx.cPreemptTimerShift)); 1096 1096 } 1097 LogRel(("H WACCM: MSR_IA32_VMX_MISC_ACTIVITY_STATES %x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hwaccm.s.vmx.msr.vmx_misc)));1098 LogRel(("H WACCM: MSR_IA32_VMX_MISC_CR3_TARGET %x\n", MSR_IA32_VMX_MISC_CR3_TARGET(pVM->hwaccm.s.vmx.msr.vmx_misc)));1099 LogRel(("H WACCM: MSR_IA32_VMX_MISC_MAX_MSR %x\n", MSR_IA32_VMX_MISC_MAX_MSR(pVM->hwaccm.s.vmx.msr.vmx_misc)));1100 LogRel(("H WACCM: MSR_IA32_VMX_MISC_MSEG_ID %x\n", MSR_IA32_VMX_MISC_MSEG_ID(pVM->hwaccm.s.vmx.msr.vmx_misc)));1101 1102 LogRel(("H WACCM: MSR_IA32_VMX_CR0_FIXED0 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed0));1103 LogRel(("H WACCM: MSR_IA32_VMX_CR0_FIXED1 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr0_fixed1));1104 LogRel(("H WACCM: MSR_IA32_VMX_CR4_FIXED0 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0));1105 LogRel(("H WACCM: MSR_IA32_VMX_CR4_FIXED1 = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed1));1106 LogRel(("H WACCM: MSR_IA32_VMX_VMCS_ENUM = %RX64\n", pVM->hwaccm.s.vmx.msr.vmx_vmcs_enum));1107 1108 LogRel(("H WACCM: TPR shadow physaddr = %RHp\n", pVM->hwaccm.s.vmx.pAPICPhys));1097 LogRel(("HM: MSR_IA32_VMX_MISC_ACTIVITY_STATES %x\n", MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.msr.vmx_misc))); 1098 LogRel(("HM: MSR_IA32_VMX_MISC_CR3_TARGET %x\n", MSR_IA32_VMX_MISC_CR3_TARGET(pVM->hm.s.vmx.msr.vmx_misc))); 1099 LogRel(("HM: MSR_IA32_VMX_MISC_MAX_MSR %x\n", MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc))); 1100 LogRel(("HM: MSR_IA32_VMX_MISC_MSEG_ID %x\n", MSR_IA32_VMX_MISC_MSEG_ID(pVM->hm.s.vmx.msr.vmx_misc))); 1101 1102 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED0 = %RX64\n", pVM->hm.s.vmx.msr.vmx_cr0_fixed0)); 1103 LogRel(("HM: MSR_IA32_VMX_CR0_FIXED1 = %RX64\n", pVM->hm.s.vmx.msr.vmx_cr0_fixed1)); 1104 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED0 = %RX64\n", pVM->hm.s.vmx.msr.vmx_cr4_fixed0)); 1105 LogRel(("HM: MSR_IA32_VMX_CR4_FIXED1 = %RX64\n", pVM->hm.s.vmx.msr.vmx_cr4_fixed1)); 1106 LogRel(("HM: MSR_IA32_VMX_VMCS_ENUM = %RX64\n", pVM->hm.s.vmx.msr.vmx_vmcs_enum)); 1107 1108 LogRel(("HM: TPR shadow physaddr = %RHp\n", pVM->hm.s.vmx.pAPICPhys)); 1109 1109 1110 1110 /* Paranoia */ 1111 AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->h waccm.s.vmx.msr.vmx_misc) >= 512);1111 AssertRelease(MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc) >= 512); 1112 1112 1113 1113 for (VMCPUID i = 0; i < pVM->cCpus; i++) 1114 1114 { 1115 LogRel(("H 
WACCM: VCPU%d: MSR bitmap physaddr = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pMSRBitmapPhys));1116 LogRel(("H WACCM: VCPU%d: VMCS physaddr = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.HCPhysVMCS));1115 LogRel(("HM: VCPU%d: MSR bitmap physaddr = %RHp\n", i, pVM->aCpus[i].hm.s.vmx.pMSRBitmapPhys)); 1116 LogRel(("HM: VCPU%d: VMCS physaddr = %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysVMCS)); 1117 1117 } 1118 1118 1119 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT)1120 pVM->h waccm.s.fNestedPaging = pVM->hwaccm.s.fAllowNestedPaging;1121 1122 if (pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)1123 pVM->h waccm.s.vmx.fVPID = pVM->hwaccm.s.vmx.fAllowVPID;1119 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT) 1120 pVM->hm.s.fNestedPaging = pVM->hm.s.fAllowNestedPaging; 1121 1122 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID) 1123 pVM->hm.s.vmx.fVPID = pVM->hm.s.vmx.fAllowVPID; 1124 1124 1125 1125 /* … … 1128 1128 * in Nehalems and secondary VM exec. controls should be supported in all of them, but nonetheless it's Intel... 1129 1129 */ 1130 if (!(pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)1130 if (!(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL) 1131 1131 && CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP)) 1132 1132 { … … 1135 1135 1136 1136 /* Unrestricted guest execution relies on EPT. */ 1137 if ( pVM->h waccm.s.fNestedPaging1138 && (pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE))1137 if ( pVM->hm.s.fNestedPaging 1138 && (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE)) 1139 1139 { 1140 pVM->h waccm.s.vmx.fUnrestrictedGuest = true;1140 pVM->hm.s.vmx.fUnrestrictedGuest = true; 1141 1141 } 1142 1142 1143 1143 /* Only try once. */ 1144 pVM->h waccm.s.fInitialized = true;1145 1146 if (!pVM->h waccm.s.vmx.fUnrestrictedGuest)1144 pVM->hm.s.fInitialized = true; 1145 1146 if (!pVM->hm.s.vmx.fUnrestrictedGuest) 1147 1147 { 1148 1148 /* Allocate three pages for the TSS we need for real mode emulation. (2 pages for the IO bitmap) */ 1149 rc = PDMR3VMMDevHeapAlloc(pVM, H WACCM_VTX_TOTAL_DEVHEAP_MEM, (RTR3PTR *)&pVM->hwaccm.s.vmx.pRealModeTSS);1149 rc = PDMR3VMMDevHeapAlloc(pVM, HM_VTX_TOTAL_DEVHEAP_MEM, (RTR3PTR *)&pVM->hm.s.vmx.pRealModeTSS); 1150 1150 if (RT_SUCCESS(rc)) 1151 1151 { 1152 1152 /* The I/O bitmap starts right after the virtual interrupt redirection bitmap. */ 1153 ASMMemZero32(pVM->h waccm.s.vmx.pRealModeTSS, sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS));1154 pVM->h waccm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS);1153 ASMMemZero32(pVM->hm.s.vmx.pRealModeTSS, sizeof(*pVM->hm.s.vmx.pRealModeTSS)); 1154 pVM->hm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hm.s.vmx.pRealModeTSS); 1155 1155 /* Bit set to 0 means redirection enabled. */ 1156 memset(pVM->h waccm.s.vmx.pRealModeTSS->IntRedirBitmap, 0x0, sizeof(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap));1156 memset(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap, 0x0, sizeof(pVM->hm.s.vmx.pRealModeTSS->IntRedirBitmap)); 1157 1157 /* Allow all port IO, so the VT-x IO intercepts do their job. 
*/ 1158 memset(pVM->h waccm.s.vmx.pRealModeTSS + 1, 0, PAGE_SIZE*2);1159 *((unsigned char *)pVM->h waccm.s.vmx.pRealModeTSS + HWACCM_VTX_TSS_SIZE - 2) = 0xff;1158 memset(pVM->hm.s.vmx.pRealModeTSS + 1, 0, PAGE_SIZE*2); 1159 *((unsigned char *)pVM->hm.s.vmx.pRealModeTSS + HM_VTX_TSS_SIZE - 2) = 0xff; 1160 1160 1161 1161 /* … … 1163 1163 * real and protected mode without paging with EPT. 1164 1164 */ 1165 pVM->h waccm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hwaccm.s.vmx.pRealModeTSS + PAGE_SIZE * 3);1165 pVM->hm.s.vmx.pNonPagingModeEPTPageTable = (PX86PD)((char *)pVM->hm.s.vmx.pRealModeTSS + PAGE_SIZE * 3); 1166 1166 for (unsigned i = 0; i < X86_PG_ENTRIES; i++) 1167 1167 { 1168 pVM->h waccm.s.vmx.pNonPagingModeEPTPageTable->a[i].u = _4M * i;1169 pVM->h waccm.s.vmx.pNonPagingModeEPTPageTable->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_PS | X86_PDE4M_G;1168 pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u = _4M * i; 1169 pVM->hm.s.vmx.pNonPagingModeEPTPageTable->a[i].u |= X86_PDE4M_P | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_A | X86_PDE4M_D | X86_PDE4M_PS | X86_PDE4M_G; 1170 1170 } 1171 1171 1172 1172 /* We convert it here every time as pci regions could be reconfigured. */ 1173 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->h waccm.s.vmx.pRealModeTSS, &GCPhys);1173 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys); 1174 1174 AssertRC(rc); 1175 LogRel(("H WACCM: Real Mode TSS guest physaddr = %RGp\n", GCPhys));1176 1177 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->h waccm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);1175 LogRel(("HM: Real Mode TSS guest physaddr = %RGp\n", GCPhys)); 1176 1177 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys); 1178 1178 AssertRC(rc); 1179 LogRel(("H WACCM: Non-Paging Mode EPT CR3 = %RGp\n", GCPhys));1179 LogRel(("HM: Non-Paging Mode EPT CR3 = %RGp\n", GCPhys)); 1180 1180 } 1181 1181 else 1182 1182 { 1183 LogRel(("H WACCM: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)\n", rc));1184 pVM->h waccm.s.vmx.pRealModeTSS = NULL;1185 pVM->h waccm.s.vmx.pNonPagingModeEPTPageTable = NULL;1183 LogRel(("HM: No real mode VT-x support (PDMR3VMMDevHeapAlloc returned %Rrc)\n", rc)); 1184 pVM->hm.s.vmx.pRealModeTSS = NULL; 1185 pVM->hm.s.vmx.pNonPagingModeEPTPageTable = NULL; 1186 1186 } 1187 1187 } … … 1191 1191 if (rc == VINF_SUCCESS) 1192 1192 { 1193 pVM->fH WACCMEnabled = true;1194 pVM->h waccm.s.vmx.fEnabled = true;1195 h waccmR3DisableRawMode(pVM);1193 pVM->fHMEnabled = true; 1194 pVM->hm.s.vmx.fEnabled = true; 1195 hmR3DisableRawMode(pVM); 1196 1196 1197 1197 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP); 1198 1198 #ifdef VBOX_ENABLE_64_BITS_GUESTS 1199 if (pVM->h waccm.s.fAllow64BitGuests)1199 if (pVM->hm.s.fAllow64BitGuests) 1200 1200 { 1201 1201 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE); … … 1209 1209 /* Todo: this needs to be fixed properly!! */ 1210 1210 if ( CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE) 1211 && (pVM->h waccm.s.vmx.hostEFER & MSR_K6_EFER_NXE))1211 && (pVM->hm.s.vmx.hostEFER & MSR_K6_EFER_NXE)) 1212 1212 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_NX); 1213 1213 1214 LogRel((pVM->h waccm.s.fAllow64BitGuests1215 ? "H WACCM: 32-bit and 64-bit guests supported.\n"1216 : "H WACCM: 32-bit guests supported.\n"));1214 LogRel((pVM->hm.s.fAllow64BitGuests 1215 ? 
"HM: 32-bit and 64-bit guests supported.\n" 1216 : "HM: 32-bit guests supported.\n")); 1217 1217 #else 1218 LogRel(("H WACCM: 32-bit guests supported.\n"));1218 LogRel(("HM: 32-bit guests supported.\n")); 1219 1219 #endif 1220 LogRel(("H WACCM: VMX enabled!\n"));1221 if (pVM->h waccm.s.fNestedPaging)1220 LogRel(("HM: VMX enabled!\n")); 1221 if (pVM->hm.s.fNestedPaging) 1222 1222 { 1223 LogRel(("H WACCM: Enabled nested paging\n"));1224 LogRel(("H WACCM: EPT root page = %RHp\n", PGMGetHyperCR3(VMMGetCpu(pVM))));1225 if (pVM->h waccm.s.vmx.enmFlushEPT == VMX_FLUSH_EPT_SINGLE_CONTEXT)1226 LogRel(("H WACCM: enmFlushEPT = VMX_FLUSH_EPT_SINGLE_CONTEXT\n"));1227 else if (pVM->h waccm.s.vmx.enmFlushEPT == VMX_FLUSH_EPT_ALL_CONTEXTS)1228 LogRel(("H WACCM: enmFlushEPT = VMX_FLUSH_EPT_ALL_CONTEXTS\n"));1229 else if (pVM->h waccm.s.vmx.enmFlushEPT == VMX_FLUSH_EPT_NOT_SUPPORTED)1230 LogRel(("H WACCM: enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED\n"));1223 LogRel(("HM: Enabled nested paging\n")); 1224 LogRel(("HM: EPT root page = %RHp\n", PGMGetHyperCR3(VMMGetCpu(pVM)))); 1225 if (pVM->hm.s.vmx.enmFlushEPT == VMX_FLUSH_EPT_SINGLE_CONTEXT) 1226 LogRel(("HM: enmFlushEPT = VMX_FLUSH_EPT_SINGLE_CONTEXT\n")); 1227 else if (pVM->hm.s.vmx.enmFlushEPT == VMX_FLUSH_EPT_ALL_CONTEXTS) 1228 LogRel(("HM: enmFlushEPT = VMX_FLUSH_EPT_ALL_CONTEXTS\n")); 1229 else if (pVM->hm.s.vmx.enmFlushEPT == VMX_FLUSH_EPT_NOT_SUPPORTED) 1230 LogRel(("HM: enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED\n")); 1231 1231 else 1232 LogRel(("H WACCM: enmFlushEPT = %d\n", pVM->hwaccm.s.vmx.enmFlushEPT));1233 1234 if (pVM->h waccm.s.vmx.fUnrestrictedGuest)1235 LogRel(("H WACCM: Unrestricted guest execution enabled!\n"));1232 LogRel(("HM: enmFlushEPT = %d\n", pVM->hm.s.vmx.enmFlushEPT)); 1233 1234 if (pVM->hm.s.vmx.fUnrestrictedGuest) 1235 LogRel(("HM: Unrestricted guest execution enabled!\n")); 1236 1236 1237 1237 #if HC_ARCH_BITS == 64 1238 if (pVM->h waccm.s.fLargePages)1238 if (pVM->hm.s.fLargePages) 1239 1239 { 1240 1240 /* Use large (2 MB) pages for our EPT PDEs where possible. 
*/ 1241 1241 PGMSetLargePageUsage(pVM, true); 1242 LogRel(("H WACCM: Large page support enabled!\n"));1242 LogRel(("HM: Large page support enabled!\n")); 1243 1243 } 1244 1244 #endif 1245 1245 } 1246 1246 else 1247 Assert(!pVM->h waccm.s.vmx.fUnrestrictedGuest);1248 1249 if (pVM->h waccm.s.vmx.fVPID)1247 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); 1248 1249 if (pVM->hm.s.vmx.fVPID) 1250 1250 { 1251 LogRel(("H WACCM: Enabled VPID\n"));1252 if (pVM->h waccm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_INDIV_ADDR)1253 LogRel(("H WACCM: enmFlushVPID = VMX_FLUSH_VPID_INDIV_ADDR\n"));1254 else if (pVM->h waccm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_SINGLE_CONTEXT)1255 LogRel(("H WACCM: enmFlushVPID = VMX_FLUSH_VPID_SINGLE_CONTEXT\n"));1256 else if (pVM->h waccm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_ALL_CONTEXTS)1257 LogRel(("H WACCM: enmFlushVPID = VMX_FLUSH_VPID_ALL_CONTEXTS\n"));1258 else if (pVM->h waccm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS)1259 LogRel(("H WACCM: enmFlushVPID = VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS\n"));1251 LogRel(("HM: Enabled VPID\n")); 1252 if (pVM->hm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_INDIV_ADDR) 1253 LogRel(("HM: enmFlushVPID = VMX_FLUSH_VPID_INDIV_ADDR\n")); 1254 else if (pVM->hm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_SINGLE_CONTEXT) 1255 LogRel(("HM: enmFlushVPID = VMX_FLUSH_VPID_SINGLE_CONTEXT\n")); 1256 else if (pVM->hm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_ALL_CONTEXTS) 1257 LogRel(("HM: enmFlushVPID = VMX_FLUSH_VPID_ALL_CONTEXTS\n")); 1258 else if (pVM->hm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS) 1259 LogRel(("HM: enmFlushVPID = VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS\n")); 1260 1260 else 1261 LogRel(("H WACCM: enmFlushVPID = %d\n", pVM->hwaccm.s.vmx.enmFlushVPID));1261 LogRel(("HM: enmFlushVPID = %d\n", pVM->hm.s.vmx.enmFlushVPID)); 1262 1262 } 1263 else if (pVM->h waccm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_NOT_SUPPORTED)1264 LogRel(("H WACCM: Ignoring VPID capabilities of CPU.\n"));1263 else if (pVM->hm.s.vmx.enmFlushVPID == VMX_FLUSH_VPID_NOT_SUPPORTED) 1264 LogRel(("HM: Ignoring VPID capabilities of CPU.\n")); 1265 1265 1266 1266 /* TPR patching status logging. */ 1267 if (pVM->h waccm.s.fTRPPatchingAllowed)1267 if (pVM->hm.s.fTRPPatchingAllowed) 1268 1268 { 1269 if ( (pVM->h waccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)1270 && (pVM->h waccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))1269 if ( (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL) 1270 && (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)) 1271 1271 { 1272 pVM->h waccm.s.fTRPPatchingAllowed = false; /* not necessary as we have a hardware solution. */1273 LogRel(("H WACCM: TPR Patching not required (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).\n"));1272 pVM->hm.s.fTRPPatchingAllowed = false; /* not necessary as we have a hardware solution. 
*/ 1273 LogRel(("HM: TPR Patching not required (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC).\n")); 1274 1274 } 1275 1275 else … … 1282 1282 || !(ASMCpuId_EDX(0x80000001) & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)) 1283 1283 { 1284 pVM->h waccm.s.fTRPPatchingAllowed = false;1285 LogRel(("H WACCM: TPR patching disabled (long mode not supported).\n"));1284 pVM->hm.s.fTRPPatchingAllowed = false; 1285 LogRel(("HM: TPR patching disabled (long mode not supported).\n")); 1286 1286 } 1287 1287 } 1288 1288 } 1289 LogRel(("H WACCM: TPR Patching %s.\n", (pVM->hwaccm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));1289 LogRel(("HM: TPR Patching %s.\n", (pVM->hm.s.fTRPPatchingAllowed) ? "enabled" : "disabled")); 1290 1290 1291 1291 /* 1292 1292 * Check for preemption timer config override and log the state of it. 1293 1293 */ 1294 if (pVM->h waccm.s.vmx.fUsePreemptTimer)1294 if (pVM->hm.s.vmx.fUsePreemptTimer) 1295 1295 { 1296 PCFGMNODE pCfgHwAccM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "H WACCM");1297 int rc2 = CFGMR3QueryBoolDef(pCfgHwAccM, "UsePreemptTimer", &pVM->h waccm.s.vmx.fUsePreemptTimer, true);1296 PCFGMNODE pCfgHwAccM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "HM"); 1297 int rc2 = CFGMR3QueryBoolDef(pCfgHwAccM, "UsePreemptTimer", &pVM->hm.s.vmx.fUsePreemptTimer, true); 1298 1298 AssertLogRelRC(rc2); 1299 1299 } 1300 if (pVM->h waccm.s.vmx.fUsePreemptTimer)1301 LogRel(("H WACCM: Using the VMX-preemption timer (cPreemptTimerShift=%u)\n", pVM->hwaccm.s.vmx.cPreemptTimerShift));1300 if (pVM->hm.s.vmx.fUsePreemptTimer) 1301 LogRel(("HM: Using the VMX-preemption timer (cPreemptTimerShift=%u)\n", pVM->hm.s.vmx.cPreemptTimerShift)); 1302 1302 } 1303 1303 else 1304 1304 { 1305 LogRel(("H WACCM: VMX setup failed with rc=%Rrc!\n", rc));1306 LogRel(("H WACCM: Last instruction error %x\n", pVM->aCpus[0].hwaccm.s.vmx.lasterror.ulInstrError));1307 pVM->fH WACCMEnabled = false;1305 LogRel(("HM: VMX setup failed with rc=%Rrc!\n", rc)); 1306 LogRel(("HM: Last instruction error %x\n", pVM->aCpus[0].hm.s.vmx.lasterror.ulInstrError)); 1307 pVM->fHMEnabled = false; 1308 1308 } 1309 1309 } 1310 1310 } 1311 1311 else 1312 if (pVM->h waccm.s.svm.fSupported)1313 { 1314 Log(("pVM->h waccm.s.svm.fSupported = %d\n", pVM->hwaccm.s.svm.fSupported));1315 1316 if (pVM->h waccm.s.fInitialized == false)1312 if (pVM->hm.s.svm.fSupported) 1313 { 1314 Log(("pVM->hm.s.svm.fSupported = %d\n", pVM->hm.s.svm.fSupported)); 1315 1316 if (pVM->hm.s.fInitialized == false) 1317 1317 { 1318 1318 /* Erratum 170 which requires a forced TLB flush for each world switch: … … 1343 1343 && !((u32Model == 0x6f || u32Model == 0x6c || u32Model == 0x7c) && u32Stepping >= 2)) 1344 1344 { 1345 LogRel(("H WACMM: AMD cpu with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping));1345 LogRel(("HM: AMD cpu with erratum 170 family %x model %x stepping %x\n", u32Family, u32Model, u32Stepping)); 1346 1346 } 1347 1347 1348 LogRel(("H WACMM: cpuid 0x80000001.u32AMDFeatureECX = %RX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureECX));1349 LogRel(("H WACMM: cpuid 0x80000001.u32AMDFeatureEDX = %RX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureEDX));1350 LogRel(("H WACCM: AMD HWCR MSR = %RX64\n", pVM->hwaccm.s.svm.msrHWCR));1351 LogRel(("H WACCM: AMD-V revision = %X\n", pVM->hwaccm.s.svm.u32Rev));1352 LogRel(("H WACCM: AMD-V max ASID = %d\n", pVM->hwaccm.s.uMaxASID));1353 LogRel(("H WACCM: AMD-V features = %X\n", pVM->hwaccm.s.svm.u32Features));1348 LogRel(("HM: cpuid 0x80000001.u32AMDFeatureECX = %RX32\n", pVM->hm.s.cpuid.u32AMDFeatureECX)); 1349 LogRel(("HM: cpuid 
0x80000001.u32AMDFeatureEDX = %RX32\n", pVM->hm.s.cpuid.u32AMDFeatureEDX)); 1350 LogRel(("HM: AMD HWCR MSR = %RX64\n", pVM->hm.s.svm.msrHWCR)); 1351 LogRel(("HM: AMD-V revision = %X\n", pVM->hm.s.svm.u32Rev)); 1352 LogRel(("HM: AMD-V max ASID = %d\n", pVM->hm.s.uMaxASID)); 1353 LogRel(("HM: AMD-V features = %X\n", pVM->hm.s.svm.u32Features)); 1354 1354 static const struct { uint32_t fFlag; const char *pszName; } s_aSvmFeatures[] = 1355 1355 { … … 1368 1368 #undef FLAG_NAME 1369 1369 }; 1370 uint32_t fSvmFeatures = pVM->h waccm.s.svm.u32Features;1370 uint32_t fSvmFeatures = pVM->hm.s.svm.u32Features; 1371 1371 for (unsigned i = 0; i < RT_ELEMENTS(s_aSvmFeatures); i++) 1372 1372 if (fSvmFeatures & s_aSvmFeatures[i].fFlag) 1373 1373 { 1374 LogRel(("H WACCM: %s\n", s_aSvmFeatures[i].pszName));1374 LogRel(("HM: %s\n", s_aSvmFeatures[i].pszName)); 1375 1375 fSvmFeatures &= ~s_aSvmFeatures[i].fFlag; 1376 1376 } … … 1378 1378 for (unsigned iBit = 0; iBit < 32; iBit++) 1379 1379 if (RT_BIT_32(iBit) & fSvmFeatures) 1380 LogRel(("H WACCM: Reserved bit %u\n", iBit));1380 LogRel(("HM: Reserved bit %u\n", iBit)); 1381 1381 1382 1382 /* Only try once. */ 1383 pVM->h waccm.s.fInitialized = true;1384 1385 if (pVM->h waccm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING)1386 pVM->h waccm.s.fNestedPaging = pVM->hwaccm.s.fAllowNestedPaging;1383 pVM->hm.s.fInitialized = true; 1384 1385 if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_NESTED_PAGING) 1386 pVM->hm.s.fNestedPaging = pVM->hm.s.fAllowNestedPaging; 1387 1387 1388 1388 rc = SUPR3CallVMMR0Ex(pVM->pVMR0, 0 /*idCpu*/, VMMR0_DO_HWACC_SETUP_VM, 0, NULL); … … 1390 1390 if (rc == VINF_SUCCESS) 1391 1391 { 1392 pVM->fH WACCMEnabled = true;1393 pVM->h waccm.s.svm.fEnabled = true;1394 1395 if (pVM->h waccm.s.fNestedPaging)1392 pVM->fHMEnabled = true; 1393 pVM->hm.s.svm.fEnabled = true; 1394 1395 if (pVM->hm.s.fNestedPaging) 1396 1396 { 1397 LogRel(("H WACCM: Enabled nested paging\n"));1397 LogRel(("HM: Enabled nested paging\n")); 1398 1398 #if HC_ARCH_BITS == 64 1399 if (pVM->h waccm.s.fLargePages)1399 if (pVM->hm.s.fLargePages) 1400 1400 { 1401 1401 /* Use large (2 MB) pages for our nested paging PDEs where possible. */ 1402 1402 PGMSetLargePageUsage(pVM, true); 1403 LogRel(("H WACCM: Large page support enabled!\n"));1403 LogRel(("HM: Large page support enabled!\n")); 1404 1404 } 1405 1405 #endif 1406 1406 } 1407 1407 1408 h waccmR3DisableRawMode(pVM);1408 hmR3DisableRawMode(pVM); 1409 1409 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SEP); 1410 1410 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_SYSCALL); 1411 1411 #ifdef VBOX_ENABLE_64_BITS_GUESTS 1412 if (pVM->h waccm.s.fAllow64BitGuests)1412 if (pVM->hm.s.fAllow64BitGuests) 1413 1413 { 1414 1414 CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_PAE); … … 1423 1423 #endif 1424 1424 1425 LogRel((pVM->h waccm.s.fAllow64BitGuests1426 ? "H WACCM: 32-bit and 64-bit guest supported.\n"1427 : "H WACCM: 32-bit guest supported.\n"));1428 1429 LogRel(("H WACCM: TPR Patching %s.\n", (pVM->hwaccm.s.fTRPPatchingAllowed) ? "enabled" : "disabled"));1425 LogRel((pVM->hm.s.fAllow64BitGuests 1426 ? "HM: 32-bit and 64-bit guest supported.\n" 1427 : "HM: 32-bit guest supported.\n")); 1428 1429 LogRel(("HM: TPR Patching %s.\n", (pVM->hm.s.fTRPPatchingAllowed) ? 
"enabled" : "disabled")); 1430 1430 } 1431 1431 else 1432 1432 { 1433 pVM->fH WACCMEnabled = false;1433 pVM->fHMEnabled = false; 1434 1434 } 1435 1435 } 1436 1436 } 1437 if (pVM->fH WACCMEnabled)1438 LogRel(("H WACCM: VT-x/AMD-V init method: %s\n", (pVM->hwaccm.s.fGlobalInit) ? "GLOBAL" : "LOCAL"));1437 if (pVM->fHMEnabled) 1438 LogRel(("HM: VT-x/AMD-V init method: %s\n", (pVM->hm.s.fGlobalInit) ? "GLOBAL" : "LOCAL")); 1439 1439 RTLogRelSetBuffering(fOldBuffered); 1440 1440 return VINF_SUCCESS; … … 1449 1449 * @param pVM The VM. 1450 1450 */ 1451 VMMR3DECL(void) H WACCMR3Relocate(PVM pVM)1452 { 1453 Log(("H WACCMR3Relocate to %RGv\n", MMHyperGetArea(pVM, 0)));1451 VMMR3DECL(void) HMR3Relocate(PVM pVM) 1452 { 1453 Log(("HMR3Relocate to %RGv\n", MMHyperGetArea(pVM, 0))); 1454 1454 1455 1455 /* Fetch the current paging mode during the relocate callback during state loading. */ … … 1460 1460 PVMCPU pVCpu = &pVM->aCpus[i]; 1461 1461 1462 pVCpu->h waccm.s.enmShadowMode = PGMGetShadowMode(pVCpu);1463 Assert(pVCpu->h waccm.s.vmx.enmCurrGuestMode == PGMGetGuestMode(pVCpu));1464 pVCpu->h waccm.s.vmx.enmCurrGuestMode = PGMGetGuestMode(pVCpu);1462 pVCpu->hm.s.enmShadowMode = PGMGetShadowMode(pVCpu); 1463 Assert(pVCpu->hm.s.vmx.enmCurrGuestMode == PGMGetGuestMode(pVCpu)); 1464 pVCpu->hm.s.vmx.enmCurrGuestMode = PGMGetGuestMode(pVCpu); 1465 1465 } 1466 1466 } 1467 1467 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 1468 if (pVM->fH WACCMEnabled)1468 if (pVM->fHMEnabled) 1469 1469 { 1470 1470 int rc; … … 1472 1472 { 1473 1473 case PGMMODE_32_BIT: 1474 pVM->h waccm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_32_TO_AMD64);1474 pVM->hm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_32_TO_AMD64); 1475 1475 break; 1476 1476 1477 1477 case PGMMODE_PAE: 1478 1478 case PGMMODE_PAE_NX: 1479 pVM->h waccm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_PAE_TO_AMD64);1479 pVM->hm.s.pfnHost32ToGuest64R0 = VMMR3GetHostToGuestSwitcher(pVM, VMMSWITCHER_PAE_TO_AMD64); 1480 1480 break; 1481 1481 … … 1484 1484 break; 1485 1485 } 1486 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "VMXGCStartVM64", &pVM->h waccm.s.pfnVMXGCStartVM64);1486 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "VMXGCStartVM64", &pVM->hm.s.pfnVMXGCStartVM64); 1487 1487 AssertReleaseMsgRC(rc, ("VMXGCStartVM64 -> rc=%Rrc\n", rc)); 1488 1488 1489 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "SVMGCVMRun64", &pVM->h waccm.s.pfnSVMGCVMRun64);1489 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "SVMGCVMRun64", &pVM->hm.s.pfnSVMGCVMRun64); 1490 1490 AssertReleaseMsgRC(rc, ("SVMGCVMRun64 -> rc=%Rrc\n", rc)); 1491 1491 1492 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "H WACCMSaveGuestFPU64", &pVM->hwaccm.s.pfnSaveGuestFPU64);1493 AssertReleaseMsgRC(rc, ("H WACCMSetupFPU64 -> rc=%Rrc\n", rc));1494 1495 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "H WACCMSaveGuestDebug64", &pVM->hwaccm.s.pfnSaveGuestDebug64);1496 AssertReleaseMsgRC(rc, ("H WACCMSetupDebug64 -> rc=%Rrc\n", rc));1492 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HMSaveGuestFPU64", &pVM->hm.s.pfnSaveGuestFPU64); 1493 AssertReleaseMsgRC(rc, ("HMSetupFPU64 -> rc=%Rrc\n", rc)); 1494 1495 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HMSaveGuestDebug64", &pVM->hm.s.pfnSaveGuestDebug64); 1496 AssertReleaseMsgRC(rc, ("HMSetupDebug64 -> rc=%Rrc\n", rc)); 1497 1497 1498 1498 # ifdef DEBUG 1499 rc = PDMR3LdrGetSymbolRC(pVM, NULL, "H WACCMTestSwitcher64", &pVM->hwaccm.s.pfnTest64);1500 AssertReleaseMsgRC(rc, ("H WACCMTestSwitcher64 -> rc=%Rrc\n", rc));1499 
rc = PDMR3LdrGetSymbolRC(pVM, NULL, "HMTestSwitcher64", &pVM->hm.s.pfnTest64); 1500 AssertReleaseMsgRC(rc, ("HMTestSwitcher64 -> rc=%Rrc\n", rc)); 1501 1501 # endif 1502 1502 } … … 1512 1512 * @param pVM Pointer to the VM. 1513 1513 */ 1514 VMMR3DECL(bool) H WACCMR3IsAllowed(PVM pVM)1515 { 1516 return pVM->h waccm.s.fAllowed;1514 VMMR3DECL(bool) HMR3IsAllowed(PVM pVM) 1515 { 1516 return pVM->hm.s.fAllowed; 1517 1517 } 1518 1518 … … 1529 1529 * @param enmGuestMode New guest paging mode. 1530 1530 */ 1531 VMMR3DECL(void) H WACCMR3PagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode)1531 VMMR3DECL(void) HMR3PagingModeChanged(PVM pVM, PVMCPU pVCpu, PGMMODE enmShadowMode, PGMMODE enmGuestMode) 1532 1532 { 1533 1533 /* Ignore page mode changes during state loading. */ … … 1535 1535 return; 1536 1536 1537 pVCpu->h waccm.s.enmShadowMode = enmShadowMode;1538 1539 if ( pVM->h waccm.s.vmx.fEnabled1540 && pVM->fH WACCMEnabled)1541 { 1542 if ( pVCpu->h waccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL1537 pVCpu->hm.s.enmShadowMode = enmShadowMode; 1538 1539 if ( pVM->hm.s.vmx.fEnabled 1540 && pVM->fHMEnabled) 1541 { 1542 if ( pVCpu->hm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL 1543 1543 && enmGuestMode >= PGMMODE_PROTECTED) 1544 1544 { … … 1553 1553 } 1554 1554 1555 if (pVCpu->h waccm.s.vmx.enmCurrGuestMode != enmGuestMode)1555 if (pVCpu->hm.s.vmx.enmCurrGuestMode != enmGuestMode) 1556 1556 { 1557 1557 /* Keep track of paging mode changes. */ 1558 pVCpu->h waccm.s.vmx.enmPrevGuestMode = pVCpu->hwaccm.s.vmx.enmCurrGuestMode;1559 pVCpu->h waccm.s.vmx.enmCurrGuestMode = enmGuestMode;1558 pVCpu->hm.s.vmx.enmPrevGuestMode = pVCpu->hm.s.vmx.enmCurrGuestMode; 1559 pVCpu->hm.s.vmx.enmCurrGuestMode = enmGuestMode; 1560 1560 1561 1561 /* Did we miss a change, because all code was executed in the recompiler? */ 1562 if (pVCpu->h waccm.s.vmx.enmLastSeenGuestMode == enmGuestMode)1562 if (pVCpu->hm.s.vmx.enmLastSeenGuestMode == enmGuestMode) 1563 1563 { 1564 Log(("H WACCMR3PagingModeChanged missed %s->%s transition (prev %s)\n", PGMGetModeName(pVCpu->hwaccm.s.vmx.enmPrevGuestMode), PGMGetModeName(pVCpu->hwaccm.s.vmx.enmCurrGuestMode), PGMGetModeName(pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode)));1565 pVCpu->h waccm.s.vmx.enmLastSeenGuestMode = pVCpu->hwaccm.s.vmx.enmPrevGuestMode;1564 Log(("HMR3PagingModeChanged missed %s->%s transition (prev %s)\n", PGMGetModeName(pVCpu->hm.s.vmx.enmPrevGuestMode), PGMGetModeName(pVCpu->hm.s.vmx.enmCurrGuestMode), PGMGetModeName(pVCpu->hm.s.vmx.enmLastSeenGuestMode))); 1565 pVCpu->hm.s.vmx.enmLastSeenGuestMode = pVCpu->hm.s.vmx.enmPrevGuestMode; 1566 1566 } 1567 1567 } 1568 1568 1569 1569 /* Reset the contents of the read cache. */ 1570 PVMCSCACHE pCache = &pVCpu->h waccm.s.vmx.VMCSCache;1570 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache; 1571 1571 for (unsigned j = 0; j < pCache->Read.cValidEntries; j++) 1572 1572 pCache->Read.aFieldVal[j] = 0; … … 1575 1575 1576 1576 /** 1577 * Terminates the H WACCM.1577 * Terminates the HM. 1578 1578 * 1579 1579 * Termination means cleaning up and freeing all resources, … … 1583 1583 * @param pVM Pointer to the VM. 
1584 1584 */ 1585 VMMR3DECL(int) H WACCMR3Term(PVM pVM)1586 { 1587 if (pVM->h waccm.s.vmx.pRealModeTSS)1588 { 1589 PDMR3VMMDevHeapFree(pVM, pVM->h waccm.s.vmx.pRealModeTSS);1590 pVM->h waccm.s.vmx.pRealModeTSS = 0;1591 } 1592 h waccmR3TermCPU(pVM);1585 VMMR3DECL(int) HMR3Term(PVM pVM) 1586 { 1587 if (pVM->hm.s.vmx.pRealModeTSS) 1588 { 1589 PDMR3VMMDevHeapFree(pVM, pVM->hm.s.vmx.pRealModeTSS); 1590 pVM->hm.s.vmx.pRealModeTSS = 0; 1591 } 1592 hmR3TermCPU(pVM); 1593 1593 return 0; 1594 1594 } … … 1596 1596 1597 1597 /** 1598 * Terminates the per-VCPU H WACCM.1598 * Terminates the per-VCPU HM. 1599 1599 * 1600 1600 * @returns VBox status code. 1601 1601 * @param pVM Pointer to the VM. 1602 1602 */ 1603 static int h waccmR3TermCPU(PVM pVM)1603 static int hmR3TermCPU(PVM pVM) 1604 1604 { 1605 1605 for (VMCPUID i = 0; i < pVM->cCpus; i++) … … 1608 1608 1609 1609 #ifdef VBOX_WITH_STATISTICS 1610 if (pVCpu->h waccm.s.paStatExitReason)1610 if (pVCpu->hm.s.paStatExitReason) 1611 1611 { 1612 MMHyperFree(pVM, pVCpu->h waccm.s.paStatExitReason);1613 pVCpu->h waccm.s.paStatExitReason = NULL;1614 pVCpu->h waccm.s.paStatExitReasonR0 = NIL_RTR0PTR;1612 MMHyperFree(pVM, pVCpu->hm.s.paStatExitReason); 1613 pVCpu->hm.s.paStatExitReason = NULL; 1614 pVCpu->hm.s.paStatExitReasonR0 = NIL_RTR0PTR; 1615 1615 } 1616 if (pVCpu->h waccm.s.paStatInjectedIrqs)1616 if (pVCpu->hm.s.paStatInjectedIrqs) 1617 1617 { 1618 MMHyperFree(pVM, pVCpu->h waccm.s.paStatInjectedIrqs);1619 pVCpu->h waccm.s.paStatInjectedIrqs = NULL;1620 pVCpu->h waccm.s.paStatInjectedIrqsR0 = NIL_RTR0PTR;1618 MMHyperFree(pVM, pVCpu->hm.s.paStatInjectedIrqs); 1619 pVCpu->hm.s.paStatInjectedIrqs = NULL; 1620 pVCpu->hm.s.paStatInjectedIrqsR0 = NIL_RTR0PTR; 1621 1621 } 1622 1622 #endif 1623 1623 1624 1624 #ifdef VBOX_WITH_CRASHDUMP_MAGIC 1625 memset(pVCpu->h waccm.s.vmx.VMCSCache.aMagic, 0, sizeof(pVCpu->hwaccm.s.vmx.VMCSCache.aMagic));1626 pVCpu->h waccm.s.vmx.VMCSCache.uMagic = 0;1627 pVCpu->h waccm.s.vmx.VMCSCache.uPos = 0xffffffff;1625 memset(pVCpu->hm.s.vmx.VMCSCache.aMagic, 0, sizeof(pVCpu->hm.s.vmx.VMCSCache.aMagic)); 1626 pVCpu->hm.s.vmx.VMCSCache.uMagic = 0; 1627 pVCpu->hm.s.vmx.VMCSCache.uPos = 0xffffffff; 1628 1628 #endif 1629 1629 } … … 1635 1635 * Resets a virtual CPU. 1636 1636 * 1637 * Used by H WACCMR3Reset and CPU hot plugging.1637 * Used by HMR3Reset and CPU hot plugging. 1638 1638 * 1639 1639 * @param pVCpu The CPU to reset. 1640 1640 */ 1641 VMMR3DECL(void) H WACCMR3ResetCpu(PVMCPU pVCpu)1641 VMMR3DECL(void) HMR3ResetCpu(PVMCPU pVCpu) 1642 1642 { 1643 1643 /* On first entry we'll sync everything. */ 1644 pVCpu->h waccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;1645 1646 pVCpu->h waccm.s.vmx.cr0_mask = 0;1647 pVCpu->h waccm.s.vmx.cr4_mask = 0;1648 1649 pVCpu->h waccm.s.fActive = false;1650 pVCpu->h waccm.s.Event.fPending = false;1644 pVCpu->hm.s.fContextUseFlags = HM_CHANGED_ALL; 1645 1646 pVCpu->hm.s.vmx.cr0_mask = 0; 1647 pVCpu->hm.s.vmx.cr4_mask = 0; 1648 1649 pVCpu->hm.s.fActive = false; 1650 pVCpu->hm.s.Event.fPending = false; 1651 1651 1652 1652 /* Reset state information for real-mode emulation in VT-x. */ 1653 pVCpu->h waccm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;1654 pVCpu->h waccm.s.vmx.enmPrevGuestMode = PGMMODE_REAL;1655 pVCpu->h waccm.s.vmx.enmCurrGuestMode = PGMMODE_REAL;1653 pVCpu->hm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL; 1654 pVCpu->hm.s.vmx.enmPrevGuestMode = PGMMODE_REAL; 1655 pVCpu->hm.s.vmx.enmCurrGuestMode = PGMMODE_REAL; 1656 1656 1657 1657 /* Reset the contents of the read cache. 
*/ 1658 PVMCSCACHE pCache = &pVCpu->h waccm.s.vmx.VMCSCache;1658 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache; 1659 1659 for (unsigned j = 0; j < pCache->Read.cValidEntries; j++) 1660 1660 pCache->Read.aFieldVal[j] = 0; … … 1671 1671 * The VM is being reset. 1672 1672 * 1673 * For the H WACCM component this means that any GDT/LDT/TSS monitors1673 * For the HM component this means that any GDT/LDT/TSS monitors 1674 1674 * needs to be removed. 1675 1675 * 1676 1676 * @param pVM Pointer to the VM. 1677 1677 */ 1678 VMMR3DECL(void) H WACCMR3Reset(PVM pVM)1679 { 1680 LogFlow(("H WACCMR3Reset:\n"));1681 1682 if (pVM->fH WACCMEnabled)1683 h waccmR3DisableRawMode(pVM);1678 VMMR3DECL(void) HMR3Reset(PVM pVM) 1679 { 1680 LogFlow(("HMR3Reset:\n")); 1681 1682 if (pVM->fHMEnabled) 1683 hmR3DisableRawMode(pVM); 1684 1684 1685 1685 for (VMCPUID i = 0; i < pVM->cCpus; i++) … … 1687 1687 PVMCPU pVCpu = &pVM->aCpus[i]; 1688 1688 1689 H WACCMR3ResetCpu(pVCpu);1689 HMR3ResetCpu(pVCpu); 1690 1690 } 1691 1691 1692 1692 /* Clear all patch information. */ 1693 pVM->h waccm.s.pGuestPatchMem = 0;1694 pVM->h waccm.s.pFreeGuestPatchMem = 0;1695 pVM->h waccm.s.cbGuestPatchMem = 0;1696 pVM->h waccm.s.cPatches = 0;1697 pVM->h waccm.s.PatchTree = 0;1698 pVM->h waccm.s.fTPRPatchingActive = false;1699 ASMMemZero32(pVM->h waccm.s.aPatches, sizeof(pVM->hwaccm.s.aPatches));1693 pVM->hm.s.pGuestPatchMem = 0; 1694 pVM->hm.s.pFreeGuestPatchMem = 0; 1695 pVM->hm.s.cbGuestPatchMem = 0; 1696 pVM->hm.s.cPatches = 0; 1697 pVM->hm.s.PatchTree = 0; 1698 pVM->hm.s.fTPRPatchingActive = false; 1699 ASMMemZero32(pVM->hm.s.aPatches, sizeof(pVM->hm.s.aPatches)); 1700 1700 } 1701 1701 … … 1709 1709 * @param pvUser Unused. 1710 1710 */ 1711 DECLCALLBACK(VBOXSTRICTRC) h waccmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser)1711 DECLCALLBACK(VBOXSTRICTRC) hmR3RemovePatches(PVM pVM, PVMCPU pVCpu, void *pvUser) 1712 1712 { 1713 1713 VMCPUID idCpu = (VMCPUID)(uintptr_t)pvUser; … … 1717 1717 return VINF_SUCCESS; 1718 1718 1719 Log(("h waccmR3RemovePatches\n"));1720 for (unsigned i = 0; i < pVM->h waccm.s.cPatches; i++)1719 Log(("hmR3RemovePatches\n")); 1720 for (unsigned i = 0; i < pVM->hm.s.cPatches; i++) 1721 1721 { 1722 1722 uint8_t abInstr[15]; 1723 PH WACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];1723 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i]; 1724 1724 RTGCPTR pInstrGC = (RTGCPTR)pPatch->Core.Key; 1725 1725 int rc; … … 1758 1758 #endif 1759 1759 } 1760 pVM->h waccm.s.cPatches = 0;1761 pVM->h waccm.s.PatchTree = 0;1762 pVM->h waccm.s.pFreeGuestPatchMem = pVM->hwaccm.s.pGuestPatchMem;1763 pVM->h waccm.s.fTPRPatchingActive = false;1760 pVM->hm.s.cPatches = 0; 1761 pVM->hm.s.PatchTree = 0; 1762 pVM->hm.s.pFreeGuestPatchMem = pVM->hm.s.pGuestPatchMem; 1763 pVM->hm.s.fTPRPatchingActive = false; 1764 1764 return VINF_SUCCESS; 1765 1765 } … … 1771 1771 * @returns VBox status code. 1772 1772 * @param pVM Pointer to the VM. 1773 * @param idCpu VCPU to execute h waccmR3RemovePatches on.1773 * @param idCpu VCPU to execute hmR3RemovePatches on. 1774 1774 * @param pPatchMem Patch memory range. 1775 1775 * @param cbPatchMem Size of the memory range. 
1776 1776 */ 1777 static int h waccmR3EnablePatching(PVM pVM, VMCPUID idCpu, RTRCPTR pPatchMem, unsigned cbPatchMem)1778 { 1779 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, h waccmR3RemovePatches, (void *)(uintptr_t)idCpu);1777 static int hmR3EnablePatching(PVM pVM, VMCPUID idCpu, RTRCPTR pPatchMem, unsigned cbPatchMem) 1778 { 1779 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches, (void *)(uintptr_t)idCpu); 1780 1780 AssertRC(rc); 1781 1781 1782 pVM->h waccm.s.pGuestPatchMem = pPatchMem;1783 pVM->h waccm.s.pFreeGuestPatchMem = pPatchMem;1784 pVM->h waccm.s.cbGuestPatchMem = cbPatchMem;1782 pVM->hm.s.pGuestPatchMem = pPatchMem; 1783 pVM->hm.s.pFreeGuestPatchMem = pPatchMem; 1784 pVM->hm.s.cbGuestPatchMem = cbPatchMem; 1785 1785 return VINF_SUCCESS; 1786 1786 } … … 1795 1795 * @param cbPatchMem Size of the memory range. 1796 1796 */ 1797 VMMR3DECL(int) H WACMMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)1797 VMMR3DECL(int) HMR3EnablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem) 1798 1798 { 1799 1799 VM_ASSERT_EMT(pVM); 1800 Log(("H WACMMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem));1800 Log(("HMR3EnablePatching %RGv size %x\n", pPatchMem, cbPatchMem)); 1801 1801 if (pVM->cCpus > 1) 1802 1802 { 1803 1803 /* We own the IOM lock here and could cause a deadlock by waiting for a VCPU that is blocking on the IOM lock. */ 1804 1804 int rc = VMR3ReqCallNoWait(pVM, VMCPUID_ANY_QUEUE, 1805 (PFNRT)h waccmR3EnablePatching, 4, pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);1805 (PFNRT)hmR3EnablePatching, 4, pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem); 1806 1806 AssertRC(rc); 1807 1807 return rc; 1808 1808 } 1809 return h waccmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem);1809 return hmR3EnablePatching(pVM, VMMGetCpuId(pVM), (RTRCPTR)pPatchMem, cbPatchMem); 1810 1810 } 1811 1811 … … 1819 1819 * @param cbPatchMem Size of the memory range. 1820 1820 */ 1821 VMMR3DECL(int) H WACMMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem)1822 { 1823 Log(("H WACMMR3DisablePatching %RGv size %x\n", pPatchMem, cbPatchMem));1824 1825 Assert(pVM->h waccm.s.pGuestPatchMem == pPatchMem);1826 Assert(pVM->h waccm.s.cbGuestPatchMem == cbPatchMem);1821 VMMR3DECL(int) HMR3DisablePatching(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem) 1822 { 1823 Log(("HMR3DisablePatching %RGv size %x\n", pPatchMem, cbPatchMem)); 1824 1825 Assert(pVM->hm.s.pGuestPatchMem == pPatchMem); 1826 Assert(pVM->hm.s.cbGuestPatchMem == cbPatchMem); 1827 1827 1828 1828 /* @todo Potential deadlock when other VCPUs are waiting on the IOM lock (we own it)!! 
*/ 1829 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, h waccmR3RemovePatches, (void *)(uintptr_t)VMMGetCpuId(pVM));1829 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, hmR3RemovePatches, (void *)(uintptr_t)VMMGetCpuId(pVM)); 1830 1830 AssertRC(rc); 1831 1831 1832 pVM->h waccm.s.pGuestPatchMem = 0;1833 pVM->h waccm.s.pFreeGuestPatchMem = 0;1834 pVM->h waccm.s.cbGuestPatchMem = 0;1835 pVM->h waccm.s.fTPRPatchingActive = false;1832 pVM->hm.s.pGuestPatchMem = 0; 1833 pVM->hm.s.pFreeGuestPatchMem = 0; 1834 pVM->hm.s.cbGuestPatchMem = 0; 1835 pVM->hm.s.fTPRPatchingActive = false; 1836 1836 return VINF_SUCCESS; 1837 1837 } … … 1847 1847 * 1848 1848 */ 1849 DECLCALLBACK(VBOXSTRICTRC) h waccmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)1849 DECLCALLBACK(VBOXSTRICTRC) hmR3ReplaceTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser) 1850 1850 { 1851 1851 /* … … 1863 1863 */ 1864 1864 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); 1865 PH WACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);1865 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip); 1866 1866 if (pPatch) 1867 1867 { 1868 Log(("h waccmR3ReplaceTprInstr: already patched %RGv\n", pCtx->rip));1868 Log(("hmR3ReplaceTprInstr: already patched %RGv\n", pCtx->rip)); 1869 1869 return VINF_SUCCESS; 1870 1870 } 1871 uint32_t const idx = pVM->h waccm.s.cPatches;1872 if (idx >= RT_ELEMENTS(pVM->h waccm.s.aPatches))1873 { 1874 Log(("h waccmR3ReplaceTprInstr: no available patch slots (%RGv)\n", pCtx->rip));1871 uint32_t const idx = pVM->hm.s.cPatches; 1872 if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches)) 1873 { 1874 Log(("hmR3ReplaceTprInstr: no available patch slots (%RGv)\n", pCtx->rip)); 1875 1875 return VINF_SUCCESS; 1876 1876 } 1877 pPatch = &pVM->h waccm.s.aPatches[idx];1878 1879 Log(("h waccmR3ReplaceTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));1877 pPatch = &pVM->hm.s.aPatches[idx]; 1878 1879 Log(("hmR3ReplaceTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx)); 1880 1880 1881 1881 /* 1882 1882 * Disassembler the instruction and get cracking. 
1883 1883 */ 1884 DBGFR3DisasInstrCurrentLog(pVCpu, "h waccmR3ReplaceTprInstr");1885 PDISCPUSTATE pDis = &pVCpu->h waccm.s.DisState;1884 DBGFR3DisasInstrCurrentLog(pVCpu, "hmR3ReplaceTprInstr"); 1885 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState; 1886 1886 uint32_t cbOp; 1887 1887 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp); … … 1903 1903 if (pDis->Param2.fUse == DISUSE_REG_GEN32) 1904 1904 { 1905 pPatch->enmType = H WACCMTPRINSTR_WRITE_REG;1905 pPatch->enmType = HMTPRINSTR_WRITE_REG; 1906 1906 pPatch->uSrcOperand = pDis->Param2.Base.idxGenReg; 1907 Log(("h waccmR3ReplaceTprInstr: HWACCMTPRINSTR_WRITE_REG %u\n", pDis->Param2.Base.idxGenReg));1907 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_REG %u\n", pDis->Param2.Base.idxGenReg)); 1908 1908 } 1909 1909 else 1910 1910 { 1911 1911 Assert(pDis->Param2.fUse == DISUSE_IMMEDIATE32); 1912 pPatch->enmType = H WACCMTPRINSTR_WRITE_IMM;1912 pPatch->enmType = HMTPRINSTR_WRITE_IMM; 1913 1913 pPatch->uSrcOperand = pDis->Param2.uValue; 1914 Log(("h waccmR3ReplaceTprInstr: HWACCMTPRINSTR_WRITE_IMM %#llx\n", pDis->Param2.uValue));1914 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_WRITE_IMM %#llx\n", pDis->Param2.uValue)); 1915 1915 } 1916 1916 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pCtx->rip, s_abVMMCall, sizeof(s_abVMMCall)); … … 1947 1947 && pDis->Param2.fUse == DISUSE_IMMEDIATE8 1948 1948 && pDis->Param2.uValue == 4 1949 && cbOpMmio + cbOp < sizeof(pVM->h waccm.s.aPatches[idx].aOpcode))1949 && cbOpMmio + cbOp < sizeof(pVM->hm.s.aPatches[idx].aOpcode)) 1950 1950 { 1951 1951 uint8_t abInstr[15]; … … 1972 1972 1973 1973 Log(("Acceptable read/shr candidate!\n")); 1974 pPatch->enmType = H WACCMTPRINSTR_READ_SHR4;1974 pPatch->enmType = HMTPRINSTR_READ_SHR4; 1975 1975 } 1976 1976 else 1977 1977 { 1978 pPatch->enmType = H WACCMTPRINSTR_READ;1978 pPatch->enmType = HMTPRINSTR_READ; 1979 1979 pPatch->uDstOperand = idxMmioReg; 1980 1980 … … 1984 1984 memcpy(pPatch->aNewOpcode, s_abVMMCall, sizeof(s_abVMMCall)); 1985 1985 pPatch->cbNewOp = sizeof(s_abVMMCall); 1986 Log(("h waccmR3ReplaceTprInstr: HWACCMTPRINSTR_READ %u\n", pPatch->uDstOperand));1986 Log(("hmR3ReplaceTprInstr: HMTPRINSTR_READ %u\n", pPatch->uDstOperand)); 1987 1987 } 1988 1988 } 1989 1989 1990 1990 pPatch->Core.Key = pCtx->eip; 1991 rc = RTAvloU32Insert(&pVM->h waccm.s.PatchTree, &pPatch->Core);1991 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core); 1992 1992 AssertRC(rc); 1993 1993 1994 pVM->h waccm.s.cPatches++;1995 STAM_COUNTER_INC(&pVM->h waccm.s.StatTPRReplaceSuccess);1994 pVM->hm.s.cPatches++; 1995 STAM_COUNTER_INC(&pVM->hm.s.StatTPRReplaceSuccess); 1996 1996 return VINF_SUCCESS; 1997 1997 } … … 2000 2000 * Save invalid patch, so we will not try again. 
2001 2001 */ 2002 Log(("h waccmR3ReplaceTprInstr: Failed to patch instr!\n"));2002 Log(("hmR3ReplaceTprInstr: Failed to patch instr!\n")); 2003 2003 pPatch->Core.Key = pCtx->eip; 2004 pPatch->enmType = H WACCMTPRINSTR_INVALID;2005 rc = RTAvloU32Insert(&pVM->h waccm.s.PatchTree, &pPatch->Core);2004 pPatch->enmType = HMTPRINSTR_INVALID; 2005 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core); 2006 2006 AssertRC(rc); 2007 pVM->h waccm.s.cPatches++;2008 STAM_COUNTER_INC(&pVM->h waccm.s.StatTPRReplaceFailure);2007 pVM->hm.s.cPatches++; 2008 STAM_COUNTER_INC(&pVM->hm.s.StatTPRReplaceFailure); 2009 2009 return VINF_SUCCESS; 2010 2010 } … … 2020 2020 * 2021 2021 */ 2022 DECLCALLBACK(VBOXSTRICTRC) h waccmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser)2022 DECLCALLBACK(VBOXSTRICTRC) hmR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, void *pvUser) 2023 2023 { 2024 2024 /* … … 2036 2036 */ 2037 2037 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); 2038 PH WACCMTPRPATCH pPatch = (PHWACCMTPRPATCH)RTAvloU32Get(&pVM->hwaccm.s.PatchTree, (AVLOU32KEY)pCtx->eip);2038 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip); 2039 2039 if (pPatch) 2040 2040 { 2041 Log(("h waccmR3PatchTprInstr: already patched %RGv\n", pCtx->rip));2041 Log(("hmR3PatchTprInstr: already patched %RGv\n", pCtx->rip)); 2042 2042 return VINF_SUCCESS; 2043 2043 } 2044 uint32_t const idx = pVM->h waccm.s.cPatches;2045 if (idx >= RT_ELEMENTS(pVM->h waccm.s.aPatches))2046 { 2047 Log(("h waccmR3PatchTprInstr: no available patch slots (%RGv)\n", pCtx->rip));2044 uint32_t const idx = pVM->hm.s.cPatches; 2045 if (idx >= RT_ELEMENTS(pVM->hm.s.aPatches)) 2046 { 2047 Log(("hmR3PatchTprInstr: no available patch slots (%RGv)\n", pCtx->rip)); 2048 2048 return VINF_SUCCESS; 2049 2049 } 2050 pPatch = &pVM->h waccm.s.aPatches[idx];2051 2052 Log(("h waccmR3PatchTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx));2053 DBGFR3DisasInstrCurrentLog(pVCpu, "h waccmR3PatchTprInstr");2050 pPatch = &pVM->hm.s.aPatches[idx]; 2051 2052 Log(("hmR3PatchTprInstr: rip=%RGv idxPatch=%u\n", pCtx->rip, idx)); 2053 DBGFR3DisasInstrCurrentLog(pVCpu, "hmR3PatchTprInstr"); 2054 2054 2055 2055 /* 2056 2056 * Disassemble the instruction and get cracking. 2057 2057 */ 2058 PDISCPUSTATE pDis = &pVCpu->h waccm.s.DisState;2058 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState; 2059 2059 uint32_t cbOp; 2060 2060 int rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp); … … 2071 2071 2072 2072 pPatch->cbOp = cbOp; 2073 pPatch->enmType = H WACCMTPRINSTR_JUMP_REPLACEMENT;2073 pPatch->enmType = HMTPRINSTR_JUMP_REPLACEMENT; 2074 2074 2075 2075 if (pDis->Param1.fUse == DISUSE_DISPLACEMENT32) … … 2177 2177 } 2178 2178 aPatch[off++] = 0xE9; /* jmp return_address */ 2179 *(RTRCUINTPTR *)&aPatch[off] = ((RTRCUINTPTR)pCtx->eip + cbOp) - ((RTRCUINTPTR)pVM->h waccm.s.pFreeGuestPatchMem + off + 4);2179 *(RTRCUINTPTR *)&aPatch[off] = ((RTRCUINTPTR)pCtx->eip + cbOp) - ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem + off + 4); 2180 2180 off += sizeof(RTRCUINTPTR); 2181 2181 2182 if (pVM->h waccm.s.pFreeGuestPatchMem + off <= pVM->hwaccm.s.pGuestPatchMem + pVM->hwaccm.s.cbGuestPatchMem)2182 if (pVM->hm.s.pFreeGuestPatchMem + off <= pVM->hm.s.pGuestPatchMem + pVM->hm.s.cbGuestPatchMem) 2183 2183 { 2184 2184 /* Write new code to the patch buffer. 
*/ 2185 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pVM->h waccm.s.pFreeGuestPatchMem, aPatch, off);2185 rc = PGMPhysSimpleWriteGCPtr(pVCpu, pVM->hm.s.pFreeGuestPatchMem, aPatch, off); 2186 2186 AssertRC(rc); 2187 2187 2188 2188 #ifdef LOG_ENABLED 2189 2189 uint32_t cbCurInstr; 2190 for (RTGCPTR GCPtrInstr = pVM->h waccm.s.pFreeGuestPatchMem;2191 GCPtrInstr < pVM->h waccm.s.pFreeGuestPatchMem + off;2190 for (RTGCPTR GCPtrInstr = pVM->hm.s.pFreeGuestPatchMem; 2191 GCPtrInstr < pVM->hm.s.pFreeGuestPatchMem + off; 2192 2192 GCPtrInstr += RT_MAX(cbCurInstr, 1)) 2193 2193 { … … 2203 2203 2204 2204 pPatch->aNewOpcode[0] = 0xE9; 2205 *(RTRCUINTPTR *)&pPatch->aNewOpcode[1] = ((RTRCUINTPTR)pVM->h waccm.s.pFreeGuestPatchMem) - ((RTRCUINTPTR)pCtx->eip + 5);2205 *(RTRCUINTPTR *)&pPatch->aNewOpcode[1] = ((RTRCUINTPTR)pVM->hm.s.pFreeGuestPatchMem) - ((RTRCUINTPTR)pCtx->eip + 5); 2206 2206 2207 2207 /* Overwrite the TPR instruction with a jump. */ … … 2211 2211 DBGFR3DisasInstrCurrentLog(pVCpu, "Jump"); 2212 2212 2213 pVM->h waccm.s.pFreeGuestPatchMem += off;2213 pVM->hm.s.pFreeGuestPatchMem += off; 2214 2214 pPatch->cbNewOp = 5; 2215 2215 2216 2216 pPatch->Core.Key = pCtx->eip; 2217 rc = RTAvloU32Insert(&pVM->h waccm.s.PatchTree, &pPatch->Core);2217 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core); 2218 2218 AssertRC(rc); 2219 2219 2220 pVM->h waccm.s.cPatches++;2221 pVM->h waccm.s.fTPRPatchingActive = true;2222 STAM_COUNTER_INC(&pVM->h waccm.s.StatTPRPatchSuccess);2220 pVM->hm.s.cPatches++; 2221 pVM->hm.s.fTPRPatchingActive = true; 2222 STAM_COUNTER_INC(&pVM->hm.s.StatTPRPatchSuccess); 2223 2223 return VINF_SUCCESS; 2224 2224 } … … 2227 2227 } 2228 2228 else 2229 Log(("h waccmR3PatchTprInstr: Failed to patch instr!\n"));2229 Log(("hmR3PatchTprInstr: Failed to patch instr!\n")); 2230 2230 2231 2231 … … 2233 2233 * Save invalid patch, so we will not try again. 2234 2234 */ 2235 pPatch = &pVM->h waccm.s.aPatches[idx];2235 pPatch = &pVM->hm.s.aPatches[idx]; 2236 2236 pPatch->Core.Key = pCtx->eip; 2237 pPatch->enmType = H WACCMTPRINSTR_INVALID;2238 rc = RTAvloU32Insert(&pVM->h waccm.s.PatchTree, &pPatch->Core);2237 pPatch->enmType = HMTPRINSTR_INVALID; 2238 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core); 2239 2239 AssertRC(rc); 2240 pVM->h waccm.s.cPatches++;2241 STAM_COUNTER_INC(&pVM->h waccm.s.StatTPRPatchFailure);2240 pVM->hm.s.cPatches++; 2241 STAM_COUNTER_INC(&pVM->hm.s.StatTPRPatchFailure); 2242 2242 return VINF_SUCCESS; 2243 2243 } … … 2252 2252 * @param pCtx Pointer to the guest CPU context. 2253 2253 */ 2254 VMMR3DECL(int) H WACCMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)2254 VMMR3DECL(int) HMR3PatchTprInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 2255 2255 { 2256 2256 NOREF(pCtx); 2257 2257 int rc = VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ONE_BY_ONE, 2258 pVM->h waccm.s.pGuestPatchMem ? hwaccmR3PatchTprInstr : hwaccmR3ReplaceTprInstr,2258 pVM->hm.s.pGuestPatchMem ? hmR3PatchTprInstr : hmR3ReplaceTprInstr, 2259 2259 (void *)(uintptr_t)pVCpu->idCpu); 2260 2260 AssertRC(rc); … … 2270 2270 * @param pCtx Partial VM execution context. 
2271 2271 */ 2272 VMMR3DECL(int) H WACCMR3EmulateIoBlock(PVM pVM, PCPUMCTX pCtx)2272 VMMR3DECL(int) HMR3EmulateIoBlock(PVM pVM, PCPUMCTX pCtx) 2273 2273 { 2274 2274 PVMCPU pVCpu = VMMGetCpu(pVM); 2275 2275 2276 Assert(pVM->fH WACCMEnabled);2277 Log(("H WACCMR3EmulateIoBlock\n"));2276 Assert(pVM->fHMEnabled); 2277 Log(("HMR3EmulateIoBlock\n")); 2278 2278 2279 2279 /* This is primarily intended to speed up Grub, so we don't care about paged protected mode. */ 2280 if (H WACCMCanEmulateIoBlockEx(pCtx))2281 { 2282 Log(("H WACCMR3EmulateIoBlock -> enabled\n"));2283 pVCpu->h waccm.s.EmulateIoBlock.fEnabled = true;2284 pVCpu->h waccm.s.EmulateIoBlock.GCPtrFunctionEip = pCtx->rip;2285 pVCpu->h waccm.s.EmulateIoBlock.cr0 = pCtx->cr0;2280 if (HMCanEmulateIoBlockEx(pCtx)) 2281 { 2282 Log(("HMR3EmulateIoBlock -> enabled\n")); 2283 pVCpu->hm.s.EmulateIoBlock.fEnabled = true; 2284 pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip = pCtx->rip; 2285 pVCpu->hm.s.EmulateIoBlock.cr0 = pCtx->cr0; 2286 2286 return VINF_EM_RESCHEDULE_REM; 2287 2287 } … … 2297 2297 * @param pCtx Partial VM execution context. 2298 2298 */ 2299 VMMR3DECL(bool) H WACCMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx)2299 VMMR3DECL(bool) HMR3CanExecuteGuest(PVM pVM, PCPUMCTX pCtx) 2300 2300 { 2301 2301 PVMCPU pVCpu = VMMGetCpu(pVM); 2302 2302 2303 Assert(pVM->fH WACCMEnabled);2303 Assert(pVM->fHMEnabled); 2304 2304 2305 2305 /* If we're still executing the IO code, then return false. */ 2306 if ( RT_UNLIKELY(pVCpu->h waccm.s.EmulateIoBlock.fEnabled)2307 && pCtx->rip < pVCpu->h waccm.s.EmulateIoBlock.GCPtrFunctionEip + 0x2002308 && pCtx->rip > pVCpu->h waccm.s.EmulateIoBlock.GCPtrFunctionEip - 0x2002309 && pCtx->cr0 == pVCpu->h waccm.s.EmulateIoBlock.cr0)2306 if ( RT_UNLIKELY(pVCpu->hm.s.EmulateIoBlock.fEnabled) 2307 && pCtx->rip < pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip + 0x200 2308 && pCtx->rip > pVCpu->hm.s.EmulateIoBlock.GCPtrFunctionEip - 0x200 2309 && pCtx->cr0 == pVCpu->hm.s.EmulateIoBlock.cr0) 2310 2310 return false; 2311 2311 2312 pVCpu->h waccm.s.EmulateIoBlock.fEnabled = false;2312 pVCpu->hm.s.EmulateIoBlock.fEnabled = false; 2313 2313 2314 2314 /* AMD-V supports real & protected mode with or without paging. */ 2315 if (pVM->h waccm.s.svm.fEnabled)2316 { 2317 pVCpu->h waccm.s.fActive = true;2315 if (pVM->hm.s.svm.fEnabled) 2316 { 2317 pVCpu->hm.s.fActive = true; 2318 2318 return true; 2319 2319 } 2320 2320 2321 pVCpu->h waccm.s.fActive = false;2321 pVCpu->hm.s.fActive = false; 2322 2322 2323 2323 /* Note! The context supplied by REM is partial. If we add more checks here, be sure to verify that REM provides this info! */ 2324 Assert((pVM->h waccm.s.vmx.fUnrestrictedGuest && !pVM->hwaccm.s.vmx.pRealModeTSS) || (!pVM->hwaccm.s.vmx.fUnrestrictedGuest && pVM->hwaccm.s.vmx.pRealModeTSS));2325 2326 bool fSupportsRealMode = pVM->h waccm.s.vmx.fUnrestrictedGuest || PDMVMMDevHeapIsEnabled(pVM);2327 if (!pVM->h waccm.s.vmx.fUnrestrictedGuest)2324 Assert((pVM->hm.s.vmx.fUnrestrictedGuest && !pVM->hm.s.vmx.pRealModeTSS) || (!pVM->hm.s.vmx.fUnrestrictedGuest && pVM->hm.s.vmx.pRealModeTSS)); 2325 2326 bool fSupportsRealMode = pVM->hm.s.vmx.fUnrestrictedGuest || PDMVMMDevHeapIsEnabled(pVM); 2327 if (!pVM->hm.s.vmx.fUnrestrictedGuest) 2328 2328 { 2329 2329 /* … … 2362 2362 mode. VT-x can't handle the CPU state right after a switch 2363 2363 from real to protected mode. 
(all sorts of RPL & DPL assumptions) */ 2364 if ( pVCpu->h waccm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL2364 if ( pVCpu->hm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL 2365 2365 && enmGuestMode >= PGMMODE_PROTECTED) 2366 2366 { … … 2387 2387 { 2388 2388 if ( !CPUMIsGuestInLongModeEx(pCtx) 2389 && !pVM->h waccm.s.vmx.fUnrestrictedGuest)2389 && !pVM->hm.s.vmx.fUnrestrictedGuest) 2390 2390 { 2391 2391 /** @todo This should (probably) be set on every excursion to the REM, … … 2393 2393 * back to REM for real mode execution. (The XP hack below doesn't 2394 2394 * work reliably without this.) 2395 * Update: Implemented in EM.cpp, see #ifdef EM_NOTIFY_H WACCM. */2396 pVM->aCpus[0].h waccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;2397 2398 if ( !pVM->h waccm.s.fNestedPaging /* requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap*/2395 * Update: Implemented in EM.cpp, see #ifdef EM_NOTIFY_HM. */ 2396 pVM->aCpus[0].hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST; 2397 2398 if ( !pVM->hm.s.fNestedPaging /* requires a fake PD for real *and* protected mode without paging - stored in the VMM device heap*/ 2399 2399 || CPUMIsGuestInRealModeEx(pCtx)) /* requires a fake TSS for real mode - stored in the VMM device heap */ 2400 2400 return false; … … 2431 2431 } 2432 2432 2433 if (pVM->h waccm.s.vmx.fEnabled)2433 if (pVM->hm.s.vmx.fEnabled) 2434 2434 { 2435 2435 uint32_t mask; 2436 2436 2437 2437 /* if bit N is set in cr0_fixed0, then it must be set in the guest's cr0. */ 2438 mask = (uint32_t)pVM->h waccm.s.vmx.msr.vmx_cr0_fixed0;2439 /* Note: We ignore the NE bit here on purpose; see vmmr0\h waccmr0.cpp for details. */2438 mask = (uint32_t)pVM->hm.s.vmx.msr.vmx_cr0_fixed0; 2439 /* Note: We ignore the NE bit here on purpose; see vmmr0\hmr0.cpp for details. */ 2440 2440 mask &= ~X86_CR0_NE; 2441 2441 … … 2454 2454 2455 2455 /* if bit N is cleared in cr0_fixed1, then it must be zero in the guest's cr0. */ 2456 mask = (uint32_t)~pVM->h waccm.s.vmx.msr.vmx_cr0_fixed1;2456 mask = (uint32_t)~pVM->hm.s.vmx.msr.vmx_cr0_fixed1; 2457 2457 if ((pCtx->cr0 & mask) != 0) 2458 2458 return false; 2459 2459 2460 2460 /* if bit N is set in cr4_fixed0, then it must be set in the guest's cr4. */ 2461 mask = (uint32_t)pVM->h waccm.s.vmx.msr.vmx_cr4_fixed0;2461 mask = (uint32_t)pVM->hm.s.vmx.msr.vmx_cr4_fixed0; 2462 2462 mask &= ~X86_CR4_VMXE; 2463 2463 if ((pCtx->cr4 & mask) != mask) … … 2465 2465 2466 2466 /* if bit N is cleared in cr4_fixed1, then it must be zero in the guest's cr4. */ 2467 mask = (uint32_t)~pVM->h waccm.s.vmx.msr.vmx_cr4_fixed1;2467 mask = (uint32_t)~pVM->hm.s.vmx.msr.vmx_cr4_fixed1; 2468 2468 if ((pCtx->cr4 & mask) != 0) 2469 2469 return false; 2470 2470 2471 pVCpu->h waccm.s.fActive = true;2471 pVCpu->hm.s.fActive = true; 2472 2472 return true; 2473 2473 } … … 2484 2484 * @param pCtx VM execution context. 2485 2485 */ 2486 VMMR3DECL(bool) H WACCMR3IsRescheduleRequired(PVM pVM, PCPUMCTX pCtx)2486 VMMR3DECL(bool) HMR3IsRescheduleRequired(PVM pVM, PCPUMCTX pCtx) 2487 2487 { 2488 2488 /* … … 2490 2490 * when the unrestricted guest execution feature is missing (VT-x only). 
2491 2491 */ 2492 if ( pVM->h waccm.s.vmx.fEnabled2493 && !pVM->h waccm.s.vmx.fUnrestrictedGuest2492 if ( pVM->hm.s.vmx.fEnabled 2493 && !pVM->hm.s.vmx.fUnrestrictedGuest 2494 2494 && !CPUMIsGuestInPagedProtectedModeEx(pCtx) 2495 2495 && !PDMVMMDevHeapIsEnabled(pVM) 2496 && (pVM->h waccm.s.fNestedPaging || CPUMIsGuestInRealModeEx(pCtx)))2496 && (pVM->hm.s.fNestedPaging || CPUMIsGuestInRealModeEx(pCtx))) 2497 2497 return true; 2498 2498 … … 2507 2507 * @param pVCpu Pointer to the current VMCPU. 2508 2508 */ 2509 VMMR3DECL(void) H WACCMR3NotifyScheduled(PVMCPU pVCpu)2510 { 2511 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;2509 VMMR3DECL(void) HMR3NotifyScheduled(PVMCPU pVCpu) 2510 { 2511 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST; 2512 2512 } 2513 2513 … … 2518 2518 * @param pVCpu Pointer to the VMCPU. 2519 2519 */ 2520 VMMR3DECL(void) H WACCMR3NotifyEmulated(PVMCPU pVCpu)2521 { 2522 pVCpu->h waccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL_GUEST;2520 VMMR3DECL(void) HMR3NotifyEmulated(PVMCPU pVCpu) 2521 { 2522 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST; 2523 2523 } 2524 2524 … … 2530 2530 * @param pVCpu Pointer to the VMCPU. 2531 2531 */ 2532 VMMR3DECL(bool) H WACCMR3IsActive(PVMCPU pVCpu)2533 { 2534 return pVCpu->h waccm.s.fActive;2532 VMMR3DECL(bool) HMR3IsActive(PVMCPU pVCpu) 2533 { 2534 return pVCpu->hm.s.fActive; 2535 2535 } 2536 2536 … … 2542 2542 * @param pVM Pointer to the VM. 2543 2543 */ 2544 VMMR3DECL(bool) H WACCMR3IsNestedPagingActive(PVM pVM)2545 { 2546 return pVM->h waccm.s.fNestedPaging;2544 VMMR3DECL(bool) HMR3IsNestedPagingActive(PVM pVM) 2545 { 2546 return pVM->hm.s.fNestedPaging; 2547 2547 } 2548 2548 … … 2554 2554 * @param pVM Pointer to the VM. 2555 2555 */ 2556 VMMR3DECL(bool) H WACCMR3IsVPIDActive(PVM pVM)2557 { 2558 return pVM->h waccm.s.vmx.fVPID;2556 VMMR3DECL(bool) HMR3IsVPIDActive(PVM pVM) 2557 { 2558 return pVM->hm.s.vmx.fVPID; 2559 2559 } 2560 2560 … … 2566 2566 * @param pVM Pointer to the VM. 2567 2567 */ 2568 VMMR3DECL(bool) H WACCMR3IsEventPending(PVMCPU pVCpu)2569 { 2570 return H WACCMIsEnabled(pVCpu->pVMR3) && pVCpu->hwaccm.s.Event.fPending;2568 VMMR3DECL(bool) HMR3IsEventPending(PVMCPU pVCpu) 2569 { 2570 return HMIsEnabled(pVCpu->pVMR3) && pVCpu->hm.s.Event.fPending; 2571 2571 } 2572 2572 … … 2578 2578 * @param pVM Pointer to the VM. 2579 2579 */ 2580 VMMR3DECL(bool) H WACCMR3IsVmxPreemptionTimerUsed(PVM pVM)2581 { 2582 return H WACCMIsEnabled(pVM)2583 && pVM->h waccm.s.vmx.fEnabled2584 && pVM->h waccm.s.vmx.fUsePreemptTimer;2580 VMMR3DECL(bool) HMR3IsVmxPreemptionTimerUsed(PVM pVM) 2581 { 2582 return HMIsEnabled(pVM) 2583 && pVM->hm.s.vmx.fEnabled 2584 && pVM->hm.s.vmx.fUsePreemptTimer; 2585 2585 } 2586 2586 … … 2600 2600 * @param pCtx Pointer to the guest CPU context. 
2601 2601 */ 2602 VMMR3DECL(VBOXSTRICTRC) H WACCMR3RestartPendingIOInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)2603 { 2604 H WACCMPENDINGIO enmType = pVCpu->hwaccm.s.PendingIO.enmType;2605 2606 pVCpu->h waccm.s.PendingIO.enmType = HWACCMPENDINGIO_INVALID;2607 2608 if ( pVCpu->h waccm.s.PendingIO.GCPtrRip != pCtx->rip2609 || enmType == H WACCMPENDINGIO_INVALID)2602 VMMR3DECL(VBOXSTRICTRC) HMR3RestartPendingIOInstr(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 2603 { 2604 HMPENDINGIO enmType = pVCpu->hm.s.PendingIO.enmType; 2605 2606 pVCpu->hm.s.PendingIO.enmType = HMPENDINGIO_INVALID; 2607 2608 if ( pVCpu->hm.s.PendingIO.GCPtrRip != pCtx->rip 2609 || enmType == HMPENDINGIO_INVALID) 2610 2610 return VERR_NOT_FOUND; 2611 2611 … … 2613 2613 switch (enmType) 2614 2614 { 2615 case H WACCMPENDINGIO_PORT_READ:2615 case HMPENDINGIO_PORT_READ: 2616 2616 { 2617 uint32_t uAndVal = pVCpu->h waccm.s.PendingIO.s.Port.uAndVal;2617 uint32_t uAndVal = pVCpu->hm.s.PendingIO.s.Port.uAndVal; 2618 2618 uint32_t u32Val = 0; 2619 2619 2620 rcStrict = IOMIOPortRead(pVM, pVCpu->h waccm.s.PendingIO.s.Port.uPort,2620 rcStrict = IOMIOPortRead(pVM, pVCpu->hm.s.PendingIO.s.Port.uPort, 2621 2621 &u32Val, 2622 pVCpu->h waccm.s.PendingIO.s.Port.cbSize);2622 pVCpu->hm.s.PendingIO.s.Port.cbSize); 2623 2623 if (IOM_SUCCESS(rcStrict)) 2624 2624 { 2625 2625 /* Write back to the EAX register. */ 2626 2626 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal); 2627 pCtx->rip = pVCpu->h waccm.s.PendingIO.GCPtrRipNext;2627 pCtx->rip = pVCpu->hm.s.PendingIO.GCPtrRipNext; 2628 2628 } 2629 2629 break; 2630 2630 } 2631 2631 2632 case H WACCMPENDINGIO_PORT_WRITE:2633 rcStrict = IOMIOPortWrite(pVM, pVCpu->h waccm.s.PendingIO.s.Port.uPort,2634 pCtx->eax & pVCpu->h waccm.s.PendingIO.s.Port.uAndVal,2635 pVCpu->h waccm.s.PendingIO.s.Port.cbSize);2632 case HMPENDINGIO_PORT_WRITE: 2633 rcStrict = IOMIOPortWrite(pVM, pVCpu->hm.s.PendingIO.s.Port.uPort, 2634 pCtx->eax & pVCpu->hm.s.PendingIO.s.Port.uAndVal, 2635 pVCpu->hm.s.PendingIO.s.Port.cbSize); 2636 2636 if (IOM_SUCCESS(rcStrict)) 2637 pCtx->rip = pVCpu->h waccm.s.PendingIO.GCPtrRipNext;2637 pCtx->rip = pVCpu->hm.s.PendingIO.GCPtrRipNext; 2638 2638 break; 2639 2639 … … 2652 2652 * @param pVM Pointer to the VM. 2653 2653 */ 2654 VMMR3DECL(int) H WACCMR3InjectNMI(PVM pVM)2654 VMMR3DECL(int) HMR3InjectNMI(PVM pVM) 2655 2655 { 2656 2656 VMCPU_FF_SET(&pVM->aCpus[0], VMCPU_FF_INTERRUPT_NMI); … … 2666 2666 * @param iStatusCode VBox status code. 
2667 2667 */ 2668 VMMR3DECL(void) H WACCMR3CheckError(PVM pVM, int iStatusCode)2668 VMMR3DECL(void) HMR3CheckError(PVM pVM, int iStatusCode) 2669 2669 { 2670 2670 for (VMCPUID i = 0; i < pVM->cCpus; i++) … … 2676 2676 2677 2677 case VERR_VMX_INVALID_VMCS_PTR: 2678 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current pointer %RGp vs %RGp\n", i, pVM->aCpus[i].h waccm.s.vmx.lasterror.u64VMCSPhys, pVM->aCpus[i].hwaccm.s.vmx.HCPhysVMCS));2679 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current VMCS version %x\n", i, pVM->aCpus[i].h waccm.s.vmx.lasterror.ulVMCSRevision));2680 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Entered Cpu %d\n", i, pVM->aCpus[i].h waccm.s.vmx.lasterror.idEnteredCpu));2681 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current Cpu %d\n", i, pVM->aCpus[i].h waccm.s.vmx.lasterror.idCurrentCpu));2678 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current pointer %RGp vs %RGp\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.u64VMCSPhys, pVM->aCpus[i].hm.s.vmx.HCPhysVMCS)); 2679 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current VMCS version %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.ulVMCSRevision)); 2680 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Entered Cpu %d\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.idEnteredCpu)); 2681 LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current Cpu %d\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.idCurrentCpu)); 2682 2682 break; 2683 2683 2684 2684 case VERR_VMX_UNABLE_TO_START_VM: 2685 LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].h waccm.s.vmx.lasterror.ulInstrError));2686 LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].h waccm.s.vmx.lasterror.ulExitReason));2687 if (pVM->aCpus[i].h waccm.s.vmx.lasterror.ulInstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS)2685 LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.ulInstrError)); 2686 LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.ulExitReason)); 2687 if (pVM->aCpus[i].hm.s.vmx.lasterror.ulInstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS) 2688 2688 { 2689 LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d MSRBitmapPhys %RHp\n", i, pVM->aCpus[i].h waccm.s.vmx.pMSRBitmapPhys));2689 LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d MSRBitmapPhys %RHp\n", i, pVM->aCpus[i].hm.s.vmx.pMSRBitmapPhys)); 2690 2690 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE 2691 LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d GuestMSRPhys %RHp\n", i, pVM->aCpus[i].h waccm.s.vmx.pGuestMSRPhys));2692 LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d HostMsrPhys %RHp\n", i, pVM->aCpus[i].h waccm.s.vmx.pHostMSRPhys));2693 LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d Cached MSRs %x\n", i, pVM->aCpus[i].h waccm.s.vmx.cCachedMSRs));2691 LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d GuestMSRPhys %RHp\n", i, pVM->aCpus[i].hm.s.vmx.pGuestMSRPhys)); 2692 LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d HostMsrPhys %RHp\n", i, pVM->aCpus[i].hm.s.vmx.pHostMSRPhys)); 2693 LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d Cached MSRs %x\n", i, pVM->aCpus[i].hm.s.vmx.cCachedMSRs)); 2694 2694 #endif 2695 2695 } … … 2700 2700 2701 2701 case VERR_VMX_UNABLE_TO_RESUME_VM: 2702 LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].h waccm.s.vmx.lasterror.ulInstrError));2703 LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].h waccm.s.vmx.lasterror.ulExitReason));2702 LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d instruction error %x\n", i, 
pVM->aCpus[i].hm.s.vmx.lasterror.ulInstrError)); 2703 LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hm.s.vmx.lasterror.ulExitReason)); 2704 2704 break; 2705 2705 … … 2711 2711 if (iStatusCode == VERR_VMX_UNABLE_TO_START_VM) 2712 2712 { 2713 LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed %x\n", pVM->h waccm.s.vmx.msr.vmx_entry.n.allowed1));2714 LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry disallowed %x\n", pVM->h waccm.s.vmx.msr.vmx_entry.n.disallowed0));2713 LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry allowed %x\n", pVM->hm.s.vmx.msr.vmx_entry.n.allowed1)); 2714 LogRel(("VERR_VMX_UNABLE_TO_START_VM: VM-entry disallowed %x\n", pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0)); 2715 2715 } 2716 2716 } … … 2724 2724 * @param pSSM SSM operation handle. 2725 2725 */ 2726 static DECLCALLBACK(int) h waccmR3Save(PVM pVM, PSSMHANDLE pSSM)2726 static DECLCALLBACK(int) hmR3Save(PVM pVM, PSSMHANDLE pSSM) 2727 2727 { 2728 2728 int rc; 2729 2729 2730 Log(("h waccmR3Save:\n"));2730 Log(("hmR3Save:\n")); 2731 2731 2732 2732 for (VMCPUID i = 0; i < pVM->cCpus; i++) … … 2735 2735 * Save the basic bits - fortunately all the other things can be resynced on load. 2736 2736 */ 2737 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].h waccm.s.Event.fPending);2737 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.Event.fPending); 2738 2738 AssertRCReturn(rc, rc); 2739 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].h waccm.s.Event.errCode);2739 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.Event.errCode); 2740 2740 AssertRCReturn(rc, rc); 2741 rc = SSMR3PutU64(pSSM, pVM->aCpus[i].h waccm.s.Event.intInfo);2741 rc = SSMR3PutU64(pSSM, pVM->aCpus[i].hm.s.Event.intInfo); 2742 2742 AssertRCReturn(rc, rc); 2743 2743 2744 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].h waccm.s.vmx.enmLastSeenGuestMode);2744 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.vmx.enmLastSeenGuestMode); 2745 2745 AssertRCReturn(rc, rc); 2746 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].h waccm.s.vmx.enmCurrGuestMode);2746 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.vmx.enmCurrGuestMode); 2747 2747 AssertRCReturn(rc, rc); 2748 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].h waccm.s.vmx.enmPrevGuestMode);2748 rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hm.s.vmx.enmPrevGuestMode); 2749 2749 AssertRCReturn(rc, rc); 2750 2750 } 2751 #ifdef VBOX_H WACCM_WITH_GUEST_PATCHING2752 rc = SSMR3PutGCPtr(pSSM, pVM->h waccm.s.pGuestPatchMem);2751 #ifdef VBOX_HM_WITH_GUEST_PATCHING 2752 rc = SSMR3PutGCPtr(pSSM, pVM->hm.s.pGuestPatchMem); 2753 2753 AssertRCReturn(rc, rc); 2754 rc = SSMR3PutGCPtr(pSSM, pVM->h waccm.s.pFreeGuestPatchMem);2754 rc = SSMR3PutGCPtr(pSSM, pVM->hm.s.pFreeGuestPatchMem); 2755 2755 AssertRCReturn(rc, rc); 2756 rc = SSMR3PutU32(pSSM, pVM->h waccm.s.cbGuestPatchMem);2756 rc = SSMR3PutU32(pSSM, pVM->hm.s.cbGuestPatchMem); 2757 2757 AssertRCReturn(rc, rc); 2758 2758 2759 2759 /* Store all the guest patch records too. 
*/ 2760 rc = SSMR3PutU32(pSSM, pVM->h waccm.s.cPatches);2760 rc = SSMR3PutU32(pSSM, pVM->hm.s.cPatches); 2761 2761 AssertRCReturn(rc, rc); 2762 2762 2763 for (unsigned i = 0; i < pVM->h waccm.s.cPatches; i++)2764 { 2765 PH WACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];2763 for (unsigned i = 0; i < pVM->hm.s.cPatches; i++) 2764 { 2765 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i]; 2766 2766 2767 2767 rc = SSMR3PutU32(pSSM, pPatch->Core.Key); … … 2780 2780 AssertRCReturn(rc, rc); 2781 2781 2782 AssertCompileSize(H WACCMTPRINSTR, 4);2782 AssertCompileSize(HMTPRINSTR, 4); 2783 2783 rc = SSMR3PutU32(pSSM, (uint32_t)pPatch->enmType); 2784 2784 AssertRCReturn(rc, rc); … … 2810 2810 * @param uPass The data pass. 2811 2811 */ 2812 static DECLCALLBACK(int) h waccmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)2812 static DECLCALLBACK(int) hmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass) 2813 2813 { 2814 2814 int rc; 2815 2815 2816 Log(("h waccmR3Load:\n"));2816 Log(("hmR3Load:\n")); 2817 2817 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass); 2818 2818 … … 2820 2820 * Validate version. 2821 2821 */ 2822 if ( uVersion != H WACCM_SSM_VERSION2823 && uVersion != H WACCM_SSM_VERSION_NO_PATCHING2824 && uVersion != H WACCM_SSM_VERSION_2_0_X)2825 { 2826 AssertMsgFailed(("h waccmR3Load: Invalid version uVersion=%d!\n", uVersion));2822 if ( uVersion != HM_SSM_VERSION 2823 && uVersion != HM_SSM_VERSION_NO_PATCHING 2824 && uVersion != HM_SSM_VERSION_2_0_X) 2825 { 2826 AssertMsgFailed(("hmR3Load: Invalid version uVersion=%d!\n", uVersion)); 2827 2827 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION; 2828 2828 } 2829 2829 for (VMCPUID i = 0; i < pVM->cCpus; i++) 2830 2830 { 2831 rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].h waccm.s.Event.fPending);2831 rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hm.s.Event.fPending); 2832 2832 AssertRCReturn(rc, rc); 2833 rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].h waccm.s.Event.errCode);2833 rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hm.s.Event.errCode); 2834 2834 AssertRCReturn(rc, rc); 2835 rc = SSMR3GetU64(pSSM, &pVM->aCpus[i].h waccm.s.Event.intInfo);2835 rc = SSMR3GetU64(pSSM, &pVM->aCpus[i].hm.s.Event.intInfo); 2836 2836 AssertRCReturn(rc, rc); 2837 2837 2838 if (uVersion >= H WACCM_SSM_VERSION_NO_PATCHING)2838 if (uVersion >= HM_SSM_VERSION_NO_PATCHING) 2839 2839 { 2840 2840 uint32_t val; … … 2842 2842 rc = SSMR3GetU32(pSSM, &val); 2843 2843 AssertRCReturn(rc, rc); 2844 pVM->aCpus[i].h waccm.s.vmx.enmLastSeenGuestMode = (PGMMODE)val;2844 pVM->aCpus[i].hm.s.vmx.enmLastSeenGuestMode = (PGMMODE)val; 2845 2845 2846 2846 rc = SSMR3GetU32(pSSM, &val); 2847 2847 AssertRCReturn(rc, rc); 2848 pVM->aCpus[i].h waccm.s.vmx.enmCurrGuestMode = (PGMMODE)val;2848 pVM->aCpus[i].hm.s.vmx.enmCurrGuestMode = (PGMMODE)val; 2849 2849 2850 2850 rc = SSMR3GetU32(pSSM, &val); 2851 2851 AssertRCReturn(rc, rc); 2852 pVM->aCpus[i].h waccm.s.vmx.enmPrevGuestMode = (PGMMODE)val;2852 pVM->aCpus[i].hm.s.vmx.enmPrevGuestMode = (PGMMODE)val; 2853 2853 } 2854 2854 } 2855 #ifdef VBOX_H WACCM_WITH_GUEST_PATCHING2856 if (uVersion > H WACCM_SSM_VERSION_NO_PATCHING)2857 { 2858 rc = SSMR3GetGCPtr(pSSM, &pVM->h waccm.s.pGuestPatchMem);2855 #ifdef VBOX_HM_WITH_GUEST_PATCHING 2856 if (uVersion > HM_SSM_VERSION_NO_PATCHING) 2857 { 2858 rc = SSMR3GetGCPtr(pSSM, &pVM->hm.s.pGuestPatchMem); 2859 2859 AssertRCReturn(rc, rc); 2860 rc = SSMR3GetGCPtr(pSSM, &pVM->h waccm.s.pFreeGuestPatchMem);2860 rc = SSMR3GetGCPtr(pSSM, &pVM->hm.s.pFreeGuestPatchMem); 2861 2861 AssertRCReturn(rc, rc); 2862 rc = 
SSMR3GetU32(pSSM, &pVM->h waccm.s.cbGuestPatchMem);2862 rc = SSMR3GetU32(pSSM, &pVM->hm.s.cbGuestPatchMem); 2863 2863 AssertRCReturn(rc, rc); 2864 2864 2865 2865 /* Fetch all TPR patch records. */ 2866 rc = SSMR3GetU32(pSSM, &pVM->h waccm.s.cPatches);2866 rc = SSMR3GetU32(pSSM, &pVM->hm.s.cPatches); 2867 2867 AssertRCReturn(rc, rc); 2868 2868 2869 for (unsigned i = 0; i < pVM->h waccm.s.cPatches; i++)2869 for (unsigned i = 0; i < pVM->hm.s.cPatches; i++) 2870 2870 { 2871 PH WACCMTPRPATCH pPatch = &pVM->hwaccm.s.aPatches[i];2871 PHMTPRPATCH pPatch = &pVM->hm.s.aPatches[i]; 2872 2872 2873 2873 rc = SSMR3GetU32(pSSM, &pPatch->Core.Key); … … 2889 2889 AssertRCReturn(rc, rc); 2890 2890 2891 if (pPatch->enmType == H WACCMTPRINSTR_JUMP_REPLACEMENT)2892 pVM->h waccm.s.fTPRPatchingActive = true;2893 2894 Assert(pPatch->enmType == H WACCMTPRINSTR_JUMP_REPLACEMENT || pVM->hwaccm.s.fTPRPatchingActive == false);2891 if (pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT) 2892 pVM->hm.s.fTPRPatchingActive = true; 2893 2894 Assert(pPatch->enmType == HMTPRINSTR_JUMP_REPLACEMENT || pVM->hm.s.fTPRPatchingActive == false); 2895 2895 2896 2896 rc = SSMR3GetU32(pSSM, &pPatch->uSrcOperand); … … 2906 2906 AssertRCReturn(rc, rc); 2907 2907 2908 Log(("h waccmR3Load: patch %d\n", i));2908 Log(("hmR3Load: patch %d\n", i)); 2909 2909 Log(("Key = %x\n", pPatch->Core.Key)); 2910 2910 Log(("cbOp = %d\n", pPatch->cbOp)); … … 2915 2915 Log(("cFaults = %d\n", pPatch->cFaults)); 2916 2916 Log(("target = %x\n", pPatch->pJumpTarget)); 2917 rc = RTAvloU32Insert(&pVM->h waccm.s.PatchTree, &pPatch->Core);2917 rc = RTAvloU32Insert(&pVM->hm.s.PatchTree, &pPatch->Core); 2918 2918 AssertRC(rc); 2919 2919 } … … 2921 2921 #endif 2922 2922 2923 /* Recheck all VCPUs if we can go straight into h waccm execution mode. */2924 if (H WACCMIsEnabled(pVM))2923 /* Recheck all VCPUs if we can go straight into hm execution mode. */ 2924 if (HMIsEnabled(pVM)) 2925 2925 { 2926 2926 for (VMCPUID i = 0; i < pVM->cCpus; i++) … … 2928 2928 PVMCPU pVCpu = &pVM->aCpus[i]; 2929 2929 2930 H WACCMR3CanExecuteGuest(pVM, CPUMQueryGuestCtxPtr(pVCpu));2930 HMR3CanExecuteGuest(pVM, CPUMQueryGuestCtxPtr(pVCpu)); 2931 2931 } 2932 2932 } -
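A note on the HMR3CanExecuteGuest hunk above: the comments there state the VT-x "fixed bits" rule ("if bit N is set in cr0_fixed0, then it must be set in the guest's cr0", and the mirror rule for cr0_fixed1/cr4_fixed1). The standalone sketch below illustrates that test with made-up sample values; checkFixedBits and the constants are invented for illustration only, the real masks come from pVM->hm.s.vmx.msr.vmx_cr0_fixed0/1 and vmx_cr4_fixed0/1, and the real code additionally masks out X86_CR0_NE and X86_CR4_VMXE before comparing, as shown in the hunk.

/* Illustrative sketch, not part of this changeset. */
#include <stdint.h>
#include <stdio.h>

static int checkFixedBits(uint32_t cr, uint32_t fixed0, uint32_t fixed1)
{
    if ((cr & fixed0) != fixed0)    /* bits required to be 1 are missing */
        return 0;
    if ((cr & ~fixed1) != 0)        /* bits required to be 0 are set */
        return 0;
    return 1;
}

int main(void)
{
    uint32_t cr0        = 0x80000031u; /* PG | NE | ET | PE (sample guest CR0) */
    uint32_t cr0_fixed0 = 0x80000001u; /* sample: PG and PE must be 1 */
    uint32_t cr0_fixed1 = 0xFFFFFFFFu; /* sample: no bit is forced to 0 */

    printf("guest CR0 %s VT-x execution\n",
           checkFixedBits(cr0, cr0_fixed0, cr0_fixed1) ? "allows" : "blocks");
    return 0;
}

The same helper applied to CR4 against cr4_fixed0/cr4_fixed1 completes the check; a guest CR0/CR4 that fails either test forces a fallback to the recompiler, which is exactly what the function above returns false for.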
trunk/src/VBox/VMM/VMMR3/IOM.cpp
r41965 r43387 53 53 * 54 54 * 55 * @section sec_iom_h waccm Hardware Assisted Virtualization Mode55 * @section sec_iom_hm Hardware Assisted Virtualization Mode 56 56 * 57 57 * When running in hardware assisted virtualization mode we'll be doing much the -
trunk/src/VBox/VMM/VMMR3/PDMLdr.cpp
r41965 r43387 32 32 #include <VBox/param.h> 33 33 #include <VBox/err.h> 34 #include <VBox/vmm/h waccm.h>34 #include <VBox/vmm/hm.h> 35 35 #include <VBox/VBoxTpG.h> 36 36 -
trunk/src/VBox/VMM/VMMR3/PGM.cpp
r42612 r43387 622 622 #include <VBox/vmm/selm.h> 623 623 #include <VBox/vmm/ssm.h> 624 #include <VBox/vmm/h waccm.h>624 #include <VBox/vmm/hm.h> 625 625 #include "PGMInternal.h" 626 626 #include <VBox/vmm/vm.h> … … 2239 2239 switch (enmWhat) 2240 2240 { 2241 case VMINITCOMPLETED_H WACCM:2241 case VMINITCOMPLETED_HM: 2242 2242 #ifdef VBOX_WITH_PCI_PASSTHROUGH 2243 2243 if (pVM->pgm.s.fPciPassthrough) 2244 2244 { 2245 2245 AssertLogRelReturn(pVM->pgm.s.fRamPreAlloc, VERR_PCI_PASSTHROUGH_NO_RAM_PREALLOC); 2246 AssertLogRelReturn(H WACCMIsEnabled(pVM), VERR_PCI_PASSTHROUGH_NO_HWACCM);2247 AssertLogRelReturn(H WACCMIsNestedPagingActive(pVM), VERR_PCI_PASSTHROUGH_NO_NESTED_PAGING);2246 AssertLogRelReturn(HMIsEnabled(pVM), VERR_PCI_PASSTHROUGH_NO_HM); 2247 AssertLogRelReturn(HMIsNestedPagingActive(pVM), VERR_PCI_PASSTHROUGH_NO_NESTED_PAGING); 2248 2248 2249 2249 /* … … 2578 2578 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); 2579 2579 pgmR3RefreshShadowModeAfterA20Change(pVCpu); 2580 H WACCMFlushTLB(pVCpu);2580 HMFlushTLB(pVCpu); 2581 2581 #endif 2582 2582 } … … 3173 3173 case PGMMODE_PROTECTED: 3174 3174 if ( enmShadowMode != PGMMODE_INVALID 3175 && !H WACCMIsEnabled(pVM) /* always switch in hwaccm mode! */)3175 && !HMIsEnabled(pVM) /* always switch in hm mode! */) 3176 3176 break; /* (no change) */ 3177 3177 … … 3328 3328 } 3329 3329 /* Override the shadow mode is nested paging is active. */ 3330 pVM->pgm.s.fNestedPaging = H WACCMIsNestedPagingActive(pVM);3330 pVM->pgm.s.fNestedPaging = HMIsNestedPagingActive(pVM); 3331 3331 if (pVM->pgm.s.fNestedPaging) 3332 enmShadowMode = H WACCMGetShwPagingMode(pVM);3332 enmShadowMode = HMGetShwPagingMode(pVM); 3333 3333 3334 3334 *penmSwitcher = enmSwitcher; … … 3629 3629 } 3630 3630 3631 /* Notify H WACCM as well. */3632 H WACCMR3PagingModeChanged(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode);3631 /* Notify HM as well. */ 3632 HMR3PagingModeChanged(pVM, pVCpu, pVCpu->pgm.s.enmShadowMode, pVCpu->pgm.s.enmGuestMode); 3633 3633 return rc; 3634 3634 } -
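The HMR3PagingModeChanged call at the end of the PGM.cpp hunk above feeds the prev/curr/last-seen bookkeeping shown earlier in the HM.cpp part of this changeset. The sketch below is a minimal model of that bookkeeping only; the type and function names (MODETRACK, modeChanged) are invented for illustration, and it assumes nothing beyond what the HM.cpp hunk shows.

/* Illustrative sketch, not part of this changeset. */
#include <stdio.h>

typedef enum { MODE_REAL, MODE_PROTECTED, MODE_PAGED } GUESTMODE;

typedef struct {
    GUESTMODE enmPrev;      /* mode before the last reported change */
    GUESTMODE enmCurr;      /* mode after the last reported change  */
    GUESTMODE enmLastSeen;  /* mode last seen on a VT-x world switch */
} MODETRACK;

static void modeChanged(MODETRACK *p, GUESTMODE enmNew)
{
    if (p->enmCurr == enmNew)
        return;
    p->enmPrev = p->enmCurr;
    p->enmCurr = enmNew;
    /* If the whole transition ran in the recompiler, hardware-assisted
     * execution never saw the intermediate mode; rewind the last-seen
     * mode so the next world switch still notices the transition. */
    if (p->enmLastSeen == enmNew)
    {
        printf("missed %d->%d transition, rewinding last-seen to %d\n",
               p->enmPrev, p->enmCurr, p->enmPrev);
        p->enmLastSeen = p->enmPrev;
    }
}

int main(void)
{
    MODETRACK t = { MODE_REAL, MODE_REAL, MODE_PROTECTED };
    modeChanged(&t, MODE_PROTECTED);
    return 0;
}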
trunk/src/VBox/VMM/VMMR3/PGMBth.h
r41801 r43387 132 132 PVM pVM = pVCpu->pVMR3; 133 133 134 Assert(H WACCMIsNestedPagingActive(pVM) == pVM->pgm.s.fNestedPaging);134 Assert(HMIsNestedPagingActive(pVM) == pVM->pgm.s.fNestedPaging); 135 135 Assert(!pVM->pgm.s.fNestedPaging); 136 136 -
trunk/src/VBox/VMM/VMMR3/PGMHandler.cpp
r41982 r43387 52 52 #include <VBox/param.h> 53 53 #include <VBox/err.h> 54 #include <VBox/vmm/h waccm.h>54 #include <VBox/vmm/hm.h> 55 55 56 56 … … 240 240 241 241 /* Not supported/relevant for VT-x and AMD-V. */ 242 if (H WACCMIsEnabled(pVM))242 if (HMIsEnabled(pVM)) 243 243 return VERR_NOT_IMPLEMENTED; 244 244 … … 293 293 294 294 /* Not supported/relevant for VT-x and AMD-V. */ 295 if (H WACCMIsEnabled(pVM))295 if (HMIsEnabled(pVM)) 296 296 return VERR_NOT_IMPLEMENTED; 297 297 … … 581 581 "Hypervisor Virtual handlers:\n" 582 582 "%*s %*s %*s %*s Type Description\n", 583 - (int)sizeof(RTGCPTR) * 2, "From", 584 - (int)sizeof(RTGCPTR) * 2 - 3, "- To (excl)", 585 - (int)sizeof(RTHCPTR) * 2 - 1, "HandlerHC", 583 - (int)sizeof(RTGCPTR) * 2, "From", 584 - (int)sizeof(RTGCPTR) * 2 - 3, "- To (excl)", 585 - (int)sizeof(RTHCPTR) * 2 - 1, "HandlerHC", 586 586 - (int)sizeof(RTRCPTR) * 2 - 1, "HandlerGC"); 587 587 RTAvlroGCPtrDoWithAll(&pVM->pgm.s.pTreesR3->HyperVirtHandlers, true, pgmR3InfoHandlersVirtualOne, &Args); -
trunk/src/VBox/VMM/VMMR3/PGMMap.cpp
r41965 r43387 512 512 if (!pgmMapAreMappingsEnabled(pVM)) 513 513 { 514 Assert(H WACCMIsEnabled(pVM));514 Assert(HMIsEnabled(pVM)); 515 515 return VINF_SUCCESS; 516 516 } … … 674 674 675 675 /** 676 * Interface for disabling the guest mappings when switching to H WACCM mode676 * Interface for disabling the guest mappings when switching to HM mode 677 677 * during VM creation and VM reset. 678 678 * -
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
r43047 r43387 3795 3795 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); 3796 3796 pgmR3RefreshShadowModeAfterA20Change(pVCpu); 3797 H WACCMFlushTLB(pVCpu);3797 HMFlushTLB(pVCpu); 3798 3798 #endif 3799 3799 STAM_REL_COUNTER_INC(&pVCpu->pgm.s.cA20Changes); … … 4326 4326 * 4327 4327 * @remarks The VINF_EM_NO_MEMORY status is for the benefit of the FF processing 4328 * in EM.cpp and shouldn't be propagated outside TRPM, H WACCM, EM and4328 * in EM.cpp and shouldn't be propagated outside TRPM, HM, EM and 4329 4329 * pgmPhysEnsureHandyPage. There is one exception to this in the \#PF 4330 4330 * handler. -
trunk/src/VBox/VMM/VMMR3/PGMPool.cpp
r43163 r43387 459 459 /** @todo change the pool to handle ROOT page allocations specially when 460 460 * required. */ 461 bool fCanUseHighMemory = H WACCMIsNestedPagingActive(pVM)462 && H WACCMGetShwPagingMode(pVM) == PGMMODE_EPT;461 bool fCanUseHighMemory = HMIsNestedPagingActive(pVM) 462 && HMGetShwPagingMode(pVM) == PGMMODE_EPT; 463 463 464 464 pgmLock(pVM); -
trunk/src/VBox/VMM/VMMR3/PGMShw.h
r41801 r43387 188 188 PVM pVM = pVCpu->pVMR3; 189 189 190 Assert(H WACCMIsNestedPagingActive(pVM) == pVM->pgm.s.fNestedPaging);190 Assert(HMIsNestedPagingActive(pVM) == pVM->pgm.s.fNestedPaging); 191 191 Assert(pVM->pgm.s.fNestedPaging); 192 192 Assert(!pVCpu->pgm.s.pShwPageCR3R3); -
trunk/src/VBox/VMM/VMMR3/TRPM.cpp
r43079 r43387 93 93 # include <VBox/vmm/rem.h> 94 94 #endif 95 #include <VBox/vmm/h waccm.h>95 #include <VBox/vmm/hm.h> 96 96 97 97 #include <VBox/err.h> … … 1523 1523 { 1524 1524 # ifndef IEM_VERIFICATION_MODE 1525 if (H WACCMIsEnabled(pVM))1525 if (HMIsEnabled(pVM)) 1526 1526 # endif 1527 1527 { … … 1529 1529 AssertRC(rc); 1530 1530 STAM_COUNTER_INC(&pVM->trpm.s.paStatForwardedIRQR3[u8Interrupt]); 1531 return H WACCMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HWACC : VINF_EM_RESCHEDULE_REM;1531 return HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HWACC : VINF_EM_RESCHEDULE_REM; 1532 1532 } 1533 1533 /* If the guest gate is not patched, then we will check (again) if we can patch it. */ … … 1564 1564 { 1565 1565 AssertRC(rc); 1566 return H WACCMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HWACC : VINF_EM_RESCHEDULE_REM; /* (Heed the halted state if this is changed!) */1566 return HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HWACC : VINF_EM_RESCHEDULE_REM; /* (Heed the halted state if this is changed!) */ 1567 1567 } 1568 1568 #else 1569 if (H WACCMR3IsActive(pVCpu))1569 if (HMR3IsActive(pVCpu)) 1570 1570 { 1571 1571 uint8_t u8Interrupt; -
trunk/src/VBox/VMM/VMMR3/VM.cpp
r41965 r43387 66 66 #include <VBox/vmm/ssm.h> 67 67 #include <VBox/vmm/ftm.h> 68 #include <VBox/vmm/h waccm.h>68 #include <VBox/vmm/hm.h> 69 69 #include "VMInternal.h" 70 70 #include <VBox/vmm/vm.h> … … 302 302 303 303 #ifndef RT_OS_DARWIN 304 case VERR_H WACCM_CONFIG_MISMATCH:304 case VERR_HM_CONFIG_MISMATCH: 305 305 pszError = N_("VT-x/AMD-V is either not available on your host or disabled. " 306 306 "This hardware extension is required by the VM configuration"); … … 355 355 break; 356 356 357 case VERR_PCI_PASSTHROUGH_NO_H WACCM:357 case VERR_PCI_PASSTHROUGH_NO_HM: 358 358 pszError = N_("PCI passthrough requires VT-x/AMD-V"); 359 359 break; … … 656 656 rc = CFGMR3QueryBoolDef(pRoot, "HwVirtExtForced", &pVM->fHwVirtExtForced, false); 657 657 if (RT_SUCCESS(rc) && pVM->fHwVirtExtForced) 658 pVM->fH WACCMEnabled = true;658 pVM->fHMEnabled = true; 659 659 660 660 /* … … 913 913 if (RT_SUCCESS(rc)) 914 914 { 915 rc = H WACCMR3Init(pVM);915 rc = HMR3Init(pVM); 916 916 if (RT_SUCCESS(rc)) 917 917 { … … 1029 1029 AssertRC(rc2); 1030 1030 } 1031 int rc2 = H WACCMR3Term(pVM);1031 int rc2 = HMR3Term(pVM); 1032 1032 AssertRC(rc2); 1033 1033 } … … 1071 1071 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_RING0); 1072 1072 if (RT_SUCCESS(rc)) 1073 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_H WACCM);1074 1075 /** @todo Move this to the VMINITCOMPLETED_H WACCM notification handler. */1073 rc = vmR3InitDoCompleted(pVM, VMINITCOMPLETED_HM); 1074 1075 /** @todo Move this to the VMINITCOMPLETED_HM notification handler. */ 1076 1076 if (RT_SUCCESS(rc)) 1077 CPUMR3SetHWVirtEx(pVM, H WACCMIsEnabled(pVM));1077 CPUMR3SetHWVirtEx(pVM, HMIsEnabled(pVM)); 1078 1078 1079 1079 LogFlow(("vmR3InitRing0: returns %Rrc\n", rc)); … … 1125 1125 int rc = VMMR3InitCompleted(pVM, enmWhat); 1126 1126 if (RT_SUCCESS(rc)) 1127 rc = H WACCMR3InitCompleted(pVM, enmWhat);1127 rc = HMR3InitCompleted(pVM, enmWhat); 1128 1128 if (RT_SUCCESS(rc)) 1129 1129 rc = PGMR3InitCompleted(pVM, enmWhat); … … 1188 1188 PGMR3Relocate(pVM, 0); /* Repeat after PDM relocation. */ 1189 1189 CPUMR3Relocate(pVM); 1190 H WACCMR3Relocate(pVM);1190 HMR3Relocate(pVM); 1191 1191 SELMR3Relocate(pVM); 1192 1192 VMMR3Relocate(pVM, offDelta); … … 2454 2454 AssertRC(rc); 2455 2455 #endif 2456 rc = H WACCMR3Term(pVM);2456 rc = HMR3Term(pVM); 2457 2457 AssertRC(rc); 2458 2458 rc = PGMR3Term(pVM); … … 2863 2863 TMR3Reset(pVM); 2864 2864 EMR3Reset(pVM); 2865 H WACCMR3Reset(pVM); /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */2865 HMR3Reset(pVM); /* This must come *after* PATM, CSAM, CPUM, SELM and TRPM. */ 2866 2866 2867 2867 #ifdef LOG_ENABLED … … 4444 4444 CPUMR3ResetCpu(pVCpu); 4445 4445 EMR3ResetCpu(pVCpu); 4446 H WACCMR3ResetCpu(pVCpu);4446 HMR3ResetCpu(pVCpu); 4447 4447 return VINF_EM_WAIT_SIPI; 4448 4448 } -
trunk/src/VBox/VMM/VMMR3/VMM.cpp
r42975 r43387 105 105 #include <VBox/param.h> 106 106 #include <VBox/version.h> 107 #include <VBox/vmm/h waccm.h>107 #include <VBox/vmm/hm.h> 108 108 #include <iprt/assert.h> 109 109 #include <iprt/alloc.h> … … 210 210 AssertRCReturn(rc, rc); 211 211 212 /* GC switchers are enabled by default. Turned off by H WACCM. */212 /* GC switchers are enabled by default. Turned off by HM. */ 213 213 pVM->vmm.s.fSwitcherDisabled = false; 214 214 … … 448 448 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPGMFlushPending, STAMTYPE_COUNTER, "/VMM/RZRet/PGMFlushPending", STAMUNIT_OCCURENCES, "Number of VINF_PGM_POOL_FLUSH_PENDING returns."); 449 449 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPendingRequest, STAMTYPE_COUNTER, "/VMM/RZRet/PendingRequest", STAMUNIT_OCCURENCES, "Number of VINF_EM_PENDING_REQUEST returns."); 450 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchTPR, STAMTYPE_COUNTER, "/VMM/RZRet/PatchTPR", STAMUNIT_OCCURENCES, "Number of VINF_EM_H WACCM_PATCH_TPR_INSTR returns.");450 STAM_REG(pVM, &pVM->vmm.s.StatRZRetPatchTPR, STAMTYPE_COUNTER, "/VMM/RZRet/PatchTPR", STAMUNIT_OCCURENCES, "Number of VINF_EM_HM_PATCH_TPR_INSTR returns."); 451 451 STAM_REG(pVM, &pVM->vmm.s.StatRZRetCallRing3, STAMTYPE_COUNTER, "/VMM/RZCallR3/Misc", STAMUNIT_OCCURENCES, "Number of Other ring-3 calls."); 452 452 STAM_REG(pVM, &pVM->vmm.s.StatRZCallPDMLock, STAMTYPE_COUNTER, "/VMM/RZCallR3/PDMLock", STAMUNIT_OCCURENCES, "Number of VMMCALLRING3_PDM_LOCK calls."); … … 681 681 */ 682 682 if ( pVM->vmm.s.fUsePeriodicPreemptionTimers 683 && H WACCMR3IsVmxPreemptionTimerUsed(pVM))683 && HMR3IsVmxPreemptionTimerUsed(pVM)) 684 684 pVM->vmm.s.fUsePeriodicPreemptionTimers = false; 685 685 LogRel(("VMM: fUsePeriodicPreemptionTimers=%RTbool\n", pVM->vmm.s.fUsePeriodicPreemptionTimers)); … … 937 937 VMMR3DECL(const char *) VMMR3GetRZAssertMsg1(PVM pVM) 938 938 { 939 if (H WACCMIsEnabled(pVM))939 if (HMIsEnabled(pVM)) 940 940 return pVM->vmm.s.szRing0AssertMsg1; 941 941 … … 957 957 VMMR3DECL(const char *) VMMR3GetRZAssertMsg2(PVM pVM) 958 958 { 959 if (H WACCMIsEnabled(pVM))959 if (HMIsEnabled(pVM)) 960 960 return pVM->vmm.s.szRing0AssertMsg2; 961 961 … … 1403 1403 { 1404 1404 VM_ASSERT_EMT(pVM); 1405 if (H WACCMIsEnabled(pVM))1406 return H WACMMR3EnablePatching(pVM, pPatchMem, cbPatchMem);1405 if (HMIsEnabled(pVM)) 1406 return HMR3EnablePatching(pVM, pPatchMem, cbPatchMem); 1407 1407 1408 1408 return VERR_NOT_SUPPORTED; … … 1419 1419 VMMR3DECL(int) VMMR3DeregisterPatchMemory(PVM pVM, RTGCPTR pPatchMem, unsigned cbPatchMem) 1420 1420 { 1421 if (H WACCMIsEnabled(pVM))1422 return H WACMMR3DisablePatching(pVM, pPatchMem, cbPatchMem);1421 if (HMIsEnabled(pVM)) 1422 return HMR3DisablePatching(pVM, pPatchMem, cbPatchMem); 1423 1423 1424 1424 return VINF_SUCCESS; … … 2339 2339 PRINT_GROUP(VMCPU_FF_,NORMAL_PRIORITY,_MASK); 2340 2340 PRINT_GROUP(VMCPU_FF_,RESUME_GUEST,_MASK); 2341 PRINT_GROUP(VMCPU_FF_,H WACCM_TO_R3,_MASK);2341 PRINT_GROUP(VMCPU_FF_,HM_TO_R3,_MASK); 2342 2342 PRINT_GROUP(VMCPU_FF_,ALL_REM,_MASK); 2343 2343 if (c) -
trunk/src/VBox/VMM/VMMR3/VMMGuruMeditation.cpp
r41965 r43387 34 34 #include <VBox/param.h> 35 35 #include <VBox/version.h> 36 #include <VBox/vmm/h waccm.h>36 #include <VBox/vmm/hm.h> 37 37 #include <iprt/assert.h> 38 38 #include <iprt/time.h> … … 301 301 RTGCUINTPTR uCR2 = 0xdeadface; 302 302 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrorCode, &uCR2); 303 if (!H WACCMIsEnabled(pVM))303 if (!HMIsEnabled(pVM)) 304 304 { 305 305 if (RT_SUCCESS(rc2)) … … 320 320 * Dump the relevant hypervisor registers and stack. 321 321 */ 322 if (H WACCMIsEnabled(pVM))322 if (HMIsEnabled(pVM)) 323 323 { 324 324 if ( rcErr == VERR_VMM_RING0_ASSERTION /* fInRing3Call has already been cleared here. */ … … 592 592 pVCpu->vmm.s.pbEMTStackRC, pVCpu->vmm.s.pbEMTStackBottomRC, 593 593 VMM_STACK_SIZE, pVCpu->vmm.s.pbEMTStackR3); 594 } /* !H WACCMIsEnabled */594 } /* !HMIsEnabled */ 595 595 break; 596 596 } -
trunk/src/VBox/VMM/VMMR3/VMMTests.cpp
r41985 r43387 34 34 #include <VBox/err.h> 35 35 #include <VBox/param.h> 36 #include <VBox/vmm/h waccm.h>36 #include <VBox/vmm/hm.h> 37 37 38 38 #include <iprt/assert.h> … … 477 477 PVMCPU pVCpu = &pVM->aCpus[0]; 478 478 479 if (!H WACCMR3IsAllowed(pVM))479 if (!HMR3IsAllowed(pVM)) 480 480 { 481 481 RTPrintf("VMM: Hardware accelerated test not available!\n"); … … 542 542 CPUMSetHyperState(pVCpu, pVM->vmm.s.pfnCallTrampolineRC, pVCpu->vmm.s.pbEMTStackBottomRC, 0, 0); 543 543 CPUMPushHyper(pVCpu, 0); 544 CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_H WACCM_NOP);544 CPUMPushHyper(pVCpu, VMMGC_DO_TESTCASE_HM_NOP); 545 545 CPUMPushHyper(pVCpu, pVM->pVMRC); 546 546 CPUMPushHyper(pVCpu, 3 * sizeof(RTRCPTR)); /* stack frame size */ -
trunk/src/VBox/VMM/VMMRC/HMRCA.asm
r43373 r43387 23 23 %include "VBox/asmdefs.mac" 24 24 %include "VBox/err.mac" 25 %include "VBox/vmm/h wacc_vmx.mac"25 %include "VBox/vmm/hm_vmx.mac" 26 26 %include "VBox/vmm/cpum.mac" 27 27 %include "iprt/x86.mac" 28 %include "H WACCMInternal.mac"28 %include "HMInternal.mac" 29 29 30 30 %ifdef RT_OS_OS2 ;; @todo fix OMF support in yasm and kick nasm out completely. … … 535 535 ; * @param pCtx Guest context [rsi] 536 536 ; */ 537 BEGINPROC H WACCMSaveGuestFPU64537 BEGINPROC HMSaveGuestFPU64 538 538 mov rax, cr0 539 539 mov rcx, rax ; save old CR0 … … 547 547 mov eax, VINF_SUCCESS 548 548 ret 549 ENDPROC H WACCMSaveGuestFPU64549 ENDPROC HMSaveGuestFPU64 550 550 551 551 ;/** … … 555 555 ; * @param pCtx Guest context [rsi] 556 556 ; */ 557 BEGINPROC H WACCMSaveGuestDebug64557 BEGINPROC HMSaveGuestDebug64 558 558 mov rax, dr0 559 559 mov qword [rsi + CPUMCTX.dr + 0*8], rax … … 568 568 mov eax, VINF_SUCCESS 569 569 ret 570 ENDPROC H WACCMSaveGuestDebug64570 ENDPROC HMSaveGuestDebug64 571 571 572 572 ;/** … … 581 581 ; * @param pCtx Guest context [rsi] 582 582 ; */ 583 BEGINPROC H WACCMTestSwitcher64583 BEGINPROC HMTestSwitcher64 584 584 mov eax, [rsp+8] 585 585 ret 586 ENDPROC H WACCMTestSwitcher64586 ENDPROC HMTestSwitcher64 -
trunk/src/VBox/VMM/VMMRC/VMMRC.cpp
r41976 r43387 120 120 * Testcase executes a privileged instruction to force a world switch. (in both SVM & VMX) 121 121 */ 122 case VMMGC_DO_TESTCASE_H WACCM_NOP:122 case VMMGC_DO_TESTCASE_HM_NOP: 123 123 ASMRdMsr_Low(MSR_IA32_SYSENTER_CS); 124 124 return 0; -
trunk/src/VBox/VMM/VMMRZ/PGMRZDynMap.cpp
r41836 r43387 1842 1842 * Starts or migrates the autoset of a virtual CPU. 1843 1843 * 1844 * This is used by H WACCMR0Enter. When we've longjumped out of the HWACCM1844 * This is used by HMR0Enter. When we've longjumped out of the HM 1845 1845 * execution loop with the set open, we'll migrate it when re-entering. While 1846 1846 * under normal circumstances, we'll start it so VMXR0LoadGuestState can access -
trunk/src/VBox/VMM/include/EMHandleRCTmpl.h
r42779 r43387 20 20 21 21 /** 22 * Process a subset of the raw-mode and h waccm return codes.22 * Process a subset of the raw-mode and hm return codes. 23 23 * 24 24 * Since we have to share this with raw-mode single stepping, this inline … … 35 35 #ifdef EMHANDLERC_WITH_PATM 36 36 int emR3RawHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc) 37 #elif defined(EMHANDLERC_WITH_H WACCM)37 #elif defined(EMHANDLERC_WITH_HM) 38 38 int emR3HwaccmHandleRC(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc) 39 39 #endif … … 219 219 break; 220 220 221 #ifdef EMHANDLERC_WITH_H WACCM221 #ifdef EMHANDLERC_WITH_HM 222 222 /* 223 223 * (MM)IO intensive code block detected; fall back to the recompiler for better performance 224 224 */ 225 225 case VINF_EM_RAW_EMULATE_IO_BLOCK: 226 rc = H WACCMR3EmulateIoBlock(pVM, pCtx);227 break; 228 229 case VINF_EM_H WACCM_PATCH_TPR_INSTR:230 rc = H WACCMR3PatchTprInstr(pVM, pVCpu, pCtx);226 rc = HMR3EmulateIoBlock(pVM, pCtx); 227 break; 228 229 case VINF_EM_HM_PATCH_TPR_INSTR: 230 rc = HMR3PatchTprInstr(pVM, pVCpu, pCtx); 231 231 break; 232 232 #endif … … 334 334 break; 335 335 336 #ifdef EMHANDLERC_WITH_H WACCM336 #ifdef EMHANDLERC_WITH_HM 337 337 /* 338 338 * Up a level, after HwAccM have done some release logging. … … 347 347 case VERR_VMX_UNABLE_TO_START_VM: 348 348 case VERR_VMX_UNABLE_TO_RESUME_VM: 349 H WACCMR3CheckError(pVM, rc);349 HMR3CheckError(pVM, rc); 350 350 break; 351 351 -
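The EMHandleRCTmpl.h hunk above relies on a specialization trick: the file that includes the template defines exactly one EMHANDLERC_WITH_* macro and gets a correspondingly named return-code handler (emR3RawHandleRC for the PATM build, emR3HwaccmHandleRC for the HM build). A minimal, self-contained sketch of that pattern follows; the emDemo* names and the plain int status codes are stand-ins, not VirtualBox code.

/* Sketch only: the including file picks one EMHANDLERC_WITH_* define and the
 * "template" region below compiles to exactly one handler. */
#include <stdio.h>

#define EMHANDLERC_WITH_HM                  /* an HM consumer would define this */

/* What the template header conceptually expands to: */
#if defined(EMHANDLERC_WITH_PATM)
static int emDemoRawHandleRC(int rc)
{
    return rc;                              /* raw-mode/PATM specific cases */
}
#elif defined(EMHANDLERC_WITH_HM)
static int emDemoHwaccmHandleRC(int rc)
{
    /* Only a subset of codes needs HM-specific treatment, e.g. the TPR
     * patching request dispatched to HMR3PatchTprInstr in the hunk above. */
    if (rc == 1 /* stand-in for VINF_EM_HM_PATCH_TPR_INSTR */)
        printf("patching guest TPR access\n");
    return rc;
}
#endif

int main(void)
{
    return emDemoHwaccmHandleRC(1) == 1 ? 0 : 1;
}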
trunk/src/VBox/VMM/include/HMInternal.h
r43373 r43387 16 16 */ 17 17 18 #ifndef ___H WACCMInternal_h19 #define ___H WACCMInternal_h18 #ifndef ___HMInternal_h 19 #define ___HMInternal_h 20 20 21 21 #include <VBox/cdefs.h> … … 24 24 #include <VBox/vmm/stam.h> 25 25 #include <VBox/dis.h> 26 #include <VBox/vmm/h waccm.h>27 #include <VBox/vmm/h wacc_vmx.h>26 #include <VBox/vmm/hm.h> 27 #include <VBox/vmm/hm_vmx.h> 28 28 #include <VBox/vmm/pgm.h> 29 29 #include <VBox/vmm/cpum.h> … … 39 39 40 40 #define VMX_USE_CACHED_VMCS_ACCESSES 41 #define H WACCM_VMX_EMULATE_REALMODE41 #define HM_VMX_EMULATE_REALMODE 42 42 43 43 /* The MSR auto load/store does not work for KERNEL_GS_BASE MSR, thus we … … 52 52 53 53 54 /** @defgroup grp_h waccm_int Internal55 * @ingroup grp_h waccm54 /** @defgroup grp_hm_int Internal 55 * @ingroup grp_hm 56 56 * @internal 57 57 * @{ … … 69 69 * @{ 70 70 */ 71 #define H WACCM_CHANGED_GUEST_FPU RT_BIT(0)72 #define H WACCM_CHANGED_GUEST_CR0 RT_BIT(1)73 #define H WACCM_CHANGED_GUEST_CR3 RT_BIT(2)74 #define H WACCM_CHANGED_GUEST_CR4 RT_BIT(3)75 #define H WACCM_CHANGED_GUEST_GDTR RT_BIT(4)76 #define H WACCM_CHANGED_GUEST_IDTR RT_BIT(5)77 #define H WACCM_CHANGED_GUEST_LDTR RT_BIT(6)78 #define H WACCM_CHANGED_GUEST_TR RT_BIT(7)79 #define H WACCM_CHANGED_GUEST_MSR RT_BIT(8)80 #define H WACCM_CHANGED_GUEST_SEGMENT_REGS RT_BIT(9)81 #define H WACCM_CHANGED_GUEST_DEBUG RT_BIT(10)82 #define H WACCM_CHANGED_HOST_CONTEXT RT_BIT(11)83 84 #define H WACCM_CHANGED_ALL ( HWACCM_CHANGED_GUEST_SEGMENT_REGS \85 | H WACCM_CHANGED_GUEST_CR0 \86 | H WACCM_CHANGED_GUEST_CR3 \87 | H WACCM_CHANGED_GUEST_CR4 \88 | H WACCM_CHANGED_GUEST_GDTR \89 | H WACCM_CHANGED_GUEST_IDTR \90 | H WACCM_CHANGED_GUEST_LDTR \91 | H WACCM_CHANGED_GUEST_TR \92 | H WACCM_CHANGED_GUEST_MSR \93 | H WACCM_CHANGED_GUEST_FPU \94 | H WACCM_CHANGED_GUEST_DEBUG \95 | H WACCM_CHANGED_HOST_CONTEXT)96 97 #define H WACCM_CHANGED_ALL_GUEST ( HWACCM_CHANGED_GUEST_SEGMENT_REGS \98 | H WACCM_CHANGED_GUEST_CR0 \99 | H WACCM_CHANGED_GUEST_CR3 \100 | H WACCM_CHANGED_GUEST_CR4 \101 | H WACCM_CHANGED_GUEST_GDTR \102 | H WACCM_CHANGED_GUEST_IDTR \103 | H WACCM_CHANGED_GUEST_LDTR \104 | H WACCM_CHANGED_GUEST_TR \105 | H WACCM_CHANGED_GUEST_MSR \106 | H WACCM_CHANGED_GUEST_DEBUG \107 | H WACCM_CHANGED_GUEST_FPU)71 #define HM_CHANGED_GUEST_FPU RT_BIT(0) 72 #define HM_CHANGED_GUEST_CR0 RT_BIT(1) 73 #define HM_CHANGED_GUEST_CR3 RT_BIT(2) 74 #define HM_CHANGED_GUEST_CR4 RT_BIT(3) 75 #define HM_CHANGED_GUEST_GDTR RT_BIT(4) 76 #define HM_CHANGED_GUEST_IDTR RT_BIT(5) 77 #define HM_CHANGED_GUEST_LDTR RT_BIT(6) 78 #define HM_CHANGED_GUEST_TR RT_BIT(7) 79 #define HM_CHANGED_GUEST_MSR RT_BIT(8) 80 #define HM_CHANGED_GUEST_SEGMENT_REGS RT_BIT(9) 81 #define HM_CHANGED_GUEST_DEBUG RT_BIT(10) 82 #define HM_CHANGED_HOST_CONTEXT RT_BIT(11) 83 84 #define HM_CHANGED_ALL ( HM_CHANGED_GUEST_SEGMENT_REGS \ 85 | HM_CHANGED_GUEST_CR0 \ 86 | HM_CHANGED_GUEST_CR3 \ 87 | HM_CHANGED_GUEST_CR4 \ 88 | HM_CHANGED_GUEST_GDTR \ 89 | HM_CHANGED_GUEST_IDTR \ 90 | HM_CHANGED_GUEST_LDTR \ 91 | HM_CHANGED_GUEST_TR \ 92 | HM_CHANGED_GUEST_MSR \ 93 | HM_CHANGED_GUEST_FPU \ 94 | HM_CHANGED_GUEST_DEBUG \ 95 | HM_CHANGED_HOST_CONTEXT) 96 97 #define HM_CHANGED_ALL_GUEST ( HM_CHANGED_GUEST_SEGMENT_REGS \ 98 | HM_CHANGED_GUEST_CR0 \ 99 | HM_CHANGED_GUEST_CR3 \ 100 | HM_CHANGED_GUEST_CR4 \ 101 | HM_CHANGED_GUEST_GDTR \ 102 | HM_CHANGED_GUEST_IDTR \ 103 | HM_CHANGED_GUEST_LDTR \ 104 | HM_CHANGED_GUEST_TR \ 105 | HM_CHANGED_GUEST_MSR \ 106 | HM_CHANGED_GUEST_DEBUG \ 107 | HM_CHANGED_GUEST_FPU) 108 108 109 109 /** @} */ 110 110 111 111 /** 
Maximum number of page flushes we are willing to remember before considering a full TLB flush. */ 112 #define H WACCM_MAX_TLB_SHOOTDOWN_PAGES 8112 #define HM_MAX_TLB_SHOOTDOWN_PAGES 8 113 113 114 114 /** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */ 115 #define H WACCM_EPT_IDENTITY_PG_TABLE_SIZE PAGE_SIZE115 #define HM_EPT_IDENTITY_PG_TABLE_SIZE PAGE_SIZE 116 116 /** Size of the TSS structure + 2 pages for the IO bitmap + end byte. */ 117 #define H WACCM_VTX_TSS_SIZE (sizeof(VBOXTSS) + 2*PAGE_SIZE + 1)117 #define HM_VTX_TSS_SIZE (sizeof(VBOXTSS) + 2*PAGE_SIZE + 1) 118 118 /** Total guest mapped memory needed. */ 119 #define H WACCM_VTX_TOTAL_DEVHEAP_MEM (HWACCM_EPT_IDENTITY_PG_TABLE_SIZE + HWACCM_VTX_TSS_SIZE)119 #define HM_VTX_TOTAL_DEVHEAP_MEM (HM_EPT_IDENTITY_PG_TABLE_SIZE + HM_VTX_TSS_SIZE) 120 120 121 121 /** Enable for TPR guest patching. */ 122 #define VBOX_H WACCM_WITH_GUEST_PATCHING123 124 /** H WACCM SSM version125 */ 126 #ifdef VBOX_H WACCM_WITH_GUEST_PATCHING127 # define H WACCM_SSM_VERSION 5128 # define H WACCM_SSM_VERSION_NO_PATCHING 4122 #define VBOX_HM_WITH_GUEST_PATCHING 123 124 /** HM SSM version 125 */ 126 #ifdef VBOX_HM_WITH_GUEST_PATCHING 127 # define HM_SSM_VERSION 5 128 # define HM_SSM_VERSION_NO_PATCHING 4 129 129 #else 130 # define H WACCM_SSM_VERSION 4131 # define H WACCM_SSM_VERSION_NO_PATCHING 4132 #endif 133 #define H WACCM_SSM_VERSION_2_0_X 3130 # define HM_SSM_VERSION 4 131 # define HM_SSM_VERSION_NO_PATCHING 4 132 #endif 133 #define HM_SSM_VERSION_2_0_X 3 134 134 135 135 /** … … 160 160 typedef enum 161 161 { 162 H WACCMPENDINGIO_INVALID = 0,163 H WACCMPENDINGIO_PORT_READ,164 H WACCMPENDINGIO_PORT_WRITE,165 H WACCMPENDINGIO_STRING_READ,166 H WACCMPENDINGIO_STRING_WRITE,162 HMPENDINGIO_INVALID = 0, 163 HMPENDINGIO_PORT_READ, 164 HMPENDINGIO_PORT_WRITE, 165 HMPENDINGIO_STRING_READ, 166 HMPENDINGIO_STRING_WRITE, 167 167 /** The usual 32-bit paranoia. */ 168 H WACCMPENDINGIO_32BIT_HACK = 0x7fffffff169 } H WACCMPENDINGIO;168 HMPENDINGIO_32BIT_HACK = 0x7fffffff 169 } HMPENDINGIO; 170 170 171 171 172 172 typedef enum 173 173 { 174 H WACCMTPRINSTR_INVALID,175 H WACCMTPRINSTR_READ,176 H WACCMTPRINSTR_READ_SHR4,177 H WACCMTPRINSTR_WRITE_REG,178 H WACCMTPRINSTR_WRITE_IMM,179 H WACCMTPRINSTR_JUMP_REPLACEMENT,174 HMTPRINSTR_INVALID, 175 HMTPRINSTR_READ, 176 HMTPRINSTR_READ_SHR4, 177 HMTPRINSTR_WRITE_REG, 178 HMTPRINSTR_WRITE_IMM, 179 HMTPRINSTR_JUMP_REPLACEMENT, 180 180 /** The usual 32-bit paranoia. */ 181 H WACCMTPRINSTR_32BIT_HACK = 0x7fffffff182 } H WACCMTPRINSTR;181 HMTPRINSTR_32BIT_HACK = 0x7fffffff 182 } HMTPRINSTR; 183 183 184 184 typedef struct … … 195 195 uint32_t cbNewOp; 196 196 /** Instruction type. */ 197 H WACCMTPRINSTR enmType;197 HMTPRINSTR enmType; 198 198 /** Source operand. */ 199 199 uint32_t uSrcOperand; … … 204 204 /** Patch address of the jump replacement. */ 205 205 RTGCPTR32 pJumpTarget; 206 } H WACCMTPRPATCH;207 /** Pointer to H WACCMTPRPATCH. */208 typedef H WACCMTPRPATCH *PHWACCMTPRPATCH;206 } HMTPRPATCH; 207 /** Pointer to HMTPRPATCH. */ 208 typedef HMTPRPATCH *PHMTPRPATCH; 209 209 210 210 /** … … 215 215 * @returns Return code indicating the action to take. 216 216 */ 217 typedef DECLCALLBACK (int) FNH WACCMSWITCHERHC(PVM pVM, uint32_t uOffsetVMCPU);217 typedef DECLCALLBACK (int) FNHMSWITCHERHC(PVM pVM, uint32_t uOffsetVMCPU); 218 218 /** Pointer to switcher function. 
*/ 219 typedef FNH WACCMSWITCHERHC *PFNHWACCMSWITCHERHC;219 typedef FNHMSWITCHERHC *PFNHMSWITCHERHC; 220 220 221 221 /** 222 * H WACCM VM Instance data.223 * Changes to this must checked against the padding of the h waccm union in VM!224 */ 225 typedef struct H WACCM222 * HM VM Instance data. 223 * Changes to this must checked against the padding of the hm union in VM! 224 */ 225 typedef struct HM 226 226 { 227 227 /** Set when we've initialized VMX or SVM. */ … … 276 276 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) 277 277 /** 32 to 64 bits switcher entrypoint. */ 278 R0PTRTYPE(PFNH WACCMSWITCHERHC) pfnHost32ToGuest64R0;278 R0PTRTYPE(PFNHMSWITCHERHC) pfnHost32ToGuest64R0; 279 279 280 280 /* AMD-V 64 bits vmrun handler */ … … 300 300 struct 301 301 { 302 /** Set by the ring-0 side of H WACCM to indicate VMX is supported by the302 /** Set by the ring-0 side of HM to indicate VMX is supported by the 303 303 * CPU. */ 304 304 bool fSupported; … … 400 400 struct 401 401 { 402 /** Set by the ring-0 side of H WACCM to indicate SVM is supported by the402 /** Set by the ring-0 side of HM to indicate SVM is supported by the 403 403 * CPU. */ 404 404 bool fSupported; … … 432 432 AVLOU32TREE PatchTree; 433 433 uint32_t cPatches; 434 H WACCMTPRPATCH aPatches[64];434 HMTPRPATCH aPatches[64]; 435 435 436 436 struct … … 443 443 int32_t lLastError; 444 444 445 /** H WACCMR0Init was run */446 bool fH WACCMR0Init;445 /** HMR0Init was run */ 446 bool fHMR0Init; 447 447 bool u8Alignment1[7]; 448 448 … … 451 451 STAMCOUNTER StatTPRReplaceSuccess; 452 452 STAMCOUNTER StatTPRReplaceFailure; 453 } H WACCM;454 /** Pointer to H WACCM VM instance data. */455 typedef H WACCM *PHWACCM;453 } HM; 454 /** Pointer to HM VM instance data. */ 455 typedef HM *PHM; 456 456 457 457 /* Maximum number of cached entries. */ … … 518 518 519 519 /** VMX StartVM function. */ 520 typedef DECLCALLBACK(int) FNH WACCMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);520 typedef DECLCALLBACK(int) FNHMVMXSTARTVM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu); 521 521 /** Pointer to a VMX StartVM function. */ 522 typedef R0PTRTYPE(FNH WACCMVMXSTARTVM *) PFNHWACCMVMXSTARTVM;522 typedef R0PTRTYPE(FNHMVMXSTARTVM *) PFNHMVMXSTARTVM; 523 523 524 524 /** SVM VMRun function. */ 525 typedef DECLCALLBACK(int) FNH WACCMSVMVMRUN(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu);525 typedef DECLCALLBACK(int) FNHMSVMVMRUN(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu); 526 526 /** Pointer to a SVM VMRun function. */ 527 typedef R0PTRTYPE(FNH WACCMSVMVMRUN *) PFNHWACCMSVMVMRUN;527 typedef R0PTRTYPE(FNHMSVMVMRUN *) PFNHMSVMVMRUN; 528 528 529 529 /** 530 * H WACCM VMCPU Instance data.531 */ 532 typedef struct H WACCMCPU530 * HM VMCPU Instance data. 531 */ 532 typedef struct HMCPU 533 533 { 534 534 /** Old style FPU reporting trap mask override performed (optimization) */ … … 551 551 volatile uint32_t cWorldSwitchExits; 552 552 553 /** H WACCM_CHANGED_* flags. */553 /** HM_CHANGED_* flags. */ 554 554 uint32_t fContextUseFlags; 555 555 … … 578 578 579 579 /** Ring 0 handlers for VT-x. */ 580 PFNH WACCMVMXSTARTVM pfnStartVM;580 PFNHMVMXSTARTVM pfnStartVM; 581 581 582 582 #if HC_ARCH_BITS == 32 … … 658 658 /** The last seen guest paging mode (by VT-x). */ 659 659 PGMMODE enmLastSeenGuestMode; 660 /** Current guest paging mode (as seen by H WACCMR3PagingModeChanged). 
*/660 /** Current guest paging mode (as seen by HMR3PagingModeChanged). */ 661 661 PGMMODE enmCurrGuestMode; 662 /** Previous guest paging mode (as seen by H WACCMR3PagingModeChanged). */662 /** Previous guest paging mode (as seen by HMR3PagingModeChanged). */ 663 663 PGMMODE enmPrevGuestMode; 664 664 } vmx; … … 681 681 682 682 /** Ring 0 handlers for VT-x. */ 683 PFNH WACCMSVMVMRUN pfnVMRun;683 PFNHMSVMVMRUN pfnVMRun; 684 684 685 685 /** R0 memory object for the MSR bitmap (8kb). */ … … 714 714 { 715 715 /* Pending IO operation type. */ 716 H WACCMPENDINGIO enmType;716 HMPENDINGIO enmType; 717 717 uint32_t uPadding; 718 718 RTGCPTR GCPtrRip; … … 734 734 735 735 /** The CPU ID of the CPU currently owning the VMCS. Set in 736 * H WACCMR0Enter and cleared in HWACCMR0Leave. */736 * HMR0Enter and cleared in HMR0Leave. */ 737 737 RTCPUID idEnteredCpu; 738 738 … … 740 740 struct 741 741 { 742 RTGCPTR aPages[H WACCM_MAX_TLB_SHOOTDOWN_PAGES];742 RTGCPTR aPages[HM_MAX_TLB_SHOOTDOWN_PAGES]; 743 743 unsigned cPages; 744 744 } TlbShootdown; … … 858 858 R0PTRTYPE(PSTAMCOUNTER) paStatInjectedIrqsR0; 859 859 #endif 860 } H WACCMCPU;861 /** Pointer to H WACCM VM instance data. */862 typedef H WACCMCPU *PHWACCMCPU;860 } HMCPU; 861 /** Pointer to HM VM instance data. */ 862 typedef HMCPU *PHMCPU; 863 863 864 864 865 865 #ifdef IN_RING0 866 866 867 VMMR0DECL(PHMGLOBLCPUINFO) H WACCMR0GetCurrentCpu(void);868 VMMR0DECL(PHMGLOBLCPUINFO) H WACCMR0GetCurrentCpuEx(RTCPUID idCpu);867 VMMR0DECL(PHMGLOBLCPUINFO) HMR0GetCurrentCpu(void); 868 VMMR0DECL(PHMGLOBLCPUINFO) HMR0GetCurrentCpuEx(RTCPUID idCpu); 869 869 870 870 871 871 #ifdef VBOX_STRICT 872 VMMR0DECL(void) H WACCMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);873 VMMR0DECL(void) H WACCMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg);872 VMMR0DECL(void) HMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx); 873 VMMR0DECL(void) HMR0DumpDescriptor(PCX86DESCHC pDesc, RTSEL Sel, const char *pszMsg); 874 874 #else 875 # define H WACCMDumpRegs(a, b ,c) do { } while (0)876 # define H WACCMR0DumpDescriptor(a, b, c) do { } while (0)875 # define HMDumpRegs(a, b ,c) do { } while (0) 876 # define HMR0DumpDescriptor(a, b, c) do { } while (0) 877 877 #endif 878 878 879 879 # ifdef VBOX_WITH_KERNEL_USING_XMM 880 DECLASM(int) h waccmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHWACCMVMXSTARTVM pfnStartVM);881 DECLASM(int) h waccmR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHWACCMSVMVMRUN pfnVMRun);880 DECLASM(int) hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM); 881 DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun); 882 882 # endif 883 883 … … 888 888 * @param pIdtr Where to store the 64-bit IDTR. 889 889 */ 890 DECLASM(void) h waccmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr);890 DECLASM(void) hmR0Get64bitGDTRandIDTR(PX86XDTR64 pGdtr, PX86XDTR64 pIdtr); 891 891 892 892 /** … … 894 894 * @returns CR3 895 895 */ 896 DECLASM(uint64_t) h waccmR0Get64bitCR3(void);896 DECLASM(uint64_t) hmR0Get64bitCR3(void); 897 897 # endif 898 898 -
trunk/src/VBox/VMM/include/HMInternal.mac
r43373 r43387 1 1 ;$Id$ 2 2 ;; @file 3 ; H WACCM - Internal header file.3 ; HM - Internal header file. 4 4 ; 5 5 ; -
trunk/src/VBox/VMM/include/PGMInline.h
r43303 r43387 32 32 #include <VBox/log.h> 33 33 #include <VBox/vmm/gmm.h> 34 #include <VBox/vmm/h waccm.h>34 #include <VBox/vmm/hm.h> 35 35 #include <iprt/asm.h> 36 36 #include <iprt/assert.h> -
trunk/src/VBox/VMM/include/PGMInternal.h
r43302 r43387 33 33 #include <VBox/log.h> 34 34 #include <VBox/vmm/gmm.h> 35 #include <VBox/vmm/h waccm.h>36 #include <VBox/vmm/h wacc_vmx.h>35 #include <VBox/vmm/hm.h> 36 #include <VBox/vmm/hm_vmx.h> 37 37 #include "internal/pgm.h" 38 38 #include <iprt/asm.h> … … 348 348 # define PGM_INVL_PG(pVCpu, GCVirt) ASMInvalidatePage((void *)(uintptr_t)(GCVirt)) 349 349 #elif defined(IN_RING0) 350 # define PGM_INVL_PG(pVCpu, GCVirt) H WACCMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))350 # define PGM_INVL_PG(pVCpu, GCVirt) HMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt)) 351 351 #else 352 # define PGM_INVL_PG(pVCpu, GCVirt) H WACCMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt))352 # define PGM_INVL_PG(pVCpu, GCVirt) HMInvalidatePage(pVCpu, (RTGCPTR)(GCVirt)) 353 353 #endif 354 354 … … 362 362 # define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt) ASMInvalidatePage((void *)(uintptr_t)(GCVirt)) 363 363 #elif defined(IN_RING0) 364 # define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt) H WACCMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))364 # define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt) HMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt)) 365 365 #else 366 # define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt) H WACCMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))366 # define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt) HMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt)) 367 367 #endif 368 368 … … 376 376 # define PGM_INVL_BIG_PG(pVCpu, GCVirt) ASMReloadCR3() 377 377 #elif defined(IN_RING0) 378 # define PGM_INVL_BIG_PG(pVCpu, GCVirt) H WACCMFlushTLB(pVCpu)378 # define PGM_INVL_BIG_PG(pVCpu, GCVirt) HMFlushTLB(pVCpu) 379 379 #else 380 # define PGM_INVL_BIG_PG(pVCpu, GCVirt) H WACCMFlushTLB(pVCpu)380 # define PGM_INVL_BIG_PG(pVCpu, GCVirt) HMFlushTLB(pVCpu) 381 381 #endif 382 382 … … 389 389 # define PGM_INVL_VCPU_TLBS(pVCpu) ASMReloadCR3() 390 390 #elif defined(IN_RING0) 391 # define PGM_INVL_VCPU_TLBS(pVCpu) H WACCMFlushTLB(pVCpu)391 # define PGM_INVL_VCPU_TLBS(pVCpu) HMFlushTLB(pVCpu) 392 392 #else 393 # define PGM_INVL_VCPU_TLBS(pVCpu) H WACCMFlushTLB(pVCpu)393 # define PGM_INVL_VCPU_TLBS(pVCpu) HMFlushTLB(pVCpu) 394 394 #endif 395 395 … … 402 402 # define PGM_INVL_ALL_VCPU_TLBS(pVM) ASMReloadCR3() 403 403 #elif defined(IN_RING0) 404 # define PGM_INVL_ALL_VCPU_TLBS(pVM) H WACCMFlushTLBOnAllVCpus(pVM)404 # define PGM_INVL_ALL_VCPU_TLBS(pVM) HMFlushTLBOnAllVCpus(pVM) 405 405 #else 406 # define PGM_INVL_ALL_VCPU_TLBS(pVM) H WACCMFlushTLBOnAllVCpus(pVM)406 # define PGM_INVL_ALL_VCPU_TLBS(pVM) HMFlushTLBOnAllVCpus(pVM) 407 407 #endif 408 408 … … 3024 3024 bool fLessThan52PhysicalAddressBits; 3025 3025 /** Set when nested paging is active. 3026 * This is meant to save calls to H WACCMIsNestedPagingActive and let the3026 * This is meant to save calls to HMIsNestedPagingActive and let the 3027 3027 * compilers optimize the code better. Whether we use nested paging or 3028 3028 * not is something we find out during VMM initialization and we won't -
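The PGM_INVL_* hunks above keep one call site for TLB invalidation while letting the build context decide what it compiles to: the raw-mode context issues an invlpg or CR3 reload directly, while ring-0 and ring-3 route through HMInvalidatePage or HMFlushTLB. A small stand-alone sketch of that per-context macro dispatch follows; IN_RC and the two helpers are stubs, not the real VMM symbols.

/* Sketch of context-dependent macro dispatch; everything here is a stub. */
#include <stdio.h>

/* #define IN_RC */                      /* would be set for the raw-mode build */

static void demoInvlpg(void *pv)         { printf("invlpg on %p (current CPU)\n", pv); }
static void demoHmInvalidate(void *pv)   { printf("HM-managed invalidate of %p\n", pv); }

#ifdef IN_RC
# define DEMO_INVL_PG(pv)   demoInvlpg(pv)
#else
# define DEMO_INVL_PG(pv)   demoHmInvalidate(pv)
#endif

int main(void)
{
    int dummy;
    DEMO_INVL_PG(&dummy);                /* resolves differently per build context */
    return 0;
}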
trunk/src/VBox/VMM/include/VMMInternal.h
r41976 r43387 507 507 VMMGC_DO_TESTCASE_INTERRUPT_MASKING, 508 508 /** Switching testing and profiling stub. */ 509 VMMGC_DO_TESTCASE_H WACCM_NOP,509 VMMGC_DO_TESTCASE_HM_NOP, 510 510 511 511 /** The usual 32-bit hack. */ -
trunk/src/VBox/VMM/testcase/Makefile.kmk
r41976 r43387 36 36 PROGRAMS += tstGlobalConfig tstInstrEmul 37 37 ifdef VBOX_WITH_RAW_MODE 38 PROGRAMS += tstVMM tstVMM-H wAccm38 PROGRAMS += tstVMM tstVMM-HM 39 39 ifneq ($(KBUILD_TARGET),win) 40 40 PROGRAMS += tstVMMFork … … 256 256 tstVMM_LIBS = $(LIB_VMM) $(LIB_REM) $(LIB_RUNTIME) 257 257 258 tstVMM-H wAccm_TEMPLATE= VBOXR3EXE259 tstVMM-H wAccm_SOURCES = tstVMM-HwAccm.cpp260 tstVMM-H wAccm_LIBS= $(LIB_VMM) $(LIB_REM) $(LIB_RUNTIME)258 tstVMM-HM_TEMPLATE = VBOXR3EXE 259 tstVMM-HM_SOURCES = tstVMM-HM.cpp 260 tstVMM-HM_LIBS = $(LIB_VMM) $(LIB_REM) $(LIB_RUNTIME) 261 261 262 262 tstVMMFork_TEMPLATE = VBOXR3EXE … … 396 396 $(VBOX_PATH_VMM_SRC)/include/CPUMInternal.mac \ 397 397 $(VBOX_PATH_VMM_SRC)/include/TRPMInternal.mac \ 398 $(VBOX_PATH_VMM_SRC)/include/H WACCMInternal.mac \398 $(VBOX_PATH_VMM_SRC)/include/HMInternal.mac \ 399 399 $(VBOX_PATH_VMM_SRC)/include/VMMInternal.mac \ 400 400 $(VBOX_PATH_VMM_SRC)/testcase/Makefile.kmk \ … … 423 423 $(DEPTH)/include/iprt/x86.mac \ 424 424 $(VBOX_PATH_VMM_SRC)/include/CPUMInternal.mac \ 425 $(VBOX_PATH_VMM_SRC)/include/H WACCMInternal.mac \425 $(VBOX_PATH_VMM_SRC)/include/HMInternal.mac \ 426 426 $(VBOX_PATH_VMM_SRC)/include/VMMInternal.mac \ 427 427 $(VBOX_PATH_VMM_SRC)/include/VMMSwitcher.mac \ -
trunk/src/VBox/VMM/testcase/tstAsmStructs.cpp
r41965 r43387 23 23 #include <VBox/vmm/trpm.h> 24 24 #include "TRPMInternal.h" 25 #include "H WACCMInternal.h"25 #include "HMInternal.h" 26 26 #include "VMMSwitcher.h" 27 27 #include "VMMInternal.h" -
trunk/src/VBox/VMM/testcase/tstAsmStructsAsm.asm
r35346 r43387 24 24 25 25 %include "CPUMInternal.mac" 26 %include "H WACCMInternal.mac"26 %include "HMInternal.mac" 27 27 %include "TRPMInternal.mac" 28 28 %include "VMMInternal.mac" -
trunk/src/VBox/VMM/testcase/tstVMStruct.h
r42407 r43387 1329 1329 GEN_CHECK_OFF(VM, fPATMEnabled); 1330 1330 GEN_CHECK_OFF(VM, fCSAMEnabled); 1331 GEN_CHECK_OFF(VM, fH WACCMEnabled);1331 GEN_CHECK_OFF(VM, fHMEnabled); 1332 1332 GEN_CHECK_OFF(VM, fHwVirtExtForced); 1333 1333 GEN_CHECK_OFF(VM, fFaultTolerantMaster); … … 1356 1356 GEN_CHECK_OFF(VM, vmm); 1357 1357 GEN_CHECK_OFF(VM, pgm); 1358 GEN_CHECK_OFF(VM, h waccm);1358 GEN_CHECK_OFF(VM, hm); 1359 1359 GEN_CHECK_OFF(VM, trpm); 1360 1360 GEN_CHECK_OFF(VM, selm); … … 1390 1390 GEN_CHECK_OFF(VMCPU, aStatAdHoc); 1391 1391 GEN_CHECK_OFF(VMCPU, cpum); 1392 GEN_CHECK_OFF(VMCPU, h waccm);1392 GEN_CHECK_OFF(VMCPU, hm); 1393 1393 GEN_CHECK_OFF(VMCPU, em); 1394 1394 GEN_CHECK_OFF(VMCPU, iem); -
trunk/src/VBox/VMM/testcase/tstVMStructDTrace.cpp
r41268 r43387 42 42 #include "IOMInternal.h" 43 43 #include "REMInternal.h" 44 #include "H WACCMInternal.h"44 #include "HMInternal.h" 45 45 #include "PATMInternal.h" 46 46 #include "VMMInternal.h" -
trunk/src/VBox/VMM/testcase/tstVMStructRC.cpp
r41965 r43387 70 70 #include "IOMInternal.h" 71 71 #include "REMInternal.h" 72 #include "H WACCMInternal.h"72 #include "HMInternal.h" 73 73 #include "PATMInternal.h" 74 74 #include "VMMInternal.h" -
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
r41965 r43387 41 41 #include "REMInternal.h" 42 42 #include "SSMInternal.h" 43 #include "H WACCMInternal.h"43 #include "HMInternal.h" 44 44 #include "PATMInternal.h" 45 45 #include "VMMInternal.h" … … 207 207 PRINT_OFFSET(VM, pgm.s.CritSectX); 208 208 CHECK_PADDING_VM(64, pgm); 209 PRINT_OFFSET(VM, h waccm);210 CHECK_PADDING_VM(64, h waccm);209 PRINT_OFFSET(VM, hm); 210 CHECK_PADDING_VM(64, hm); 211 211 CHECK_PADDING_VM(64, trpm); 212 212 CHECK_PADDING_VM(64, selm); … … 227 227 PRINT_OFFSET(VMCPU, cpum); 228 228 CHECK_PADDING_VMCPU(64, cpum); 229 CHECK_PADDING_VMCPU(64, h waccm);229 CHECK_PADDING_VMCPU(64, hm); 230 230 CHECK_PADDING_VMCPU(64, em); 231 231 CHECK_PADDING_VMCPU(64, iem); … … 394 394 CHECK_MEMBER_ALIGNMENT(MMHYPERHEAP, Lock, sizeof(uintptr_t)); 395 395 396 /* h waccm - 32-bit gcc won't align uint64_t naturally, so check. */397 CHECK_MEMBER_ALIGNMENT(H WACCM, u64RegisterMask, 8);398 CHECK_MEMBER_ALIGNMENT(H WACCM, vmx.hostCR4, 8);399 CHECK_MEMBER_ALIGNMENT(H WACCM, vmx.msr.feature_ctrl, 8);400 CHECK_MEMBER_ALIGNMENT(H WACCM, StatTPRPatchSuccess, 8);401 CHECK_MEMBER_ALIGNMENT(H WACCMCPU, StatEntry, 8);402 CHECK_MEMBER_ALIGNMENT(H WACCMCPU, vmx.HCPhysVMCS, sizeof(RTHCPHYS));403 CHECK_MEMBER_ALIGNMENT(H WACCMCPU, vmx.proc_ctls, 8);404 CHECK_MEMBER_ALIGNMENT(H WACCMCPU, Event.intInfo, 8);396 /* hm - 32-bit gcc won't align uint64_t naturally, so check. */ 397 CHECK_MEMBER_ALIGNMENT(HM, u64RegisterMask, 8); 398 CHECK_MEMBER_ALIGNMENT(HM, vmx.hostCR4, 8); 399 CHECK_MEMBER_ALIGNMENT(HM, vmx.msr.feature_ctrl, 8); 400 CHECK_MEMBER_ALIGNMENT(HM, StatTPRPatchSuccess, 8); 401 CHECK_MEMBER_ALIGNMENT(HMCPU, StatEntry, 8); 402 CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.HCPhysVMCS, sizeof(RTHCPHYS)); 403 CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.proc_ctls, 8); 404 CHECK_MEMBER_ALIGNMENT(HMCPU, Event.intInfo, 8); 405 405 406 406 /* Make sure the set is large enough and has the correct size. */
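The tstVMStructSize hunk above notes that 32-bit gcc will not naturally 8-byte align a uint64_t, which is exactly why members such as HM::u64RegisterMask get explicit alignment checks. The macro body below is a guessed stand-in for what such a check might look like, not the VirtualBox implementation, and the DEMO structure is hypothetical.

/* Minimal sketch of an offsetof-based member alignment check. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_CHECK_MEMBER_ALIGNMENT(type, member, align) \
    do { \
        if (offsetof(type, member) % (align)) \
            printf("error! %s::%s is not %u-byte aligned\n", \
                   #type, #member, (unsigned)(align)); \
    } while (0)

typedef struct DEMO
{
    uint8_t  b;
    uint64_t u64RegisterMask;   /* stand-in for the HM field checked above */
} DEMO;

int main(void)
{
    /* On an ILP32 gcc target without explicit alignment attributes this can
     * legitimately fail, which is what the testcase is there to catch. */
    DEMO_CHECK_MEMBER_ALIGNMENT(DEMO, u64RegisterMask, 8);
    return 0;
}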