Changeset 13883 in vbox for trunk/src/VBox/VMM
Timestamp: Nov 5, 2008 5:04:48 PM (16 years ago)
Location: trunk/src/VBox/VMM
Files: 9 edited
trunk/src/VBox/VMM/HWACCM.cpp (r13879 → r13883)

     pVM->hwaccm.s.fActive = false;
     pVM->hwaccm.s.fNestedPaging = false;
-
-    /* On first entry we'll sync everything. */
-    pVM->hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
 
     /*
…
     hwaccmR3DisableRawMode(pVM);
 
-    /* On first entry we'll sync everything. */
-    pVM->hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
-
     for (unsigned i=0;i<pVM->cCPUs;i++)
     {
+        /* On first entry we'll sync everything. */
+        pVM->aCpus[i].hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
+
         pVM->aCpus[i].hwaccm.s.vmx.cr0_mask = 0;
         pVM->aCpus[i].hwaccm.s.vmx.cr4_mask = 0;
-    }
-
-    pVM->hwaccm.s.Event.fPending = false;
+
+        pVM->aCpus[i].hwaccm.s.Event.fPending = false;
+    }
 
     /* Reset state information for real-mode emulation in VT-x. */
…
 VMMR3DECL(bool) HWACCMR3IsEventPending(PVM pVM)
 {
-    return HWACCMIsEnabled(pVM) && pVM->hwaccm.s.Event.fPending;
+    /* @todo SMP */
+    return HWACCMIsEnabled(pVM) && pVM->aCpus[0].hwaccm.s.Event.fPending;
 }
…
         case VERR_VMX_INVALID_VMCS_PTR:
-            LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current pointer %RGp vs %RGp\n", i, pVM->hwaccm.s.vmx.lasterror.u64VMCSPhys, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));
-            LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current VMCS version %x\n", i, pVM->hwaccm.s.vmx.lasterror.ulVMCSRevision));
+            LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current pointer %RGp vs %RGp\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.u64VMCSPhys, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));
+            LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current VMCS version %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulVMCSRevision));
             break;
 
         case VERR_VMX_UNABLE_TO_START_VM:
-            LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d instruction error %x\n", i, pVM->hwaccm.s.vmx.lasterror.ulLastInstrError));
-            LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d exit reason %x\n", i, pVM->hwaccm.s.vmx.lasterror.ulLastExitReason));
+            LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulLastInstrError));
+            LogRel(("VERR_VMX_UNABLE_TO_START_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulLastExitReason));
             break;
 
         case VERR_VMX_UNABLE_TO_RESUME_VM:
-            LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d instruction error %x\n", i, pVM->hwaccm.s.vmx.lasterror.ulLastInstrError));
-            LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d exit reason %x\n", i, pVM->hwaccm.s.vmx.lasterror.ulLastExitReason));
+            LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d instruction error %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulLastInstrError));
+            LogRel(("VERR_VMX_UNABLE_TO_RESUME_VM: CPU%d exit reason %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulLastExitReason));
             break;
…
     Log(("hwaccmR3Save:\n"));
 
-    /*
-     * Save the basic bits - fortunately all the other things can be resynced on load.
-     */
-    rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.Event.fPending);
-    AssertRCReturn(rc, rc);
-    rc = SSMR3PutU32(pSSM, pVM->hwaccm.s.Event.errCode);
-    AssertRCReturn(rc, rc);
-    rc = SSMR3PutU64(pSSM, pVM->hwaccm.s.Event.intInfo);
-    AssertRCReturn(rc, rc);
+    for (unsigned i=0;i<pVM->cCPUs;i++)
+    {
+        /*
+         * Save the basic bits - fortunately all the other things can be resynced on load.
+         */
+        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.Event.fPending);
+        AssertRCReturn(rc, rc);
+        rc = SSMR3PutU32(pSSM, pVM->aCpus[i].hwaccm.s.Event.errCode);
+        AssertRCReturn(rc, rc);
+        rc = SSMR3PutU64(pSSM, pVM->aCpus[i].hwaccm.s.Event.intInfo);
+        AssertRCReturn(rc, rc);
+    }
 
     return VINF_SUCCESS;
…
         return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
     }
-    rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.Event.fPending);
-    AssertRCReturn(rc, rc);
-    rc = SSMR3GetU32(pSSM, &pVM->hwaccm.s.Event.errCode);
-    AssertRCReturn(rc, rc);
-    rc = SSMR3GetU64(pSSM, &pVM->hwaccm.s.Event.intInfo);
-    AssertRCReturn(rc, rc);
-
+    for (unsigned i=0;i<pVM->cCPUs;i++)
+    {
+        rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hwaccm.s.Event.fPending);
+        AssertRCReturn(rc, rc);
+        rc = SSMR3GetU32(pSSM, &pVM->aCpus[i].hwaccm.s.Event.errCode);
+        AssertRCReturn(rc, rc);
+        rc = SSMR3GetU64(pSSM, &pVM->aCpus[i].hwaccm.s.Event.intInfo);
+        AssertRCReturn(rc, rc);
+    }
     return VINF_SUCCESS;
 }
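The save/load hunks above switch the pending-event serialization from one record per VM to one record per virtual CPU. Below is a minimal, self-contained sketch of that per-VCPU loop pattern; all type and function names here are simplified stand-ins for illustration, not the real PVM/PVMCPU/SSM definitions from the VBox headers:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical, simplified stand-ins for the VBox structures. */
    typedef struct EVENTSTATE
    {
        uint32_t fPending;   /* corresponds to Event.fPending */
        uint32_t errCode;    /* corresponds to Event.errCode  */
        uint64_t intInfo;    /* corresponds to Event.intInfo  */
    } EVENTSTATE;

    typedef struct VMSTATE
    {
        unsigned   cCPUs;
        EVENTSTATE aCpus[8];
    } VMSTATE;

    /* Write one event record per virtual CPU, in CPU order; the loader
     * must read the records back in exactly the same order and count. */
    static int saveEventState(const VMSTATE *pVM, FILE *pSSM)
    {
        for (unsigned i = 0; i < pVM->cCPUs; i++)
        {
            if (   fwrite(&pVM->aCpus[i].fPending, sizeof(uint32_t), 1, pSSM) != 1
                || fwrite(&pVM->aCpus[i].errCode,  sizeof(uint32_t), 1, pSSM) != 1
                || fwrite(&pVM->aCpus[i].intInfo,  sizeof(uint64_t), 1, pSSM) != 1)
                return -1;
        }
        return 0;
    }

A saved state now carries cCPUs records instead of one, which is presumably why the version check returning VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION sits ahead of the load loop.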
trunk/src/VBox/VMM/HWACCMInternal.h (r13880 → r13883)

     bool fAllowVPID;
 
-    /** Set if we need to flush the TLB during the world switch. */
-    bool fForceTLBFlush;
-
-    /** Old style FPU reporting trap mask override performed (optimization) */
-    bool fFPUOldStyleOverride;
-
     /** Explicit alignment padding to make 32-bit gcc align u64RegisterMask
      * naturally. */
     bool padding[1];
 
-    /** HWACCM_CHANGED_* flags. */
-    RTUINT fContextUseFlags;
-
-    /* Id of the last cpu we were executing code on (NIL_RTCPUID for the first time) */
-    RTCPUID idLastCpu;
-
-    /* TLB flush count */
-    RTUINT cTLBFlushes;
-
-    /* Current ASID in use by the VM */
-    RTUINT uCurrentASID;
-
     /** Maximum ASID allowed. */
     RTUINT uMaxASID;
…
         /** Set when we've enabled VMX. */
         bool fEnabled;
-
-        /** Set if we can use VMXResume to execute guest code. */
-        bool fResumeVM;
 
         /** Set if VPID is supported. */
…
         /** Ring 0 handlers for VT-x. */
-        DECLR0CALLBACKMEMBER(void, pfnSetupTaggedTLB, (PVM pVM));
+        DECLR0CALLBACKMEMBER(void, pfnSetupTaggedTLB, (PVM pVM, PVMCPU pVCpu));
 
         /** Host CR4 value (set by ring-0 VMX init) */
…
         VMX_FLUSH enmFlushPage;
         VMX_FLUSH enmFlushContext;
-
-        /** Real-mode emulation state. */
-        struct
-        {
-            X86EFLAGS eflags;
-            uint32_t  fValid;
-        } RealMode;
-
-        struct
-        {
-            uint64_t u64VMCSPhys;
-            uint32_t ulVMCSRevision;
-            uint32_t ulLastInstrError;
-            uint32_t ulLastExitReason;
-            uint32_t padding;
-        } lasterror;
     } vmx;
…
         /** Set when we've enabled SVM. */
         bool fEnabled;
-        /** Set if we don't have to flush the TLB on VM entry. */
-        bool fResumeVM;
         /** Set if erratum 170 affects the AMD cpu. */
         bool fAlwaysFlushTLB;
+        /** Explicit alignment padding to make 32-bit gcc align u64RegisterMask
+         * naturally. */
+        bool padding[1];
 
         /** R0 memory object for the host VM control block (VMCB). */
…
         uint32_t u32AMDFeatureEDX;
     } cpuid;
-
-#if HC_ARCH_BITS == 32
-    uint32_t Alignment1;
-#endif
-
-    /** Event injection state. */
-    struct
-    {
-        uint32_t fPending;
-        uint32_t errCode;
-        uint64_t intInfo;
-    } Event;
 
     /** Saved error from detection */
…
 typedef struct HWACCMCPU
 {
-    /** Offset to the VM structure.
-     * See HWACCMCPU2VM(). */
-    RTUINT offVMCPU;
+    /** Old style FPU reporting trap mask override performed (optimization) */
+    bool fFPUOldStyleOverride;
+
+    /** Set if we don't have to flush the TLB on VM entry. */
+    bool fResumeVM;
+
+    /** Set if we need to flush the TLB during the world switch. */
+    bool fForceTLBFlush;
+
+    /** Explicit alignment padding to make 32-bit gcc align u64RegisterMask
+     * naturally. */
+    bool padding[1];
+
+    /** HWACCM_CHANGED_* flags. */
+    RTUINT fContextUseFlags;
+
+    /* Id of the last cpu we were executing code on (NIL_RTCPUID for the first time) */
+    RTCPUID idLastCpu;
+
+    /* TLB flush count */
+    RTUINT cTLBFlushes;
+
+    /* Current ASID in use by the VM */
+    RTUINT uCurrentASID;
 
     struct
…
         /** Current EPTP. */
         RTHCPHYS GCPhysEPTP;
+
+        /** Real-mode emulation state. */
+        struct
+        {
+            X86EFLAGS eflags;
+            uint32_t  fValid;
+        } RealMode;
+
+        struct
+        {
+            uint64_t u64VMCSPhys;
+            uint32_t ulVMCSRevision;
+            uint32_t ulLastInstrError;
+            uint32_t ulLastExitReason;
+            uint32_t padding;
+        } lasterror;
+
     } vmx;
…
     } svm;
+
+    /** Event injection state. */
+    struct
+    {
+        uint32_t fPending;
+        uint32_t errCode;
+        uint64_t intInfo;
+    } Event;
 
 } HWACCMCPU;
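Most of the fields moved into HWACCMCPU above (idLastCpu, cTLBFlushes, uCurrentASID, fForceTLBFlush, ...) are TLB bookkeeping that only makes sense per virtual CPU once several VCPUs can be scheduled onto different host CPUs. A hedged, self-contained sketch of the staleness check those fields support, with simplified stand-in types (the real check appears in the HWSVMR0.cpp and HWVMXR0.cpp hunks below):

    #include <stdint.h>

    /* Hypothetical stand-ins for PHWACCM_CPUINFO and the HWACCMCPU fields. */
    typedef struct HOSTCPU
    {
        uint32_t idCpu;        /* host CPU id                        */
        uint32_t cTLBFlushes;  /* flush generation on this host CPU  */
    } HOSTCPU;

    typedef struct VCPUTLB
    {
        uint32_t idLastCpu;    /* host CPU this VCPU last ran on     */
        uint32_t cTLBFlushes;  /* flush generation it last saw there */
    } VCPUTLB;

    /* A VCPU must force a TLB flush if it was rescheduled to another host
     * CPU, or if another VM flushed this host CPU's TLB in the meantime
     * (which invalidates the ASID the VCPU was using). */
    static int needsForcedTlbFlush(const VCPUTLB *pVCpu, const HOSTCPU *pCpu)
    {
        return pVCpu->idLastCpu   != pCpu->idCpu
            || pVCpu->cTLBFlushes != pCpu->cTLBFlushes;
    }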
trunk/src/VBox/VMM/VMMAll/HWACCMAll.cpp (r13832 → r13883)

 {
 #ifdef IN_RING0
+    PVMCPU pVCpu = &pVM->aCpus[HWACCMGetVMCPUId(pVM)];
     if (pVM->hwaccm.s.vmx.fSupported)
-        return VMXR0InvalidatePage(pVM, GCVirt);
+        return VMXR0InvalidatePage(pVM, pVCpu, GCVirt);
 
     Assert(pVM->hwaccm.s.svm.fSupported);
-    return SVMR0InvalidatePage(pVM, GCVirt);
+    return SVMR0InvalidatePage(pVM, pVCpu, GCVirt);
 #endif
…
     LogFlow(("HWACCMFlushTLB\n"));
 
-    pVM->hwaccm.s.fForceTLBFlush = true;
+    pVM->aCpus[HWACCMGetVMCPUId(pVM)].hwaccm.s.fForceTLBFlush = true;
     STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushTLBManual);
     return VINF_SUCCESS;
…
 #ifdef IN_RING0
+    PVMCPU pVCpu = &pVM->aCpus[HWACCMGetVMCPUId(pVM)];
     if (pVM->hwaccm.s.vmx.fSupported)
-        return VMXR0InvalidatePhysPage(pVM, GCPhys);
+        return VMXR0InvalidatePhysPage(pVM, pVCpu, GCPhys);
 
     Assert(pVM->hwaccm.s.svm.fSupported);
-    SVMR0InvalidatePhysPage(pVM, GCPhys);
+    SVMR0InvalidatePhysPage(pVM, pVCpu, GCPhys);
 #else
     HWACCMFlushTLB(pVM);
…
 VMMDECL(bool) HWACCMHasPendingIrq(PVM pVM)
 {
-    return !!pVM->hwaccm.s.Event.fPending;
+    /* @todo SMP */
+    return !!pVM->aCpus[0].hwaccm.s.Event.fPending;
 }
trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp (r13879 → r13883)

     pVM->hwaccm.s.uMaxASID = HWACCMR0Globals.uMaxASID;
 
-    /* Invalidate the last cpu we were running on. */
-    pVM->hwaccm.s.idLastCpu = NIL_RTCPUID;
-
-    /* we'll aways increment this the first time (host uses ASID 0) */
-    pVM->hwaccm.s.uCurrentASID = 0;
+    for (unsigned i=0;i<pVM->cCPUs;i++)
+    {
+        /* Invalidate the last cpu we were running on. */
+        pVM->aCpus[i].hwaccm.s.idLastCpu = NIL_RTCPUID;
+
+        /* we'll aways increment this the first time (host uses ASID 0) */
+        pVM->aCpus[i].hwaccm.s.uCurrentASID = 0;
+    }
 
     ASMAtomicWriteBool(&pCpu->fInUse, true);
…
     ASMAtomicWriteBool(&pCpu->fInUse, true);
+
+    for (unsigned i=0;i<pVM->cCPUs;i++)
+    {
+        /* On first entry we'll sync everything. */
+        pVM->aCpus[i].hwaccm.s.fContextUseFlags = HWACCM_CHANGED_ALL;
+    }
 
     /* Setup VT-x or AMD-V. */
…
     /* Always reload the host context and the guest's CR0 register. (!!!!) */
-    pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_HOST_CONTEXT;
+    pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_HOST_CONTEXT;
 
     /* Setup the register and mask according to the current execution mode. */
…
         CPUMR0SaveGuestFPU(pVM, pCtx);
 
-        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
+        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
     }
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp (r13879 → r13883)

  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
+ * @param   pVCpu       The VM CPU to operate on.
  * @param   pVMCB       SVM control block
  * @param   pCtx        CPU Context
  */
-static int SVMR0CheckPendingInterrupt(PVM pVM, SVM_VMCB *pVMCB, CPUMCTX *pCtx)
+static int SVMR0CheckPendingInterrupt(PVM pVM, PVMCPU pVCpu, SVM_VMCB *pVMCB, CPUMCTX *pCtx)
 {
     int rc;
 
     /* Dispatch any pending interrupts. (injected before, but a VM exit occurred prematurely) */
-    if (pVM->hwaccm.s.Event.fPending)
+    if (pVCpu->hwaccm.s.Event.fPending)
     {
         SVM_EVENT Event;
 
-        Log(("Reinjecting event %08x %08x at %RGv\n", pVM->hwaccm.s.Event.intInfo, pVM->hwaccm.s.Event.errCode, (RTGCPTR)pCtx->rip));
+        Log(("Reinjecting event %08x %08x at %RGv\n", pVCpu->hwaccm.s.Event.intInfo, pVCpu->hwaccm.s.Event.errCode, (RTGCPTR)pCtx->rip));
         STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntReinject);
-        Event.au64[0] = pVM->hwaccm.s.Event.intInfo;
+        Event.au64[0] = pVCpu->hwaccm.s.Event.intInfo;
         SVMR0InjectEvent(pVM, pVMCB, pCtx, &Event);
 
-        pVM->hwaccm.s.Event.fPending = false;
+        pVCpu->hwaccm.s.Event.fPending = false;
         return VINF_SUCCESS;
     }
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
- * @param   pVMCPU      The VM CPU to operate on.
+ * @param   pVCpu       The VM CPU to operate on.
  */
 VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu)
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
- * @param   pVMCPU      The VM CPU to operate on.
+ * @param   pVCpu       The VM CPU to operate on.
  * @param   pCtx        Guest context
  */
…
 
     /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
-    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
+    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
     {
         SVM_WRITE_SELREG(CS, cs);
…
 
     /* Guest CPU context: LDTR. */
-    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)
+    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)
     {
         SVM_WRITE_SELREG(LDTR, ldtr);
     }
 
     /* Guest CPU context: TR. */
-    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)
+    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)
     {
         SVM_WRITE_SELREG(TR, tr);
     }
 
     /* Guest CPU context: GDTR. */
-    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
+    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
     {
         pVMCB->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt;
…
     /* Guest CPU context: IDTR. */
-    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
+    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
     {
         pVMCB->guest.IDTR.u32Limit = pCtx->idtr.cbIdt;
…
 
     /* Control registers */
-    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
+    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
     {
         val = pCtx->cr0;
…
             /* Also catch floating point exceptions as we need to report them to the guest in a different way. */
-            if (!pVM->hwaccm.s.fFPUOldStyleOverride)
+            if (!pVCpu->hwaccm.s.fFPUOldStyleOverride)
             {
                 pVMCB->ctrl.u32InterceptException |= RT_BIT(X86_XCPT_MF);
-                pVM->hwaccm.s.fFPUOldStyleOverride = true;
+                pVCpu->hwaccm.s.fFPUOldStyleOverride = true;
             }
         }
…
     pVMCB->guest.u64CR2 = pCtx->cr2;
 
-    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
+    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
     {
         /* Save our shadow CR3 register. */
…
-    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)
+    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)
     {
         val = pCtx->cr4;
…
 
     /* Debug registers. */
-    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)
+    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)
     {
         pCtx->dr[6] |= X86_DR6_INIT_VAL;    /* set all reserved bits to 1. */
…
 
     /* Done. */
-    pVM->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;
+    pVCpu->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;
 
     return VINF_SUCCESS;
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
- * @param   pVMCPU      The VM CPU to operate on.
+ * @param   pVCpu       The VM CPU to operate on.
  * @param   pCtx        Guest context
  */
…
     /* When external interrupts are pending, we should exit the VM when IF is set. */
     /* Note! *After* VM_FF_INHIBIT_INTERRUPTS check!!! */
-    rc = SVMR0CheckPendingInterrupt(pVM, pVMCB, pCtx);
+    rc = SVMR0CheckPendingInterrupt(pVM, pVCpu, pVMCB, pCtx);
     if (RT_FAILURE(rc))
     {
…
 #ifdef LOG_ENABLED
     pCpu = HWACCMR0GetCurrentCpu();
-    if (   pVM->hwaccm.s.idLastCpu   != pCpu->idCpu
-        || pVM->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
-    {
-        if (pVM->hwaccm.s.idLastCpu != pCpu->idCpu)
-            Log(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVM->hwaccm.s.idLastCpu, pCpu->idCpu));
+    if (   pVCpu->hwaccm.s.idLastCpu   != pCpu->idCpu
+        || pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
+    {
+        if (pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu)
+            Log(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hwaccm.s.idLastCpu, pCpu->idCpu));
         else
-            Log(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVM->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
+            Log(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
     }
     if (pCpu->fFlushTLB)
…
     /* Force a TLB flush for the first world switch if the current cpu differs from the one we ran on last. */
     /* Note that this can happen both for start and resume due to long jumps back to ring 3. */
-    if (    pVM->hwaccm.s.idLastCpu != pCpu->idCpu
+    if (    pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu
         /* if the tlb flush count has changed, another VM has flushed the TLB of this cpu, so we can't use our current ASID anymore. */
-        ||  pVM->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
+        ||  pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
     {
         /* Force a TLB flush on VM entry. */
-        pVM->hwaccm.s.fForceTLBFlush = true;
+        pVCpu->hwaccm.s.fForceTLBFlush = true;
     }
     else
         Assert(!pCpu->fFlushTLB || pVM->hwaccm.s.svm.fAlwaysFlushTLB);
 
-    pVM->hwaccm.s.idLastCpu = pCpu->idCpu;
+    pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;
 
     /* Make sure we flush the TLB when required. Switch ASID to achieve the same thing, but without actually flushing the whole TLB (which is expensive). */
-    if (    pVM->hwaccm.s.fForceTLBFlush
+    if (    pVCpu->hwaccm.s.fForceTLBFlush
         && !pVM->hwaccm.s.svm.fAlwaysFlushTLB)
     {
…
         STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushASID);
 
-        pVM->hwaccm.s.cTLBFlushes  = pCpu->cTLBFlushes;
-        pVM->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
+        pVCpu->hwaccm.s.cTLBFlushes  = pCpu->cTLBFlushes;
+        pVCpu->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
     }
     else
     {
…
         /* We never increase uCurrentASID in the fAlwaysFlushTLB (erratum 170) case. */
-        if (!pCpu->uCurrentASID || !pVM->hwaccm.s.uCurrentASID)
-            pVM->hwaccm.s.uCurrentASID = pCpu->uCurrentASID = 1;
-
-        Assert(!pVM->hwaccm.s.svm.fAlwaysFlushTLB || pVM->hwaccm.s.fForceTLBFlush);
-        pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = pVM->hwaccm.s.fForceTLBFlush;
-    }
-    AssertMsg(pVM->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVM->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
+        if (!pCpu->uCurrentASID || !pVCpu->hwaccm.s.uCurrentASID)
+            pVCpu->hwaccm.s.uCurrentASID = pCpu->uCurrentASID = 1;
+
+        Assert(!pVM->hwaccm.s.svm.fAlwaysFlushTLB || pVCpu->hwaccm.s.fForceTLBFlush);
+        pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = pVCpu->hwaccm.s.fForceTLBFlush;
+    }
+    AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
     AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
-    AssertMsg(pVM->hwaccm.s.uCurrentASID >= 1 && pVM->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVM->hwaccm.s.uCurrentASID));
-    pVMCB->ctrl.TLBCtrl.n.u32ASID = pVM->hwaccm.s.uCurrentASID;
+    AssertMsg(pVCpu->hwaccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hwaccm.s.uCurrentASID));
+    pVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->hwaccm.s.uCurrentASID;
 
 #ifdef VBOX_WITH_STATISTICS
…
     /* In case we execute a goto ResumeExecution later on. */
-    pVM->hwaccm.s.svm.fResumeVM  = true;
-    pVM->hwaccm.s.fForceTLBFlush = pVM->hwaccm.s.svm.fAlwaysFlushTLB;
+    pVCpu->hwaccm.s.fResumeVM      = true;
+    pVCpu->hwaccm.s.fForceTLBFlush = pVM->hwaccm.s.svm.fAlwaysFlushTLB;
 
     Assert(sizeof(pVCpu->hwaccm.s.svm.pVMCBPhys) == 8);
…
     /* Check if an injected event was interrupted prematurely. */
-    pVM->hwaccm.s.Event.intInfo = pVMCB->ctrl.ExitIntInfo.au64[0];
+    pVCpu->hwaccm.s.Event.intInfo = pVMCB->ctrl.ExitIntInfo.au64[0];
     if (    pVMCB->ctrl.ExitIntInfo.n.u1Valid
         &&  pVMCB->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT /* we don't care about 'int xx' as the instruction will be restarted. */)
     {
-        Log(("Pending inject %RX64 at %RGv exit=%08x\n", pVM->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitCode));
+        Log(("Pending inject %RX64 at %RGv exit=%08x\n", pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitCode));
 
 #ifdef LOG_ENABLED
         SVM_EVENT Event;
-        Event.au64[0] = pVM->hwaccm.s.Event.intInfo;
+        Event.au64[0] = pVCpu->hwaccm.s.Event.intInfo;
 
         if (    exitCode == SVM_EXIT_EXCEPTION_E
…
 #endif
 
-        pVM->hwaccm.s.Event.fPending = true;
+        pVCpu->hwaccm.s.Event.fPending = true;
         /* Error code present? (redundant) */
         if (pVMCB->ctrl.ExitIntInfo.n.u1ErrorCodeValid)
         {
-            pVM->hwaccm.s.Event.errCode  = pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode;
+            pVCpu->hwaccm.s.Event.errCode  = pVMCB->ctrl.ExitIntInfo.n.u32ErrorCode;
         }
         else
-            pVM->hwaccm.s.Event.errCode  = 0;
+            pVCpu->hwaccm.s.Event.errCode  = 0;
     }
 #ifdef VBOX_WITH_STATISTICS
…
                 /* Continue execution. */
                 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
-                pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
+                pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
 
                 goto ResumeExecution;
…
             case 0:
-                pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
+                pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
                 break;
             case 2:
                 break;
             case 3:
                 Assert(!pVM->hwaccm.s.fNestedPaging);
-                pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
+                pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
                 break;
             case 4:
-                pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
+                pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
                 break;
             case 8:
…
             /* Must be set by PGMSyncCR3 */
-            Assert(PGMGetGuestMode(pVM) <= PGMMODE_PROTECTED || pVM->hwaccm.s.fForceTLBFlush);
+            Assert(PGMGetGuestMode(pVM) <= PGMMODE_PROTECTED || pVCpu->hwaccm.s.fForceTLBFlush);
         }
         if (rc == VINF_SUCCESS)
…
         {
             /* EIP has been updated already. */
-            pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
+            pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
 
             /* Only resume if successful. */
…
         STAM_COUNTER_INC(&pVM->hwaccm.s.StatPendingHostIrq);
         /* On the next entry we'll only sync the host context. */
-        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;
+        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;
     }
     else
     {
         /** @todo we can do better than this */
         /* Not in the VINF_PGM_CHANGE_MODE though! */
-        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
+        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
     }
…
     Assert(pVM->hwaccm.s.svm.fSupported);
 
-    LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVM->hwaccm.s.idLastCpu, pVM->hwaccm.s.uCurrentASID));
-    pVM->hwaccm.s.svm.fResumeVM = false;
+    LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVCpu->hwaccm.s.idLastCpu, pVCpu->hwaccm.s.uCurrentASID));
+    pVCpu->hwaccm.s.fResumeVM = false;
 
     /* Force to reload LDTR, so we'll execute VMLoad to load additional guest state. */
-    pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_LDTR;
+    pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_LDTR;
 
     return VINF_SUCCESS;
…
         /* Resync the debug registers the next time. */
-        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
+        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
     }
     else
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
+ * @param   pVCpu       The VM CPU to operate on.
  * @param   GCVirt      Page to invalidate
  */
-VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, RTGCPTR GCVirt)
+VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
 {
-    bool fFlushPending = pVM->hwaccm.s.svm.fAlwaysFlushTLB | pVM->hwaccm.s.fForceTLBFlush;
+    bool fFlushPending = pVM->hwaccm.s.svm.fAlwaysFlushTLB | pVCpu->hwaccm.s.fForceTLBFlush;
 
     /* Skip it if a TLB flush is already pending. */
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
+ * @param   pVCpu       The VM CPU to operate on.
  * @param   GCPhys      Page to invalidate
  */
-VMMR0DECL(int) SVMR0InvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
+VMMR0DECL(int) SVMR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
 {
     Assert(pVM->hwaccm.s.fNestedPaging);
     /* invlpga only invalidates TLB entries for guest virtual addresses; we have no choice but to force a TLB flush here. */
-    pVM->hwaccm.s.fForceTLBFlush = true;
+    pVCpu->hwaccm.s.fForceTLBFlush = true;
     STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushTLBInvlpga);
     return VINF_SUCCESS;
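The ASID handling in the SVM hunks above follows a simple recycling scheme: on a forced flush the host CPU hands the VCPU a fresh ASID, and when the pool is exhausted it wraps back to 1 (ASID 0 belongs to the host) and bumps the flush generation, so every older guest ASID on that CPU becomes stale. A hedged sketch with simplified types; MAX_ASID and the field names are stand-ins for the real members, and the wrap logic mirrors the ++pCpu->uCurrentASID >= uMaxASID test visible in the VPID hunks further down:

    #include <stdint.h>

    typedef struct HOSTCPU
    {
        uint32_t uCurrentASID;  /* last ASID handed out on this host CPU    */
        uint32_t cTLBFlushes;   /* bumped when all guest ASIDs become stale */
        int      fFlushAll;     /* set when the wrap forces a full flush    */
    } HOSTCPU;

    #define MAX_ASID 64         /* stands in for pVM->hwaccm.s.uMaxASID */

    /* Hand out the next ASID for a VCPU that must flush. */
    static uint32_t assignNewAsid(HOSTCPU *pCpu)
    {
        if (++pCpu->uCurrentASID >= MAX_ASID)
        {
            pCpu->uCurrentASID = 1;  /* wrap; ASID 0 is reserved for the host  */
            pCpu->cTLBFlushes++;     /* invalidates every guest ASID handed out */
            pCpu->fFlushAll = 1;
        }
        return pCpu->uCurrentASID;
    }

Switching ASIDs instead of flushing is the cheap path; the erratum-170 (fAlwaysFlushTLB) case in the hunks deliberately skips it and flushes on every entry.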
trunk/src/VBox/VMM/VMMR0/HWSVMR0.h (r13879 → r13883)

  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
- * @param   pVCpu       VPCPU id.
+ * @param   pVCpu       The VMCPU to operate on.
  * @param   pCpu        CPU info struct
  */
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
- * @param   pVCpu       VPCPU id.
+ * @param   pVCpu       The VMCPU to operate on.
  * @param   pCtx        CPU context
  */
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
- * @param   pVCpu       VPCPU id.
+ * @param   pVCpu       The VMCPU to operate on.
  * @param   pCtx        Guest context
  */
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
- * @param   pVCpu       VPCPU id.
+ * @param   pVCpu       The VMCPU to operate on.
  */
 VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu);
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
- * @param   pVCpu       VPCPU id.
+ * @param   pVCpu       The VMCPU to operate on.
  * @param   pCtx        Guest context
  */
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r13879 → r13883)

  *   Local Functions                                                           *
  *******************************************************************************/
-static void VMXR0ReportWorldSwitchError(PVM pVM, int rc, PCPUMCTX pCtx);
-static void vmxR0SetupTLBEPT(PVM pVM);
-static void vmxR0SetupTLBVPID(PVM pVM);
-static void vmxR0SetupTLBDummy(PVM pVM);
-static void vmxR0FlushEPT(PVM pVM, VMX_FLUSH enmFlush, RTGCPHYS GCPhys);
-static void vmxR0FlushVPID(PVM pVM, VMX_FLUSH enmFlush, RTGCPTR GCPtr);
-static void vmxR0UpdateExceptionBitmap(PVM pVM, PCPUMCTX pCtx);
+static void VMXR0ReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rc, PCPUMCTX pCtx);
+static void vmxR0SetupTLBEPT(PVM pVM, PVMCPU pVCpu);
+static void vmxR0SetupTLBVPID(PVM pVM, PVMCPU pVCpu);
+static void vmxR0SetupTLBDummy(PVM pVM, PVMCPU pVCpu);
+static void vmxR0FlushEPT(PVM pVM, PVMCPU pVCpu, VMX_FLUSH enmFlush, RTGCPHYS GCPhys);
+static void vmxR0FlushVPID(PVM pVM, PVMCPU pVCpu, VMX_FLUSH enmFlush, RTGCPTR GCPtr);
+static void vmxR0UpdateExceptionBitmap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
+ * @param   pVCpu       The VMCPU to operate on.
  * @param   pCtx        CPU Context
  * @param   intInfo     VMX interrupt info
…
  * @param   errCode     Error code (optional)
  */
-static int VMXR0InjectEvent(PVM pVM, CPUMCTX *pCtx, uint32_t intInfo, uint32_t cbInstr, uint32_t errCode)
+static int VMXR0InjectEvent(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t intInfo, uint32_t cbInstr, uint32_t errCode)
 {
     int rc;
…
             intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HWEXCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
 
-            return VMXR0InjectEvent(pVM, pCtx, intInfo, 0, 0 /* no error code according to the Intel docs */);
+            return VMXR0InjectEvent(pVM, pVCpu, pCtx, intInfo, 0, 0 /* no error code according to the Intel docs */);
         }
         Log(("Triple fault -> reset the VM!\n"));
…
             pCtx->eflags.u &= ~(X86_EFL_IF|X86_EFL_TF|X86_EFL_RF|X86_EFL_AC);
 
-            pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_SEGMENT_REGS;
+            pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_SEGMENT_REGS;
             return VINF_SUCCESS;
         }
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
- * @param   idVCpu      VMCPU id.
+ * @param   pVCpu       The VMCPU to operate on.
  * @param   pCtx        CPU Context
  */
…
     /* Dispatch any pending interrupts. (injected before, but a VM exit occurred prematurely) */
-    if (pVM->hwaccm.s.Event.fPending)
-    {
-        Log(("Reinjecting event %RX64 %08x at %RGv cr2=%RX64\n", pVM->hwaccm.s.Event.intInfo, pVM->hwaccm.s.Event.errCode, (RTGCPTR)pCtx->rip, pCtx->cr2));
+    if (pVCpu->hwaccm.s.Event.fPending)
+    {
+        Log(("Reinjecting event %RX64 %08x at %RGv cr2=%RX64\n", pVCpu->hwaccm.s.Event.intInfo, pVCpu->hwaccm.s.Event.errCode, (RTGCPTR)pCtx->rip, pCtx->cr2));
         STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntReinject);
-        rc = VMXR0InjectEvent(pVM, pCtx, pVM->hwaccm.s.Event.intInfo, 0, pVM->hwaccm.s.Event.errCode);
-        AssertRC(rc);
-
-        pVM->hwaccm.s.Event.fPending = false;
+        rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, pVCpu->hwaccm.s.Event.intInfo, 0, pVCpu->hwaccm.s.Event.errCode);
+        AssertRC(rc);
+
+        pVCpu->hwaccm.s.Event.fPending = false;
         return VINF_SUCCESS;
     }
…
         STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntInject);
-        rc = VMXR0InjectEvent(pVM, pCtx, intInfo, 0, errCode);
+        rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, intInfo, 0, errCode);
         AssertRC(rc);
     } /* if (interrupts can be dispatched) */
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
- * @param   idVCpu      VPCPU id.
+ * @param   pVCpu       The VMCPU to operate on.
  */
 VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
…
     /*
      * Host CPU Context
      */
-    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_HOST_CONTEXT)
+    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_HOST_CONTEXT)
     {
         RTIDTR idtr;
…
         AssertRC(rc);
 
-        pVM->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_HOST_CONTEXT;
+        pVCpu->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_HOST_CONTEXT;
     }
     return rc;
…
  *
  * @param   pVM         The VM to operate on.
+ * @param   pVCpu       The VMCPU to operate on.
  * @param   pCtx        Guest context
  */
-static void vmxR0UpdateExceptionBitmap(PVM pVM, PCPUMCTX pCtx)
+static void vmxR0UpdateExceptionBitmap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
 {
     uint32_t u32TrapMask;
…
     if (    CPUMIsGuestFPUStateActive(pVM) == true
         && !(pCtx->cr0 & X86_CR0_NE)
-        && !pVM->hwaccm.s.fFPUOldStyleOverride)
+        && !pVCpu->hwaccm.s.fFPUOldStyleOverride)
     {
         u32TrapMask |= RT_BIT(X86_XCPT_MF);
-        pVM->hwaccm.s.fFPUOldStyleOverride = true;
+        pVCpu->hwaccm.s.fFPUOldStyleOverride = true;
     }
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
- * @param   idVCpu      VPCPU id.
+ * @param   pVCpu       The VMCPU to operate on.
  * @param   pCtx        Guest context
  */
…
 
     /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
-    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
+    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
     {
 #ifdef HWACCM_VMX_EMULATE_REALMODE
…
 
     /* Guest CPU context: LDTR. */
-    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)
+    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)
     {
         if (pCtx->ldtr == 0)
…
     }
     /* Guest CPU context: TR. */
-    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)
+    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)
     {
 #ifdef HWACCM_VMX_EMULATE_REALMODE
…
     }
     /* Guest CPU context: GDTR. */
-    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
+    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
     {
         rc = VMXWriteVMCS(VMX_VMCS_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt);
…
     }
     /* Guest CPU context: IDTR. */
-    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
+    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
     {
         rc = VMXWriteVMCS(VMX_VMCS_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt);
…
 
     /* Control registers */
-    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
+    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
     {
         val = pCtx->cr0;
…
         AssertRC(rc);
     }
-    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)
+    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)
     {
         /* CR4 */
…
     }
 
-    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
+    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
     {
         if (pVM->hwaccm.s.fNestedPaging)
…
 
     /* Debug registers. */
-    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)
+    if (pVCpu->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)
     {
         pCtx->dr[6] |= X86_DR6_INIT_VAL;    /* set all reserved bits to 1. */
…
     if (CPUMIsGuestInRealModeEx(pCtx))
     {
-        pVM->hwaccm.s.vmx.RealMode.eflags = eflags;
+        pVCpu->hwaccm.s.vmx.RealMode.eflags = eflags;
 
         eflags.Bits.u1VM = 1;
…
     }
 
-    vmxR0UpdateExceptionBitmap(pVM, pCtx);
+    vmxR0UpdateExceptionBitmap(pVM, pVCpu, pCtx);
 
     /* Done. */
-    pVM->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;
+    pVCpu->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;
 
     return rc;
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
- * @param   idVCpu      VMCPU id.
+ * @param   pVCpu       The VMCPU to operate on.
  * @param   pCtx        Guest context
  */
…
         /* Hide our emulation flags */
         pCtx->eflags.Bits.u1VM   = 0;
-        pCtx->eflags.Bits.u2IOPL = pVM->hwaccm.s.vmx.RealMode.eflags.Bits.u2IOPL;
+        pCtx->eflags.Bits.u2IOPL = pVCpu->hwaccm.s.vmx.RealMode.eflags.Bits.u2IOPL;
 
         /* Force a TR resync every time in case we switch modes. */
-        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_TR;
+        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_TR;
     }
     else
…
  *
  * @param   pVM         The VM to operate on.
+ * @param   pVCpu       The VMCPU to operate on.
  */
-static void vmxR0SetupTLBDummy(PVM pVM)
+static void vmxR0SetupTLBDummy(PVM pVM, PVMCPU pVCpu)
 {
+    NOREF(pVM);
+    NOREF(pVCpu);
     return;
 }
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
+ * @param   pVCpu       The VMCPU to operate on.
  */
-static void vmxR0SetupTLBEPT(PVM pVM)
+static void vmxR0SetupTLBEPT(PVM pVM, PVMCPU pVCpu)
 {
     PHWACCM_CPUINFO pCpu;
…
     /* Force a TLB flush for the first world switch if the current cpu differs from the one we ran on last. */
     /* Note that this can happen both for start and resume due to long jumps back to ring 3. */
-    if (    pVM->hwaccm.s.idLastCpu != pCpu->idCpu
+    if (    pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu
         /* if the tlb flush count has changed, another VM has flushed the TLB of this cpu, so we can't use our current ASID anymore. */
-        ||  pVM->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
+        ||  pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
     {
         /* Force a TLB flush on VM entry. */
-        pVM->hwaccm.s.fForceTLBFlush = true;
+        pVCpu->hwaccm.s.fForceTLBFlush = true;
     }
     else
         Assert(!pCpu->fFlushTLB);
 
-    pVM->hwaccm.s.idLastCpu = pCpu->idCpu;
+    pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;
     pCpu->fFlushTLB = false;
 
-    if (pVM->hwaccm.s.fForceTLBFlush)
-        vmxR0FlushEPT(pVM, pVM->hwaccm.s.vmx.enmFlushContext, 0);
+    if (pVCpu->hwaccm.s.fForceTLBFlush)
+        vmxR0FlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushContext, 0);
 
 #ifdef VBOX_WITH_STATISTICS
-    if (pVM->hwaccm.s.fForceTLBFlush)
+    if (pVCpu->hwaccm.s.fForceTLBFlush)
         STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushTLBWorldSwitch);
     else
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
+ * @param   pVCpu       The VMCPU to operate on.
  */
-static void vmxR0SetupTLBVPID(PVM pVM)
+static void vmxR0SetupTLBVPID(PVM pVM, PVMCPU pVCpu)
 {
     PHWACCM_CPUINFO pCpu;
…
     /* Force a TLB flush for the first world switch if the current cpu differs from the one we ran on last. */
     /* Note that this can happen both for start and resume due to long jumps back to ring 3. */
-    if (    pVM->hwaccm.s.idLastCpu != pCpu->idCpu
+    if (    pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu
         /* if the tlb flush count has changed, another VM has flushed the TLB of this cpu, so we can't use our current ASID anymore. */
-        ||  pVM->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
+        ||  pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
     {
         /* Force a TLB flush on VM entry. */
-        pVM->hwaccm.s.fForceTLBFlush = true;
+        pVCpu->hwaccm.s.fForceTLBFlush = true;
     }
     else
         Assert(!pCpu->fFlushTLB);
 
-    pVM->hwaccm.s.idLastCpu = pCpu->idCpu;
+    pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;
 
     /* Make sure we flush the TLB when required. Switch ASID to achieve the same thing, but without actually flushing the whole TLB (which is expensive). */
-    if (pVM->hwaccm.s.fForceTLBFlush)
+    if (pVCpu->hwaccm.s.fForceTLBFlush)
     {
         if (    ++pCpu->uCurrentASID >= pVM->hwaccm.s.uMaxASID
…
         {
             STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushASID);
-            pVM->hwaccm.s.fForceTLBFlush = false;
-        }
-
-        pVM->hwaccm.s.cTLBFlushes  = pCpu->cTLBFlushes;
-        pVM->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
+            pVCpu->hwaccm.s.fForceTLBFlush = false;
+        }
+
+        pVCpu->hwaccm.s.cTLBFlushes  = pCpu->cTLBFlushes;
+        pVCpu->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
     }
     else
…
         Assert(!pCpu->fFlushTLB);
 
-        if (!pCpu->uCurrentASID || !pVM->hwaccm.s.uCurrentASID)
-            pVM->hwaccm.s.uCurrentASID = pCpu->uCurrentASID = 1;
-    }
-    AssertMsg(pVM->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVM->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
+        if (!pCpu->uCurrentASID || !pVCpu->hwaccm.s.uCurrentASID)
+            pVCpu->hwaccm.s.uCurrentASID = pCpu->uCurrentASID = 1;
+    }
+    AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
     AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
-    AssertMsg(pVM->hwaccm.s.uCurrentASID >= 1 && pVM->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVM->hwaccm.s.uCurrentASID));
-
-    int rc = VMXWriteVMCS(VMX_VMCS_GUEST_FIELD_VPID, pVM->hwaccm.s.uCurrentASID);
+    AssertMsg(pVCpu->hwaccm.s.uCurrentASID >= 1 && pVCpu->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hwaccm.s.uCurrentASID));
+
+    int rc = VMXWriteVMCS(VMX_VMCS_GUEST_FIELD_VPID, pVCpu->hwaccm.s.uCurrentASID);
     AssertRC(rc);
 
-    if (pVM->hwaccm.s.fForceTLBFlush)
-        vmxR0FlushVPID(pVM, pVM->hwaccm.s.vmx.enmFlushContext, 0);
+    if (pVCpu->hwaccm.s.fForceTLBFlush)
+        vmxR0FlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushContext, 0);
 
 #ifdef VBOX_WITH_STATISTICS
-    if (pVM->hwaccm.s.fForceTLBFlush)
+    if (pVCpu->hwaccm.s.fForceTLBFlush)
         STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushTLBWorldSwitch);
     else
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
- * @param   idVCpu      VPCPU id.
+ * @param   pVCpu       The VMCPU to operate on.
  * @param   pCtx        Guest context
  */
…
     {
         pCpu = HWACCMR0GetCurrentCpu();
-        if (    pVM->hwaccm.s.idLastCpu   != pCpu->idCpu
-            ||  pVM->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
-        {
-            if (pVM->hwaccm.s.idLastCpu != pCpu->idCpu)
-                Log(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVM->hwaccm.s.idLastCpu, pCpu->idCpu));
+        if (    pVCpu->hwaccm.s.idLastCpu   != pCpu->idCpu
+            ||  pVCpu->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
+        {
+            if (pVCpu->hwaccm.s.idLastCpu != pCpu->idCpu)
+                Log(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hwaccm.s.idLastCpu, pCpu->idCpu));
             else
-                Log(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVM->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
+                Log(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
         }
         if (pCpu->fFlushTLB)
             Log(("Force TLB flush: first time cpu %d is used -> flush\n", pCpu->idCpu));
         else
-        if (pVM->hwaccm.s.fForceTLBFlush)
+        if (pVCpu->hwaccm.s.fForceTLBFlush)
             LogFlow(("Manual TLB flush\n"));
     }
…
 
     /* Deal with tagged TLB setup and invalidation. */
-    pVM->hwaccm.s.vmx.pfnSetupTaggedTLB(pVM);
+    pVM->hwaccm.s.vmx.pfnSetupTaggedTLB(pVM, pVCpu);
 
     /* Non-register state Guest Context */
…
 #endif
     TMNotifyStartOfExecution(pVM);
-    rc = pVCpu->hwaccm.s.vmx.pfnStartVM(pVM->hwaccm.s.vmx.fResumeVM, pCtx);
+    rc = pVCpu->hwaccm.s.vmx.pfnStartVM(pVCpu->hwaccm.s.fResumeVM, pCtx);
     TMNotifyEndOfExecution(pVM);
 
     /* In case we execute a goto ResumeExecution later on. */
-    pVM->hwaccm.s.vmx.fResumeVM  = true;
-    pVM->hwaccm.s.fForceTLBFlush = false;
+    pVCpu->hwaccm.s.fResumeVM      = true;
+    pVCpu->hwaccm.s.fForceTLBFlush = false;
 
     /*
…
     if (rc != VINF_SUCCESS)
     {
-        VMXR0ReportWorldSwitchError(pVM, rc, pCtx);
+        VMXR0ReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
         goto end;
     }
…
     rc = VMXReadVMCS(VMX_VMCS_RO_IDT_INFO, &val);
     AssertRC(rc);
-    pVM->hwaccm.s.Event.intInfo = VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(val);
-    if (    VMX_EXIT_INTERRUPTION_INFO_VALID(pVM->hwaccm.s.Event.intInfo)
-        &&  VMX_EXIT_INTERRUPTION_INFO_TYPE(pVM->hwaccm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SW)
-    {
-        pVM->hwaccm.s.Event.fPending = true;
+    pVCpu->hwaccm.s.Event.intInfo = VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(val);
+    if (    VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->hwaccm.s.Event.intInfo)
+        &&  VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hwaccm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SW)
+    {
+        pVCpu->hwaccm.s.Event.fPending = true;
         /* Error code present? */
-        if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVM->hwaccm.s.Event.intInfo))
+        if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVCpu->hwaccm.s.Event.intInfo))
         {
             rc = VMXReadVMCS(VMX_VMCS_RO_IDT_ERRCODE, &val);
             AssertRC(rc);
-            pVM->hwaccm.s.Event.errCode = val;
-            Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%08x pending error=%RX64\n", pVM->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification, val));
+            pVCpu->hwaccm.s.Event.errCode = val;
+            Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%08x pending error=%RX64\n", pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification, val));
         }
         else
         {
-            Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%08x\n", pVM->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification));
-            pVM->hwaccm.s.Event.errCode = 0;
+            Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%08x\n", pVCpu->hwaccm.s.Event.intInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification));
+            pVCpu->hwaccm.s.Event.errCode = 0;
         }
     }
…
                 /* Continue execution. */
                 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
-                pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
+                pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
 
                 goto ResumeExecution;
…
             Log(("Forward #NM fault to the guest\n"));
             STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestNM);
-            rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, 0);
+            rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, 0);
             AssertRC(rc);
             STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
…
                 /* Now we must update CR2. */
                 pCtx->cr2 = exitQualification;
-                rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
+                rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
                 AssertRC(rc);
…
                 /* Now we must update CR2. */
                 pCtx->cr2 = exitQualification;
-                rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
+                rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
                 AssertRC(rc);
…
             }
             Log(("Trap %x at %04X:%RGv\n", vector, pCtx->cs, (RTGCPTR)pCtx->rip));
-            rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
+            rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
             AssertRC(rc);
…
 
                 Log(("Trap %x (debug) at %RGv exit qualification %RX64\n", vector, (RTGCPTR)pCtx->rip, exitQualification));
-                rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
+                rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
                 AssertRC(rc);
…
         {
             Log(("Trap %x at %04X:%RGv errorCode=%x\n", vector, pCtx->cs, (RTGCPTR)pCtx->rip, errCode));
-            rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
+            rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
             AssertRC(rc);
             STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
…
 
             /* lidt, lgdt can end up here. In the future crx changes as well. Just reload the whole context to be done with it. */
-            pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
+            pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
 
             /* Only resume if successful. */
…
 
             Log(("Trap %x at %04X:%RGv\n", vector, pCtx->cs, (RTGCPTR)pCtx->rip));
-            rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
+            rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
             AssertRC(rc);
…
         {
             Log(("Real Mode Trap %x at %04x:%04X error code %x\n", vector, pCtx->cs, pCtx->eip, errCode));
-            rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
+            rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
             AssertRC(rc);
…
                 case 0:
-                    pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_GUEST_CR3;
+                    pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0 | HWACCM_CHANGED_GUEST_CR3;
                     break;
                 case 2:
                     break;
                 case 3:
                     Assert(!pVM->hwaccm.s.fNestedPaging || !CPUMIsGuestInPagedProtectedModeEx(pCtx));
-                    pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
+                    pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
                     break;
                 case 4:
-                    pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
+                    pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
                     break;
                 case 8:
…
             STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCLTS);
             rc = EMInterpretCLTS(pVM);
-            pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
+            pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
             break;
…
             STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitLMSW);
             rc = EMInterpretLMSW(pVM, CPUMCTX2CORE(pCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification));
-            pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
+            pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
             break;
         }
…
                                      VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification),
                                      VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification));
-            pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
+            pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
             Log2(("DR7=%08x\n", pCtx->dr[7]));
         }
…
 
                 Log(("Inject IO debug trap at %RGv\n", (RTGCPTR)pCtx->rip));
-                rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), 0, 0);
+                rc = VMXR0InjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), 0, 0);
                 AssertRC(rc);
…
         STAM_COUNTER_INC(&pVM->hwaccm.s.StatPendingHostIrq);
         /* On the next entry we'll only sync the host context. */
-        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;
+        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;
     }
     else
     {
         /** @todo we can do better than this */
         /* Not in the VINF_PGM_CHANGE_MODE though! */
-        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
+        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
     }
…
     if (rc == VERR_VMX_INVALID_VMCS_PTR)
     {
-        VMXGetActivateVMCS(&pVM->hwaccm.s.vmx.lasterror.u64VMCSPhys);
-        pVM->hwaccm.s.vmx.lasterror.ulVMCSRevision = *(uint32_t *)pVCpu->hwaccm.s.vmx.pVMCS;
+        VMXGetActivateVMCS(&pVCpu->hwaccm.s.vmx.lasterror.u64VMCSPhys);
+        pVCpu->hwaccm.s.vmx.lasterror.ulVMCSRevision = *(uint32_t *)pVCpu->hwaccm.s.vmx.pVMCS;
     }
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
- * @param   idVCpu      VPCPU id.
+ * @param   pVCpu       The VMCPU to operate on.
  * @param   pCpu        CPU info struct
  */
…
         return rc;
 
-    pVM->hwaccm.s.vmx.fResumeVM = false;
+    pVCpu->hwaccm.s.fResumeVM = false;
     return VINF_SUCCESS;
 }
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
- * @param   idVCpu      VPCPU id.
+ * @param   pVCpu       The VMCPU to operate on.
  * @param   pCtx        CPU context
  */
…
 
         /* Resync the debug registers the next time. */
-        pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
+        pVCpu->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_DEBUG;
     }
     else
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
+ * @param   pVCpu       The VM CPU to operate on.
  * @param   enmFlush    Type of flush
  * @param   GCPhys      Physical address of the page to flush
  */
-static void vmxR0FlushEPT(PVM pVM, VMX_FLUSH enmFlush, RTGCPHYS GCPhys)
+static void vmxR0FlushEPT(PVM pVM, PVMCPU pVCpu, VMX_FLUSH enmFlush, RTGCPHYS GCPhys)
 {
     uint64_t descriptor[2];
…
     LogFlow(("vmxR0FlushEPT %d %RGv\n", enmFlush, GCPhys));
     Assert(pVM->hwaccm.s.fNestedPaging);
-    /* @todo SMP */
-    descriptor[0] = pVM->aCpus[0].hwaccm.s.vmx.GCPhysEPTP;
+    descriptor[0] = pVCpu->hwaccm.s.vmx.GCPhysEPTP;
     descriptor[1] = GCPhys;
     int rc = VMXR0InvEPT(enmFlush, &descriptor[0]);
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
+ * @param   pVCpu       The VM CPU to operate on.
  * @param   enmFlush    Type of flush
  * @param   GCPtr       Virtual address of the page to flush
  */
-static void vmxR0FlushVPID(PVM pVM, VMX_FLUSH enmFlush, RTGCPTR GCPtr)
+static void vmxR0FlushVPID(PVM pVM, PVMCPU pVCpu, VMX_FLUSH enmFlush, RTGCPTR GCPtr)
 {
     uint64_t descriptor[2];
 
     Assert(pVM->hwaccm.s.vmx.fVPID);
-    descriptor[0] = pVM->hwaccm.s.uCurrentASID;
+    descriptor[0] = pVCpu->hwaccm.s.uCurrentASID;
     descriptor[1] = GCPtr;
     int rc = VMXR0InvVPID(enmFlush, &descriptor[0]);
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
+ * @param   pVCpu       The VM CPU to operate on.
  * @param   GCVirt      Page to invalidate
  */
-VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, RTGCPTR GCVirt)
+VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
 {
-    bool fFlushPending = pVM->hwaccm.s.fForceTLBFlush;
+    bool fFlushPending = pVCpu->hwaccm.s.fForceTLBFlush;
 
     LogFlow(("VMXR0InvalidatePage %RGv\n", GCVirt));
…
     if (   !fFlushPending
         && pVM->hwaccm.s.vmx.fVPID)
-        vmxR0FlushVPID(pVM, pVM->hwaccm.s.vmx.enmFlushPage, GCVirt);
+        vmxR0FlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushPage, GCVirt);
 #endif /* HWACCM_VTX_WITH_VPID */
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
+ * @param   pVCpu       The VM CPU to operate on.
  * @param   GCPhys      Page to invalidate
  */
-VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
+VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
 {
-    bool fFlushPending = pVM->hwaccm.s.fForceTLBFlush;
+    bool fFlushPending = pVCpu->hwaccm.s.fForceTLBFlush;
 
     Assert(pVM->hwaccm.s.fNestedPaging);
…
     /* Skip it if a TLB flush is already pending. */
     if (!fFlushPending)
-        vmxR0FlushEPT(pVM, pVM->hwaccm.s.vmx.enmFlushPage, GCPhys);
+        vmxR0FlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushPage, GCPhys);
 
     return VINF_SUCCESS;
…
  *
  * @param   pVM         The VM to operate on.
+ * @param   pVCpu       The VMCPU to operate on.
  * @param   rc          Return code
  * @param   pCtx        Current CPU context (not updated)
  */
-static void VMXR0ReportWorldSwitchError(PVM pVM, int rc, PCPUMCTX pCtx)
+static void VMXR0ReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rc, PCPUMCTX pCtx)
 {
     switch (rc)
…
         Log(("Current stack %08x\n", &rc));
 
-        pVM->hwaccm.s.vmx.lasterror.ulLastInstrError = instrError;
-        pVM->hwaccm.s.vmx.lasterror.ulLastExitReason = exitReason;
+        pVCpu->hwaccm.s.vmx.lasterror.ulLastInstrError = instrError;
+        pVCpu->hwaccm.s.vmx.lasterror.ulLastExitReason = exitReason;
 
 #ifdef VBOX_STRICT
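A pattern that recurs through both the SVM and VMX hunks is pending-event reinjection, which is exactly the state (Event.fPending/intInfo/errCode) this changeset moves per-VCPU: when a VM exit cuts short an event the hypervisor had just injected, the exit-interruption information is stashed on the VCPU and replayed before the next VM entry. A hedged, self-contained sketch of that flow; the types are simplified stand-ins and injectEvent stands in for SVMR0InjectEvent/VMXR0InjectEvent:

    #include <stdint.h>

    /* Hypothetical, simplified per-VCPU event state (HWACCMCPU::Event). */
    typedef struct PENDINGEVENT
    {
        int      fPending;
        uint64_t intInfo;
        uint32_t errCode;
    } PENDINGEVENT;

    /* On VM exit: if delivery of an injected event was interrupted,
     * remember it on this VCPU. */
    static void recordInterruptedEvent(PENDINGEVENT *pEvent, int fValid,
                                       uint64_t intInfo, int fErrValid,
                                       uint32_t errCode)
    {
        if (fValid)
        {
            pEvent->fPending = 1;
            pEvent->intInfo  = intInfo;
            pEvent->errCode  = fErrValid ? errCode : 0;
        }
    }

    /* Before the next VM entry: replay the stashed event, then clear it. */
    static void reinjectPending(PENDINGEVENT *pEvent,
                                void (*injectEvent)(uint64_t intInfo, uint32_t errCode))
    {
        if (pEvent->fPending)
        {
            injectEvent(pEvent->intInfo, pEvent->errCode);
            pEvent->fPending = 0;
        }
    }

Keeping this state per VM would let one VCPU's exit clobber another's pending event; per-VCPU storage is what makes the reinjection safe under SMP.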
trunk/src/VBox/VMM/VMMR0/HWVMXR0.h (r13879 → r13883)

  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
- * @param   pVCpu       VPCPU id.
+ * @param   pVCpu       The VMCPU to operate on.
  * @param   pCpu        CPU info struct
  */
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
- * @param   pVCpu       VPCPU id.
+ * @param   pVCpu       The VMCPU to operate on.
  * @param   pCtx        CPU context
  */
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
- * @param   pVCpu       VPCPU id.
+ * @param   pVCpu       The VMCPU to operate on.
  */
 VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu);
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
- * @param   pVCpu       VPCPU id.
+ * @param   pVCpu       The VMCPU to operate on.
  * @param   pCtx        Guest context
  */
…
  * @returns VBox status code.
  * @param   pVM         The VM to operate on.
- * @param   pVCpu       VPCPU id.
+ * @param   pVCpu       The VMCPU to operate on.
  * @param   pCtx        Guest context
  */
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp (r13796 → r13883)

     CHECK_MEMBER_ALIGNMENT(HWACCM, u64RegisterMask, 8);
     CHECK_MEMBER_ALIGNMENT(HWACCM, vmx.hostCR4, 8);
-    CHECK_MEMBER_ALIGNMENT(HWACCM, Event.intInfo, 8);
+    CHECK_MEMBER_ALIGNMENT(HWACCMCPU, Event.intInfo, 8);
     CHECK_MEMBER_ALIGNMENT(HWACCM, StatEntry, 8);