Changeset 18927 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Apr 16, 2009 11:41:38 AM
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 8 edited

The eight diffs below share one theme: ring-0 code that touches per-virtual-CPU state now takes a PVMCPU handle (or derives one via VMMGetCpu) instead of reaching that state through the shared PVM handle.
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
r16776 → r18927:

      /* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
-     int rc = CPUMHandleLazyFPU(pVM, pVCpu);
+     int rc = CPUMHandleLazyFPU(pVCpu);
      AssertRC(rc);
      Assert(CPUMIsGuestFPUStateActive(pVCpu));
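The pattern here repeats across the whole changeset: state that belongs to a single virtual CPU is now reached through the PVMCPU handle alone, so the redundant PVM parameter is dropped. A caller-side sketch of the lazy-FPU path after this change (hmR0HandleNM is a hypothetical wrapper name; the CPUM signatures are the ones visible in the diff above):

    /* Sketch: service a #NM (device-not-available) exit by restoring the
     * guest FPU/XMM state of the *current* VCPU only. */
    static int hmR0HandleNM(PVM pVM, PVMCPU pVCpu)
    {
        int rc = CPUMHandleLazyFPU(pVCpu);      /* load guest FPU state on demand */
        AssertRC(rc);
        Assert(CPUMIsGuestFPUStateActive(pVCpu));
        NOREF(pVM);                             /* the VM handle is no longer needed here */
        return rc;
    }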
trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp
r18008 → r18927:

      ASMAtomicWriteBool(&pCpu->fInUse, true);

-     pCtx = CPUMQueryGuestCtxPtrEx(pVM, pVCpu);
+     pCtx = CPUMQueryGuestCtxPtr(pVCpu);

      /* Always load the guest's FPU/XMM state on-demand. */
-     CPUMDeactivateGuestFPUState(pVM);
+     CPUMDeactivateGuestFPUState(pVCpu);

      /* Always load the guest's debug state on-demand. */
-     CPUMDeactivateGuestDebugState(pVM);
+     CPUMDeactivateGuestDebugState(pVCpu);

      /* Always reload the host context and the guest's CR0 register. (!!!!) */
…
      AssertReturn(!ASMAtomicReadBool(&HWACCMR0Globals.fSuspended), VERR_HWACCM_SUSPEND_PENDING);

-     pCtx = CPUMQueryGuestCtxPtrEx(pVM, pVCpu);
+     pCtx = CPUMQueryGuestCtxPtr(pVCpu);
      (the same replacement recurs at lines 1069 and 1126, before the pfnRunGuestCode call and in the 32/64 world-switch path)
…
       *
       * @param   pVM     The VM to operate on.
+      * @param   pVCpu   The VMCPU to operate on.
       * @param   pCtx    The context to format.
       */
-     VMMR0DECL(void) HWACCMDumpRegs(PVM pVM, PCPUMCTX pCtx)
+     VMMR0DECL(void) HWACCMDumpRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
…
      /*
       * Format the registers.
       */
-     if (CPUMIsGuestIn64BitCode(pVM, CPUMCTX2CORE(pCtx)))
+     if (CPUMIsGuestIn64BitCode(pVCpu, CPUMCTX2CORE(pCtx)))
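CPUMQueryGuestCtxPtrEx(pVM, pVCpu) could collapse to CPUMQueryGuestCtxPtr(pVCpu) because the guest register context is embedded in the VMCPU structure itself, so the VM handle carried no extra information. A self-contained toy model of that layout (simplified stand-in types, not the real VBox/vm.h definitions):

    #include <assert.h>

    typedef struct CPUMCTX { unsigned long rip, rsp; } CPUMCTX;

    /* Toy VMCPU: each virtual CPU embeds its own guest register context. */
    typedef struct VMCPU { CPUMCTX GuestCtx; } VMCPU;
    typedef struct VM    { unsigned cCPUs; VMCPU aCpus[8]; } VM;

    /* After the change the accessor needs only the VCPU, not the VM. */
    static CPUMCTX *QueryGuestCtxPtr(VMCPU *pVCpu)
    {
        return &pVCpu->GuestCtx;
    }

    int main(void)
    {
        VM vm = { .cCPUs = 2 };
        vm.aCpus[1].GuestCtx.rip = 0x1000;
        /* Two VCPUs, two distinct contexts; a per-VM lookup would be ambiguous. */
        assert(QueryGuestCtxPtr(&vm.aCpus[0]) != QueryGuestCtxPtr(&vm.aCpus[1]));
        assert(QueryGuestCtxPtr(&vm.aCpus[1])->rip == 0x1000);
        return 0;
    }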
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp
r18781 → r18927:

-     static int SVMR0InterpretInvpg(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uASID);
+     static int SVMR0InterpretInvpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uASID);
…
          enmShwPagingMode = PGMGetHostMode(pVM);
-         pVMCB->ctrl.u64NestedPagingCR3 = PGMGetNestedCR3(pVM, enmShwPagingMode);
+         pVMCB->ctrl.u64NestedPagingCR3 = PGMGetNestedCR3(pVCpu, enmShwPagingMode);
          Assert(pVMCB->ctrl.u64NestedPagingCR3);
          pVMCB->guest.u64CR3 = pCtx->cr3;
      else
-         pVMCB->guest.u64CR3 = PGMGetHyperCR3(pVM);
+         pVMCB->guest.u64CR3 = PGMGetHyperCR3(pVCpu);
          Assert(pVMCB->guest.u64CR3 || VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL));
…
      /* Sync the debug state now if any breakpoint is armed. */
      if (    (pCtx->dr[7] & (X86_DR7_ENABLED_MASK|X86_DR7_GD))
-         &&  !CPUMIsGuestDebugStateActive(pVM)
+         &&  !CPUMIsGuestDebugStateActive(pVCpu)
          &&  !DBGFIsStepping(pVM))
…
      if (VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
-         Log(("VM_FF_INHIBIT_INTERRUPTS at %RGv successor %RGv\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVM)));
-         if (pCtx->rip != EMGetInhibitInterruptsPC(pVM))
+         Log(("VM_FF_INHIBIT_INTERRUPTS at %RGv successor %RGv\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVM, pVCpu)));
+         if (pCtx->rip != EMGetInhibitInterruptsPC(pVM, pVCpu))
          /* Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here. */
…
      if (exitCode == (uint64_t)SVM_EXIT_INVALID)          /* Invalid guest state. */
-         HWACCMDumpRegs(pVM, pCtx);
+         HWACCMDumpRegs(pVM, pVCpu, pCtx);
…
          && pCtx->cr3 != pVMCB->guest.u64CR3)
-         CPUMSetGuestCR3(pVM, pVMCB->guest.u64CR3);
-         PGMUpdateCR3(pVM, pVMCB->guest.u64CR3);
+         CPUMSetGuestCR3(pVCpu, pVMCB->guest.u64CR3);
+         PGMUpdateCR3(pVM, pVCpu, pVMCB->guest.u64CR3);
…
          Log(("uInterruptState %x rip=%RGv\n", pVMCB->ctrl.u64IntShadow, (RTGCPTR)pCtx->rip));
-         EMSetInhibitInterruptsPC(pVM, pCtx->rip);
+         EMSetInhibitInterruptsPC(pVM, pVCpu, pCtx->rip);
…
      /* Forward it to our trap handler first, in case our shadow pages are out of sync. */
-     rc = PGMTrap0eHandler(pVM, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
+     rc = PGMTrap0eHandler(pVM, pVCpu, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress);
…
-     rc = PGMR0Trap0eHandlerNestedPaging(pVM, enmShwPagingMode, errCode, CPUMCTX2CORE(pCtx), uFaultAddress);
+     rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, enmShwPagingMode, errCode, CPUMCTX2CORE(pCtx), uFaultAddress);
…
-     rc = EMInterpretCpuId(pVM, CPUMCTX2CORE(pCtx));
+     rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx));
      (EMInterpretRdtsc, EMInterpretRdpmc and EMInterpretRdtscp gain the same pVCpu argument in the neighbouring exit handlers)
…
      /* Truly a pita. Why can't SVM give the same information as VT-x? */
-     rc = SVMR0InterpretInvpg(pVM, CPUMCTX2CORE(pCtx), pVMCB->ctrl.TLBCtrl.n.u32ASID);
+     rc = SVMR0InterpretInvpg(pVM, pVCpu, CPUMCTX2CORE(pCtx), pVMCB->ctrl.TLBCtrl.n.u32ASID);
…
-     rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);
+     rc = EMInterpretInstruction(pVM, pVCpu, CPUMCTX2CORE(pCtx), 0, &cbSize);
      (five occurrences: the CRx write, CRx read, DRx write, DRx read and MSR access exit handlers)
…
-         rc = PGMSyncCR3(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR3(pVM), CPUMGetGuestCR4(pVM), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
+         rc = PGMSyncCR3(pVM, pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
          AssertRC(rc);

          /* Must be set by PGMSyncCR3 */
-         Assert(rc != VINF_SUCCESS || PGMGetGuestMode(pVM) <= PGMMODE_PROTECTED || pVCpu->hwaccm.s.fForceTLBFlush);
+         Assert(rc != VINF_SUCCESS || PGMGetGuestMode(pVCpu) <= PGMMODE_PROTECTED || pVCpu->hwaccm.s.fForceTLBFlush);
…
      /* Disassemble manually to deal with segment prefixes. */
-     rc = EMInterpretDisasOne(pVM, CPUMCTX2CORE(pCtx), &Cpu, NULL);
+     rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu, NULL);
…
      SVM_EVENT Event;

-     Assert(CPUMIsGuestDebugStateActive(pVM));
+     Assert(CPUMIsGuestDebugStateActive(pVCpu));

      /* Clear all breakpoint status flags and set the one we just hit. */
…
      /* Note: the intel manual claims there's a REX version of RDMSR that's slightly different, so we play safe by completely disassembling the instruction. */
      Log(("SVM: %s\n", (pVMCB->ctrl.u64ExitInfo1 == 0) ? "rdmsr" : "wrmsr"));
      (the EMInterpretInstruction call below it gains pVCpu as noted above)
…
      /* Signal changes for the recompiler. */
-     CPUMSetChangedFlags(pVM, CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_LDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_TR | CPUM_CHANGED_HIDDEN_SEL_REGS);
+     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_LDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_TR | CPUM_CHANGED_HIDDEN_SEL_REGS);

      /* If we executed vmrun and an external irq was pending, then we don't have to do a full sync the next time. */
…
      /* Save the guest debug state if necessary. */
-     if (CPUMIsGuestDebugStateActive(pVM))
+     if (CPUMIsGuestDebugStateActive(pVCpu))
          CPUMR0SaveGuestDebugState(pVM, pVCpu, pCtx, false /* skip DR6 */);
…
-     static int svmR0InterpretInvlPg(PVM pVM, PDISCPUSTATE pCpu, PCPUMCTXCORE pRegFrame, uint32_t uASID)
+     static int svmR0InterpretInvlPg(PVM pVM, PVMCPU pVCpu, PDISCPUSTATE pCpu, PCPUMCTXCORE pRegFrame, uint32_t uASID)
…
      /* (in absence of segment override prefixes)???? */
-     rc = PGMInvalidatePage(pVM, addr);
+     rc = PGMInvalidatePage(pVM, pVCpu, addr);
      if (RT_SUCCESS(rc))
…
      Updates the EIP if an instruction was executed successfully.
-     static int SVMR0InterpretInvpg(PVM pVM, PCPUMCTXCORE pRegFrame, uint32_t uASID)
+     static int SVMR0InterpretInvpg(PVM pVM, PVMCPU pVCpu, PCPUMCTXCORE pRegFrame, uint32_t uASID)
…
      Cpu.mode = enmMode;
-     rc = EMInterpretDisasOneEx(pVM, pbCode, pRegFrame, &Cpu, &cbOp);
+     rc = EMInterpretDisasOneEx(pVM, pVCpu, pbCode, pRegFrame, &Cpu, &cbOp);
      Assert(RT_FAILURE(rc) || Cpu.pCurInstr->opcode == OP_INVLPG);
      if (RT_SUCCESS(rc) && Cpu.pCurInstr->opcode == OP_INVLPG)
          Assert(cbOp == Cpu.opsize);
-         rc = svmR0InterpretInvlPg(pVM, &Cpu, pRegFrame, uASID);
+         rc = svmR0InterpretInvlPg(pVM, pVCpu, &Cpu, pRegFrame, uASID);
…
      #if HC_ARCH_BITS == 32
      /* If we get a flush in 64 bits guest mode, then force a full TLB flush. Invlpga takes only 32 bits addresses. */
-     if (CPUMIsGuestInLongMode(pVM))
+     if (CPUMIsGuestInLongMode(pVCpu))
          pVCpu->hwaccm.s.fForceTLBFlush = true;
…
-     /* @todo This code is not guest SMP safe (hyper context) */
+     /* @todo This code is not guest SMP safe (hyper stack) */
      AssertReturn(pVM->cCPUs == 1, VERR_ACCESS_DENIED);
      Assert(pfnHandler);
…
      uFlags = ASMIntDisableFlags();

-     CPUMSetHyperESP(pVM, VMMGetStackRC(pVM));
-     CPUMSetHyperEIP(pVM, pfnHandler);
+     CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVM));
+     CPUMSetHyperEIP(pVCpu, pfnHandler);
      for (int i=(int)cbParam-1;i>=0;i--)
-         CPUMPushHyper(pVM, paParam[i]);
+         CPUMPushHyper(pVCpu, paParam[i]);

      STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
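Most edits in this file are mechanical parameter changes, but the PGMSyncCR3 hunk also changes where the control registers come from: they are read straight out of the guest context instead of through the per-VM CPUMGetGuestCR0/CR3/CR4 accessors. A sketch of the resulting call pattern (signature as shown in the hunk; the final argument's role as a "global sync" flag is an assumption inferred from its VM_FF_PGM_SYNC_CR3 derivation):

    /* Sketch: resync shadow paging when a CR3 sync force-flag is pending.
     * pCtx already holds this VCPU's CR0/CR3/CR4, so no CPUM round-trips. */
    if (VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        int rc2 = PGMSyncCR3(pVM, pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4,
                             VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3)); /* assumed: global-sync flag */
        AssertRC(rc2);
    }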
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp
r18781 → r18927:

      for (unsigned i=0;i<4;i++)
-         Pdpe = PGMGstGetPaePDPtr(pVM, i);
+         Pdpe = PGMGstGetPaePDPtr(pVCpu, i);
          int rc = VMXWriteVMCS64(VMX_VMCS_GUEST_PDPTR0_FULL + i*2, Pdpe.u);
          AssertRC(rc);
…
      if (pVM->hwaccm.s.vmx.pRealModeTSS)
-         PGMMODE enmGuestMode = PGMGetGuestMode(pVM);
+         PGMMODE enmGuestMode = PGMGetGuestMode(pVCpu);
          if (pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode != enmGuestMode)
…
      if (pVM->hwaccm.s.fNestedPaging)
-         Assert(PGMGetHyperCR3(pVM));
-         pVCpu->hwaccm.s.vmx.GCPhysEPTP = PGMGetHyperCR3(pVM);
+         Assert(PGMGetHyperCR3(pVCpu));
+         pVCpu->hwaccm.s.vmx.GCPhysEPTP = PGMGetHyperCR3(pVCpu);
          Assert(!(pVCpu->hwaccm.s.vmx.GCPhysEPTP & 0xfff));
      else
-         val = PGMGetHyperCR3(pVM);
+         val = PGMGetHyperCR3(pVCpu);
          Assert(val || VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL));
…
      /* Sync the debug state now if any breakpoint is armed. */
      if (    (pCtx->dr[7] & (X86_DR7_ENABLED_MASK|X86_DR7_GD))
-         &&  !CPUMIsGuestDebugStateActive(pVM)
+         &&  !CPUMIsGuestDebugStateActive(pVCpu)
          &&  !DBGFIsStepping(pVM))
…
      Assert(uInterruptState <= 2);    /* only sti & mov ss */
      Log(("uInterruptState %x eip=%RGv\n", uInterruptState, pCtx->rip));
-     EMSetInhibitInterruptsPC(pVM, pCtx->rip);
+     EMSetInhibitInterruptsPC(pVM, pVCpu, pCtx->rip);
…
      VMXReadCachedVMCS(VMX_VMCS64_GUEST_CR0, &val);
      val = (valShadow & pVCpu->hwaccm.s.vmx.cr0_mask) | (val & ~pVCpu->hwaccm.s.vmx.cr0_mask);
-     CPUMSetGuestCR0(pVM, val);
+     CPUMSetGuestCR0(pVCpu, val);

      VMXReadCachedVMCS(VMX_VMCS_CTRL_CR4_READ_SHADOW, &valShadow);
      VMXReadCachedVMCS(VMX_VMCS64_GUEST_CR4, &val);
      val = (valShadow & pVCpu->hwaccm.s.vmx.cr4_mask) | (val & ~pVCpu->hwaccm.s.vmx.cr4_mask);
-     CPUMSetGuestCR4(pVM, val);
+     CPUMSetGuestCR4(pVCpu, val);

      /* Note: no reason to sync back the CRx registers. They can't be changed by the guest. */
…
      /* Can be updated behind our back in the nested paging case. */
-     CPUMSetGuestCR2(pVM, pCache->cr2);
+     CPUMSetGuestCR2(pVCpu, pCache->cr2);

      VMXReadCachedVMCS(VMX_VMCS64_GUEST_CR3, &val);
      if (val != pCtx->cr3)
-         CPUMSetGuestCR3(pVM, val);
-         PGMUpdateCR3(pVM, val);
+         CPUMSetGuestCR3(pVCpu, val);
+         PGMUpdateCR3(pVM, pVCpu, val);
      /* Prefetch the four PDPT entries in PAE mode. */
…
      if (VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
-         Log(("VM_FF_INHIBIT_INTERRUPTS at %RGv successor %RGv\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVM)));
-         if (pCtx->rip != EMGetInhibitInterruptsPC(pVM))
+         Log(("VM_FF_INHIBIT_INTERRUPTS at %RGv successor %RGv\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVM, pVCpu)));
+         if (pCtx->rip != EMGetInhibitInterruptsPC(pVM, pVCpu))
          /* Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here. */
…
      if (exitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE)
-         HWACCMDumpRegs(pVM, pCtx);
+         HWACCMDumpRegs(pVM, pVCpu, pCtx);
…
      /* Forward it to our trap handler first, in case our shadow pages are out of sync. */
-     rc = PGMTrap0eHandler(pVM, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)exitQualification);
+     rc = PGMTrap0eHandler(pVM, pVCpu, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)exitQualification);
…
      LogFlow(("Real mode X86_XCPT_GP instruction emulation at %RGv\n", (RTGCPTR)pCtx->rip));
-     rc = EMInterpretDisasOne(pVM, CPUMCTX2CORE(pCtx), &Cpu, &cbOp);
+     rc = EMInterpretDisasOne(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu, &cbOp);
…
      default:
-         rc = EMInterpretInstructionCPU(pVM, &Cpu, CPUMCTX2CORE(pCtx), 0, &cbSize);
+         rc = EMInterpretInstructionCPU(pVM, pVCpu, &Cpu, CPUMCTX2CORE(pCtx), 0, &cbSize);
          break;
…
      /* Handle the pagefault trap for the nested shadow table. */
-     rc = PGMR0Trap0eHandlerNestedPaging(pVM, PGMMODE_EPT, errCode, CPUMCTX2CORE(pCtx), GCPhys);
+     rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, errCode, CPUMCTX2CORE(pCtx), GCPhys);
…
-     rc = EMInterpretCpuId(pVM, CPUMCTX2CORE(pCtx));
+     rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx));
      (the same pVCpu argument is added to EMInterpretRdpmc, EMInterpretRdtsc, EMInterpretInvlpg, EMInterpretInstruction for RDMSR/WRMSR, EMInterpretCRxWrite, EMInterpretCRxRead, EMInterpretLMSW, EMInterpretDRxWrite, EMInterpretDRxRead and the segment-prefix EMInterpretDisasOne in their respective exit handlers; EMInterpretCLTS(pVM) becomes EMInterpretCLTS(pVM, pVCpu))
…
          && VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
-         rc = PGMSyncCR3(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR3(pVM), CPUMGetGuestCR4(pVM), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
+         rc = PGMSyncCR3(pVM, pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
          AssertRC(rc);
…
      uint64_t uDR6;

-     Assert(CPUMIsGuestDebugStateActive(pVM));
+     Assert(CPUMIsGuestDebugStateActive(pVCpu));

      uDR6 = ASMGetDR6();
…
      /* Signal changes for the recompiler. */
-     CPUMSetChangedFlags(pVM, CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_LDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_TR | CPUM_CHANGED_HIDDEN_SEL_REGS);
+     CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_LDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_TR | CPUM_CHANGED_HIDDEN_SEL_REGS);

      /* If we executed vmlaunch/vmresume and an external irq was pending, then we don't have to do a full sync the next time. */
…
      /* Save the guest debug state if necessary. */
-     if (CPUMIsGuestDebugStateActive(pVM))
+     if (CPUMIsGuestDebugStateActive(pVCpu))
          CPUMR0SaveGuestDebugState(pVM, pVCpu, pCtx, true /* save DR6 */);
…
      #if HC_ARCH_BITS == 32
      /* If we get a flush in 64 bits guest mode, then force a full TLB flush. Invvpid probably takes only 32 bits addresses. (@todo) */
-     if (    CPUMIsGuestInLongMode(pVM)
+     if (    CPUMIsGuestInLongMode(pVCpu)
          &&  !VMX_IS_64BIT_HOST_MODE())
…
-     /* @todo This code is not guest SMP safe (hyper context) */
+     /* @todo This code is not guest SMP safe (hyper stack) */
      AssertReturn(pVM->cCPUs == 1, VERR_ACCESS_DENIED);
      AssertReturn(pVM->hwaccm.s.pfnHost32ToGuest64R0, VERR_INTERNAL_ERROR);
…
      ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);

-     CPUMSetHyperESP(pVM, VMMGetStackRC(pVM));
-     CPUMSetHyperEIP(pVM, pfnHandler);
+     CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVM));
+     CPUMSetHyperEIP(pVCpu, pfnHandler);
      for (int i=(int)cbParam-1;i>=0;i--)
-         CPUMPushHyper(pVM, paParam[i]);
+         CPUMPushHyper(pVCpu, paParam[i]);

      STAM_PROFILE_ADV_START(&pVCpu->hwaccm.s.StatWorldSwitch3264, z);
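On the guest-state save path the VMCS values are folded together with the CR0/CR4 read shadows before being pushed into the per-VCPU CPUM state. A condensed sketch of the CR0 merge (the CR4 variant is fully visible in the hunk above; the VMX_VMCS_CTRL_CR0_READ_SHADOW read is inferred by symmetry and should be treated as an assumption):

    RTGCUINTREG val, valShadow;

    /* Bits covered by the CR0 mask are owned by the host; take them from
     * the read shadow, the rest from the hardware VMCS value. */
    VMXReadCachedVMCS(VMX_VMCS_CTRL_CR0_READ_SHADOW, &valShadow);  /* assumed by symmetry with CR4 */
    VMXReadCachedVMCS(VMX_VMCS64_GUEST_CR0, &val);
    val = (valShadow & pVCpu->hwaccm.s.vmx.cr0_mask) | (val & ~pVCpu->hwaccm.s.vmx.cr0_mask);
    CPUMSetGuestCR0(pVCpu, val);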
trunk/src/VBox/VMM/VMMR0/PDMR0Device.cpp
r18666 → r18927:

      #include <VBox/mm.h>
      #include <VBox/vm.h>
+     #include <VBox/vmm.h>
      #include <VBox/patm.h>
      #include <VBox/hwaccm.h>
…
      static DECLCALLBACK(PVM)    pdmR0DevHlp_GetVM(PPDMDEVINS pDevIns);
      static DECLCALLBACK(bool)   pdmR0DevHlp_CanEmulateIoBlock(PPDMDEVINS pDevIns);
+     static DECLCALLBACK(PVMCPU) pdmR0DevHlp_GetVMCPU(PPDMDEVINS pDevIns);
      /** @} */
…
          pdmR0DevHlp_GetVM,
          pdmR0DevHlp_CanEmulateIoBlock,
+         pdmR0DevHlp_GetVMCPU,
          PDM_DEVHLPR0_VERSION
      };
…
      LogFlow(("pdmR0DevHlp_A20IsEnabled: caller=%p/%d:\n", pDevIns, pDevIns->iInstance));

-     bool fEnabled = PGMPhysIsA20Enabled(pDevIns->Internal.s.pVMR0);
+     bool fEnabled = PGMPhysIsA20Enabled(VMMGetCpu(pDevIns->Internal.s.pVMR0));

      Log(("pdmR0DevHlp_A20IsEnabled: caller=%p/%d: returns %RTbool\n", pDevIns, pDevIns->iInstance, fEnabled));
…
      PDMDEV_ASSERT_DEVINS(pDevIns);
      LogFlow(("pdmR0DevHlp_GetVM: caller='%p'/%d\n", pDevIns, pDevIns->iInstance));
-     return HWACCMCanEmulateIoBlock(pDevIns->Internal.s.pVMR0);
-     }
+     return HWACCMCanEmulateIoBlock(VMMGetCpu(pDevIns->Internal.s.pVMR0));
+     }
+
+
+     /** @copydoc PDMDEVHLPR0::pfnGetVMCPU */
+     static DECLCALLBACK(PVMCPU) pdmR0DevHlp_GetVMCPU(PPDMDEVINS pDevIns)
+     {
+         PDMDEV_ASSERT_DEVINS(pDevIns);
+         LogFlow(("pdmR0DevHlp_GetVMCPU: caller='%p'/%d\n", pDevIns, pDevIns->iInstance));
+         return VMMGetCpu(pDevIns->Internal.s.pVMR0);
+     }
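The new pfnGetVMCPU helper lets ring-0 device code discover which VCPU is currently calling into it. A hedged usage sketch (the device function, its wiring, and the pDevHlpR0 member name are assumptions based on PDM conventions of this period, not part of the changeset):

    /* Sketch: an R0 MMIO write handler that needs the calling VCPU,
     * e.g. to route an access to a per-CPU device register. */
    static DECLCALLBACK(int) myDevMMIOWrite(PPDMDEVINS pDevIns, void *pvUser,
                                            RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
    {
        PVMCPU pVCpu = pDevIns->pDevHlpR0->pfnGetVMCPU(pDevIns); /* member name assumed */
        Log(("myDevMMIOWrite: pVCpu=%p GCPhys=%RGp cb=%u\n", pVCpu, GCPhysAddr, cb));
        NOREF(pvUser); NOREF(pv);
        return VINF_SUCCESS;
    }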
trunk/src/VBox/VMM/VMMR0/PGMR0.cpp
r18617 → r18927:

       * @returns VBox status code (appropriate for trap handling and GC return).
       * @param   pVM                 VM Handle.
+      * @param   pVCpu               VMCPU Handle.
       * @param   enmShwPagingMode    Paging mode for the nested page tables
       * @param   uErr                The trap error code.
       * @param   pvFault             The fault address.
       */
-     VMMR0DECL(int) PGMR0Trap0eHandlerNestedPaging(PVM pVM, PGMMODE enmShwPagingMode, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPHYS pvFault)
+     VMMR0DECL(int) PGMR0Trap0eHandlerNestedPaging(PVM pVM, PVMCPU pVCpu, PGMMODE enmShwPagingMode, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPHYS pvFault)
…
      LogFlow(("PGMTrap0eHandler: uErr=%#x pvFault=%RGp eip=%RGv\n", uErr, pvFault, (RTGCPTR)pRegFrame->rip));
-     STAM_PROFILE_START(&pVM->pgm.s.StatRZTrap0e, a);
-     STAM_STATS({ pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );
+     STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0e, a);
+     STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = NULL; } );

      /* AMD uses the host's paging mode; Intel has a single mode (EPT). */
…
      (in the fault-attribution block every counter moves likewise from pVM->pgm.s to pVCpu->pgm.s: StatRZTrap0eUSNotPresentWrite, StatRZTrap0eUSNotPresentRead, StatRZTrap0eUSWrite, StatRZTrap0eUSReserved, StatRZTrap0eUSNXE, StatRZTrap0eUSRead, StatRZTrap0eSVNotPresentWrite, StatRZTrap0eSVNotPresentRead, StatRZTrap0eSVWrite, StatRZTrap0eSNXE and StatRZTrap0eSVReserved)
…
          case PGMMODE_32_BIT:
-             rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVM, uErr, pRegFrame, pvFault);
+             rc = PGM_BTH_NAME_32BIT_PROT(Trap0eHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault);
              break;
          case PGMMODE_PAE:
          case PGMMODE_PAE_NX:
-             rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVM, uErr, pRegFrame, pvFault);
+             rc = PGM_BTH_NAME_PAE_PROT(Trap0eHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault);
              break;
          case PGMMODE_AMD64:
          case PGMMODE_AMD64_NX:
-             rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVM, uErr, pRegFrame, pvFault);
+             rc = PGM_BTH_NAME_AMD64_PROT(Trap0eHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault);
              break;
          case PGMMODE_EPT:
-             rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVM, uErr, pRegFrame, pvFault);
+             rc = PGM_BTH_NAME_EPT_PROT(Trap0eHandler)(pVM, pVCpu, uErr, pRegFrame, pvFault);
              break;
          default:
…
      if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
          rc = VINF_SUCCESS;
-     STAM_STATS({ if (!pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
-                      pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVM->pgm.s.StatRZTrap0eTime2Misc; });
-     STAM_PROFILE_STOP_EX(&pVM->pgm.s.StatRZTrap0e, pVM->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
+     STAM_STATS({ if (!pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution))
+                      pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2Misc; });
+     STAM_PROFILE_STOP_EX(&pVCpu->pgm.s.StatRZTrap0e, pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution), a);
      return rc;
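Moving the StatRZTrap0e* statistics from pVM->pgm.s to pVCpu->pgm.s gives every virtual CPU a private counter on the hot page-fault path, so no cache line is shared between CPUs. A self-contained toy of the idea (plain integers instead of the real STAMCOUNTER/STAMPROFILE types):

    #include <stdint.h>

    typedef struct PGMCPUSTATS { uint64_t cTrap0e; } PGMCPUSTATS;
    typedef struct VMCPU { PGMCPUSTATS pgm; } VMCPU;
    typedef struct VM    { unsigned cCPUs; VMCPU aCpus[8]; } VM;

    /* Hot path: each VCPU bumps only its own counter, no atomics required. */
    static void pgmRecordTrap0e(VMCPU *pVCpu)
    {
        pVCpu->pgm.cTrap0e++;
    }

    /* Reporting can still produce a whole-VM figure by summing the parts. */
    static uint64_t pgmSumTrap0e(const VM *pVM)
    {
        uint64_t c = 0;
        for (unsigned i = 0; i < pVM->cCPUs; i++)
            c += pVM->aCpus[i].pgm.cTrap0e;
        return c;
    }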
trunk/src/VBox/VMM/VMMR0/PGMR0Bth.h
r8977 → r18927:

      __BEGIN_DECLS
-     PGM_BTH_DECL(int, Trap0eHandler)(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
+     PGM_BTH_DECL(int, Trap0eHandler)(PVM pVM, PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
      __END_DECLS
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r18666 → r18927:

      VMMR0DECL(void) VMMR0EntryFast(PVM pVM, unsigned idCpu, VMMR0OPERATION enmOperation)
      {
+         PVMCPU pVCpu = &pVM->aCpus[idCpu];
+
          if (RT_UNLIKELY(idCpu >= pVM->cCPUs))
…
      #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-         if (RT_UNLIKELY(!PGMGetHyperCR3(pVM)))
+         if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
          {
              pVM->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
…
      #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-         if (RT_UNLIKELY(!PGMGetHyperCR3(pVM)))
+         if (RT_UNLIKELY(!PGMGetHyperCR3(VMMGetCpu0(pVM))))
              return VERR_PGM_NO_CR3_SHADOW_ROOT;
      #endif
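The entry point now resolves the per-CPU structure once, straight out of the aCpus array embedded in the VM structure. A self-contained model of that lookup with the bounds check placed before the indexing (a defensive ordering; the committed code computes the pointer first and relies on the subsequent idCpu check):

    #include <stddef.h>

    typedef struct VMCPU { unsigned idCpu; } VMCPU;
    typedef struct VM    { unsigned cCPUs; VMCPU aCpus[8]; } VM;

    /* Resolve the VMCPU for a ring-0 entry call; NULL signals a bad index. */
    static VMCPU *vmmGetCpuById(VM *pVM, unsigned idCpu)
    {
        if (idCpu >= pVM->cCPUs)
            return NULL;            /* reject before touching aCpus[idCpu] */
        return &pVM->aCpus[idCpu];
    }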