Changeset 37323 in vbox

Timestamp: Jun 3, 2011 4:20:06 PM (14 years ago)
Location:  trunk/src/VBox/VMM
Files:     6 edited
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r37320 → r37323)

@@ 179 @@
     {
         /* Allocate one page for the APIC physical page (serves for filtering accesses). */
-        rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.vmx.pMemObjAPIC, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
+        rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.vmx.pMemObjAPIC, PAGE_SIZE, true /* executable R0 mapping */);
         AssertRC(rc);
         if (RT_FAILURE(rc))

@@ 197 @@
 #ifdef VBOX_WITH_CRASHDUMP_MAGIC
     {
-        rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.vmx.pMemObjScratch, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
+        rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.vmx.pMemObjScratch, PAGE_SIZE, true /* executable R0 mapping */);
         AssertRC(rc);
         if (RT_FAILURE(rc))

@@ 216 @@
         PVMCPU pVCpu = &pVM->aCpus[i];

-        pVCpu->hwaccm.s.vmx.pMemObjVMCS = NIL_RTR0MEMOBJ;
+        pVCpu->hwaccm.s.vmx.hMemObjVMCS = NIL_RTR0MEMOBJ;

         /* Allocate one page for the VM control structure (VMCS). */
-        rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjVMCS, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
+        rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.hMemObjVMCS, PAGE_SIZE, true /* executable R0 mapping */);
         AssertRC(rc);
         if (RT_FAILURE(rc))
             return rc;

-        pVCpu->hwaccm.s.vmx.pVMCS     = RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjVMCS);
-        pVCpu->hwaccm.s.vmx.pVMCSPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjVMCS, 0);
-        ASMMemZero32(pVCpu->hwaccm.s.vmx.pVMCS, PAGE_SIZE);
+        pVCpu->hwaccm.s.vmx.pvVMCS     = RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.hMemObjVMCS);
+        pVCpu->hwaccm.s.vmx.HCPhysVMCS = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.hMemObjVMCS, 0);
+        ASMMemZeroPage(pVCpu->hwaccm.s.vmx.pvVMCS);

         pVCpu->hwaccm.s.vmx.cr0_mask = 0;

@@ 232 @@

         /* Allocate one page for the virtual APIC page for TPR caching. */
-        rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjVAPIC, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
+        rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.hMemObjVAPIC, PAGE_SIZE, true /* executable R0 mapping */);
         AssertRC(rc);
         if (RT_FAILURE(rc))
             return rc;

-        pVCpu->hwaccm.s.vmx.pVAPIC     = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjVAPIC);
-        pVCpu->hwaccm.s.vmx.pVAPICPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjVAPIC, 0);
-        ASMMemZero32(pVCpu->hwaccm.s.vmx.pVAPIC, PAGE_SIZE);
+        pVCpu->hwaccm.s.vmx.pbVAPIC     = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.hMemObjVAPIC);
+        pVCpu->hwaccm.s.vmx.HCPhysVAPIC = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.hMemObjVAPIC, 0);
+        ASMMemZeroPage(pVCpu->hwaccm.s.vmx.pbVAPIC);

         /* Allocate the MSR bitmap if this feature is supported. */
         if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
         {
-            rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
+            rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap, PAGE_SIZE, true /* executable R0 mapping */);
             AssertRC(rc);
             if (RT_FAILURE(rc))

@@ 256 @@
 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
         /* Allocate one page for the guest MSR load area (for preloading guest MSRs during the world switch). */
-        rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjGuestMSR, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
+        rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjGuestMSR, PAGE_SIZE, true /* executable R0 mapping */);
         AssertRC(rc);
         if (RT_FAILURE(rc))

@@ 266 @@

         /* Allocate one page for the host MSR load area (for restoring host MSRs after the world switch back). */
-        rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjHostMSR, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
+        rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjHostMSR, PAGE_SIZE, true /* executable R0 mapping */);
         AssertRC(rc);
         if (RT_FAILURE(rc))

@@ 280 @@

 #ifdef LOG_ENABLED
-        SUPR0Printf("VMXR0InitVM %x VMCS=%x (%x)\n", pVM, pVCpu->hwaccm.s.vmx.pVMCS, (uint32_t)pVCpu->hwaccm.s.vmx.pVMCSPhys);
+        SUPR0Printf("VMXR0InitVM %x VMCS=%x (%x)\n", pVM, pVCpu->hwaccm.s.vmx.pvVMCS, (uint32_t)pVCpu->hwaccm.s.vmx.HCPhysVMCS);
 #endif
     }

@@ 299 @@
         PVMCPU pVCpu = &pVM->aCpus[i];

-        if (pVCpu->hwaccm.s.vmx.pMemObjVMCS != NIL_RTR0MEMOBJ)
-        {
-            RTR0MemObjFree(pVCpu->hwaccm.s.vmx.pMemObjVMCS, false);
-            pVCpu->hwaccm.s.vmx.pMemObjVMCS = NIL_RTR0MEMOBJ;
-            pVCpu->hwaccm.s.vmx.pVMCS       = 0;
-            pVCpu->hwaccm.s.vmx.pVMCSPhys   = 0;
-        }
-        if (pVCpu->hwaccm.s.vmx.pMemObjVAPIC != NIL_RTR0MEMOBJ)
-        {
-            RTR0MemObjFree(pVCpu->hwaccm.s.vmx.pMemObjVAPIC, false);
-            pVCpu->hwaccm.s.vmx.pMemObjVAPIC = NIL_RTR0MEMOBJ;
-            pVCpu->hwaccm.s.vmx.pVAPIC       = 0;
-            pVCpu->hwaccm.s.vmx.pVAPICPhys   = 0;
+        if (pVCpu->hwaccm.s.vmx.hMemObjVMCS != NIL_RTR0MEMOBJ)
+        {
+            RTR0MemObjFree(pVCpu->hwaccm.s.vmx.hMemObjVMCS, false);
+            pVCpu->hwaccm.s.vmx.hMemObjVMCS = NIL_RTR0MEMOBJ;
+            pVCpu->hwaccm.s.vmx.pvVMCS      = 0;
+            pVCpu->hwaccm.s.vmx.HCPhysVMCS  = 0;
+        }
+        if (pVCpu->hwaccm.s.vmx.hMemObjVAPIC != NIL_RTR0MEMOBJ)
+        {
+            RTR0MemObjFree(pVCpu->hwaccm.s.vmx.hMemObjVAPIC, false);
+            pVCpu->hwaccm.s.vmx.hMemObjVAPIC = NIL_RTR0MEMOBJ;
+            pVCpu->hwaccm.s.vmx.pbVAPIC      = 0;
+            pVCpu->hwaccm.s.vmx.HCPhysVAPIC  = 0;
         }
         if (pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap != NIL_RTR0MEMOBJ)

@@ 374 @@
         PVMCPU pVCpu = &pVM->aCpus[i];

-        Assert(pVCpu->hwaccm.s.vmx.pVMCS);
+        AssertPtr(pVCpu->hwaccm.s.vmx.pvVMCS);

         /* Set revision dword at the beginning of the VMCS structure. */
-        *(uint32_t *)pVCpu->hwaccm.s.vmx.pVMCS = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);
+        *(uint32_t *)pVCpu->hwaccm.s.vmx.pvVMCS = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);

         /* Clear VM Control Structure. */
-        Log(("pVMCSPhys  = %RHp\n", pVCpu->hwaccm.s.vmx.pVMCSPhys));
-        rc = VMXClearVMCS(pVCpu->hwaccm.s.vmx.pVMCSPhys);
+        Log(("HCPhysVMCS = %RHp\n", pVCpu->hwaccm.s.vmx.HCPhysVMCS));
+        rc = VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
         if (RT_FAILURE(rc))
             goto vmx_end;

         /* Activate the VM Control Structure. */
-        rc = VMXActivateVMCS(pVCpu->hwaccm.s.vmx.pVMCSPhys);
+        rc = VMXActivateVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
         if (RT_FAILURE(rc))
             goto vmx_end;

@@ 561 @@
         /* Optional */
         rc  = VMXWriteVMCS(VMX_VMCS_CTRL_TPR_THRESHOLD, 0);
-        rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hwaccm.s.vmx.pVAPICPhys);
+        rc |= VMXWriteVMCS64(VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hwaccm.s.vmx.HCPhysVAPIC);

         if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)

@@ 574 @@

     /* Clear VM Control Structure. Marking it inactive, clearing implementation specific data and writing back VMCS data to memory. */
-    rc = VMXClearVMCS(pVCpu->hwaccm.s.vmx.pVMCSPhys);
+    rc = VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     AssertRC(rc);

@@ 2320 @@
 #endif

-    Assert(!(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) || (pVCpu->hwaccm.s.vmx.pVAPIC && pVM->hwaccm.s.vmx.pAPIC));
+    Assert(!(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) || (pVCpu->hwaccm.s.vmx.pbVAPIC && pVM->hwaccm.s.vmx.pAPIC));

     /* Check if we need to use TPR shadowing. */

@@ 2551 @@
         AssertRC(rc2);
         /* The TPR can be found at offset 0x80 in the APIC mmio page. */
-        pVCpu->hwaccm.s.vmx.pVAPIC[0x80] = u8LastTPR;
+        pVCpu->hwaccm.s.vmx.pbVAPIC[0x80] = u8LastTPR;

         /* Two options here:

@@ 2717 @@
     {
         Assert(pVM->hwaccm.s.fTPRPatchingActive);
-        pVCpu->hwaccm.s.vmx.pVAPIC[0x80] = pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
+        pVCpu->hwaccm.s.vmx.pbVAPIC[0x80] = pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
         ASMWrMsr(MSR_K8_LSTAR, u64OldLSTAR);
     }

@@ 2817 @@
     /* Sync back the TPR if it was changed. */
     if (   fSetupTPRCaching
-        && u8LastTPR != pVCpu->hwaccm.s.vmx.pVAPIC[0x80])
-    {
-        rc2 = PDMApicSetTPR(pVCpu, pVCpu->hwaccm.s.vmx.pVAPIC[0x80]);
+        && u8LastTPR != pVCpu->hwaccm.s.vmx.pbVAPIC[0x80])
+    {
+        rc2 = PDMApicSetTPR(pVCpu, pVCpu->hwaccm.s.vmx.pbVAPIC[0x80]);
         AssertRC(rc2);
     }

@@ 4236 @@
     {
         VMXGetActivateVMCS(&pVCpu->hwaccm.s.vmx.lasterror.u64VMCSPhys);
-        pVCpu->hwaccm.s.vmx.lasterror.ulVMCSRevision = *(uint32_t *)pVCpu->hwaccm.s.vmx.pVMCS;
+        pVCpu->hwaccm.s.vmx.lasterror.ulVMCSRevision = *(uint32_t *)pVCpu->hwaccm.s.vmx.pvVMCS;
         pVCpu->hwaccm.s.vmx.lasterror.idEnteredCpu   = pVCpu->hwaccm.s.idEnteredCpu;
         pVCpu->hwaccm.s.vmx.lasterror.idCurrentCpu   = RTMpCpuId();

@@ 4278 @@

     /* Activate the VM Control Structure. */
-    int rc = VMXActivateVMCS(pVCpu->hwaccm.s.vmx.pVMCSPhys);
+    int rc = VMXActivateVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     if (RT_FAILURE(rc))
         return rc;

@@ 4324 @@

     /* Clear VM Control Structure. Marking it inactive, clearing implementation specific data and writing back VMCS data to memory. */
-    int rc = VMXClearVMCS(pVCpu->hwaccm.s.vmx.pVMCSPhys);
+    int rc = VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     AssertRC(rc);

@@ 4630 @@
 #ifdef DEBUG
     pCache->TestIn.HCPhysCpuPage = 0;
-    pCache->TestIn.pVMCSPhys     = 0;
+    pCache->TestIn.HCPhysVMCS    = 0;
     pCache->TestIn.pCache        = 0;
-    pCache->TestOut.pVMCSPhys    = 0;
+    pCache->TestOut.HCPhysVMCS   = 0;
     pCache->TestOut.pCache       = 0;
     pCache->TestOut.pCtx         = 0;

@@ 4640 @@
     aParam[0] = (uint32_t)(HCPhysCpuPage);                        /* Param 1: VMXON physical address - Lo. */
     aParam[1] = (uint32_t)(HCPhysCpuPage >> 32);                  /* Param 1: VMXON physical address - Hi. */
-    aParam[2] = (uint32_t)(pVCpu->hwaccm.s.vmx.pVMCSPhys);        /* Param 2: VMCS physical address - Lo. */
-    aParam[3] = (uint32_t)(pVCpu->hwaccm.s.vmx.pVMCSPhys >> 32);  /* Param 2: VMCS physical address - Hi. */
+    aParam[2] = (uint32_t)(pVCpu->hwaccm.s.vmx.HCPhysVMCS);       /* Param 2: VMCS physical address - Lo. */
+    aParam[3] = (uint32_t)(pVCpu->hwaccm.s.vmx.HCPhysVMCS >> 32); /* Param 2: VMCS physical address - Hi. */
     aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hwaccm.s.vmx.VMCSCache);
     aParam[5] = 0;

@@ 4659 @@
 #ifdef DEBUG
     AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
-    AssertMsg(pCache->TestIn.pVMCSPhys == pVCpu->hwaccm.s.vmx.pVMCSPhys, ("%RHp vs %RHp\n", pCache->TestIn.pVMCSPhys, pVCpu->hwaccm.s.vmx.pVMCSPhys));
-    AssertMsg(pCache->TestIn.pVMCSPhys == pCache->TestOut.pVMCSPhys, ("%RHp vs %RHp\n", pCache->TestIn.pVMCSPhys, pCache->TestOut.pVMCSPhys));
+    AssertMsg(pCache->TestIn.HCPhysVMCS == pVCpu->hwaccm.s.vmx.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS, pVCpu->hwaccm.s.vmx.HCPhysVMCS));
+    AssertMsg(pCache->TestIn.HCPhysVMCS == pCache->TestOut.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS, pCache->TestOut.HCPhysVMCS));
     AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache, pCache->TestOut.pCache));
     AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hwaccm.s.vmx.VMCSCache), ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hwaccm.s.vmx.VMCSCache)));

@@ 4707 @@

     /* Clear VM Control Structure. Marking it inactive, clearing implementation specific data and writing back VMCS data to memory. */
-    VMXClearVMCS(pVCpu->hwaccm.s.vmx.pVMCSPhys);
+    VMXClearVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);

     /* Leave VMX Root Mode. */

@@ 4736 @@
     }

-    rc2 = VMXActivateVMCS(pVCpu->hwaccm.s.vmx.pVMCSPhys);
+    rc2 = VMXActivateVMCS(pVCpu->hwaccm.s.vmx.HCPhysVMCS);
     AssertRC(rc2);
     Assert(!(ASMGetFlags() & X86_EFL_IF));
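Every hunk in HWVMXR0.cpp above follows the same ring-0 page handling pattern: allocate a page-sized contiguous memory object, record both its ring-0 virtual address and its host physical address, and zero the page; the changeset renames the fields to match (hMemObj*, pv*, pb*, HCPhys*), uses PAGE_SIZE instead of 1 << PAGE_SHIFT and ASMMemZeroPage instead of ASMMemZero32. A minimal sketch of that pattern, using hypothetical local names (hMemObj, pvPage, HCPhysPage) rather than the hwaccm fields touched above:

    /* Sketch only: allocate one executable ring-0 page and publish both of its addresses.
       Needs iprt/memobj.h and iprt/asm.h; ring-0 context. */
    RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
    int rc = RTR0MemObjAllocCont(&hMemObj, PAGE_SIZE, true /* executable R0 mapping */);
    if (RT_SUCCESS(rc))
    {
        void     *pvPage     = RTR0MemObjAddress(hMemObj);            /* ring-0 virtual address */
        RTHCPHYS  HCPhysPage = RTR0MemObjGetPagePhysAddr(hMemObj, 0); /* host physical address of page 0 */
        ASMMemZeroPage(pvPage);                                       /* page-sized equivalent of ASMMemZero32(pvPage, PAGE_SIZE) */

        /* ... hand HCPhysPage to the hardware (e.g. VMXActivateVMCS) and use pvPage from ring-0 ... */

        RTR0MemObjFree(hMemObj, false /* fFreeMappings */);
        hMemObj = NIL_RTR0MEMOBJ;
    }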
trunk/src/VBox/VMM/VMMR3/HWACCM.cpp (r36912 → r37323)

@@ 1095 @@
     {
         LogRel(("HWACCM: VCPU%d: MSR bitmap physaddr = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pMSRBitmapPhys));
-        LogRel(("HWACCM: VCPU%d: VMCS physaddr = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));
+        LogRel(("HWACCM: VCPU%d: VMCS physaddr = %RHp\n", i, pVM->aCpus[i].hwaccm.s.vmx.HCPhysVMCS));
     }

@@ 2593 @@

         case VERR_VMX_INVALID_VMCS_PTR:
-            LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current pointer %RGp vs %RGp\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.u64VMCSPhys, pVM->aCpus[i].hwaccm.s.vmx.pVMCSPhys));
+            LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current pointer %RGp vs %RGp\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.u64VMCSPhys, pVM->aCpus[i].hwaccm.s.vmx.HCPhysVMCS));
             LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Current VMCS version %x\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.ulVMCSRevision));
             LogRel(("VERR_VMX_INVALID_VMCS_PTR: CPU%d Entered Cpu %d\n", i, pVM->aCpus[i].hwaccm.s.vmx.lasterror.idEnteredCpu));
trunk/src/VBox/VMM/VMMRC/HWACCMRCA.asm (r35346 → r37323)

@@ 91 @@
 ; *
 ; * @returns VBox status code
-; * @param pPageCpuPhys   VMXON physical address  [rsp+8]
-; * @param pVMCSPhys      VMCS physical address   [rsp+16]
+; * @param HCPhysCpuPage  VMXON physical address  [rsp+8]
+; * @param HCPhysVMCS     VMCS physical address   [rsp+16]
 ; * @param pCache         VMCS cache              [rsp+24]
 ; * @param pCtx           Guest context (rsi)

@@ 145 @@

 %ifdef DEBUG
-    mov     rax, [rbp + 8 + 8]        ; pPageCpuPhys
-    mov     [rbx + VMCSCACHE.TestIn.pPageCpuPhys], rax
-    mov     rax, [rbp + 16 + 8]       ; pVMCSPhys
-    mov     [rbx + VMCSCACHE.TestIn.pVMCSPhys], rax
+    mov     rax, [rbp + 8 + 8]        ; HCPhysCpuPage
+    mov     [rbx + VMCSCACHE.TestIn.HCPhysCpuPage], rax
+    mov     rax, [rbp + 16 + 8]       ; HCPhysVMCS
+    mov     [rbx + VMCSCACHE.TestIn.HCPhysVMCS], rax
     mov     [rbx + VMCSCACHE.TestIn.pCache], rbx
     mov     [rbx + VMCSCACHE.TestIn.pCtx], rsi

@@ 361 @@
 %ifdef VMX_USE_CACHED_VMCS_ACCESSES
  %ifdef DEBUG
-    mov     rdx, [rsp]                ; pVMCSPhys
-    mov     [rdi + VMCSCACHE.TestOut.pVMCSPhys], rdx
+    mov     rdx, [rsp]                ; HCPhysVMCS
+    mov     [rdi + VMCSCACHE.TestOut.HCPhysVMCS], rdx
  %endif
 %endif
trunk/src/VBox/VMM/include/HWACCMInternal.h (r37320 → r37323)

@@ 517 @@
         struct
         {
-            RTHCPHYS    pPageCpuPhys;
-            RTHCPHYS    pVMCSPhys;
+            RTHCPHYS    HCPhysCpuPage;
+            RTHCPHYS    HCPhysVMCS;
             RTGCPTR     pCache;
             RTGCPTR     pCtx;

@@ 524 @@
         struct
         {
-            RTHCPHYS    pVMCSPhys;
+            RTHCPHYS    HCPhysVMCS;
             RTGCPTR     pCache;
             RTGCPTR     pCtx;

@@ 592 @@
     {
         /** Physical address of the VM control structure (VMCS). */
-        RTHCPHYS                    pVMCSPhys;
+        RTHCPHYS                    HCPhysVMCS;
         /** R0 memory object for the VM control structure (VMCS). */
-        RTR0MEMOBJ                  pMemObjVMCS;
+        RTR0MEMOBJ                  hMemObjVMCS;
         /** Virtual address of the VM control structure (VMCS). */
-        R0PTRTYPE(void *)           pVMCS;
+        R0PTRTYPE(void *)           pvVMCS;

         /** Ring 0 handlers for VT-x. */

@@ 612 @@

         /** Physical address of the virtual APIC page for TPR caching. */
-        RTHCPHYS                    pVAPICPhys;
+        RTHCPHYS                    HCPhysVAPIC;
         /** R0 memory object for the virtual APIC page for TPR caching. */
-        RTR0MEMOBJ                  pMemObjVAPIC;
+        RTR0MEMOBJ                  hMemObjVAPIC;
         /** Virtual address of the virtual APIC page for TPR caching. */
-        R0PTRTYPE(uint8_t *)        pVAPIC;
+        R0PTRTYPE(uint8_t *)        pbVAPIC;

         /** Current CR0 mask. */
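The header hunks bring the field names in line with the prefix convention used elsewhere in the VMM: HCPhys* for host-context physical addresses (RTHCPHYS), h* for handles such as RTR0MEMOBJ, pv* for untyped pointers and pb* for byte pointers. A hypothetical fragment illustrating the convention as applied here (the struct name is made up for the example; the types are the ones used above):

    typedef struct EXAMPLEVMXSTATE
    {
        RTHCPHYS              HCPhysVMCS;    /* host physical address of the VMCS page   */
        RTR0MEMOBJ            hMemObjVMCS;   /* handle of the backing R0 memory object   */
        R0PTRTYPE(void *)     pvVMCS;        /* ring-0 virtual mapping of the VMCS page  */
        R0PTRTYPE(uint8_t *)  pbVAPIC;       /* byte pointer into the virtual APIC page  */
    } EXAMPLEVMXSTATE;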
trunk/src/VBox/VMM/include/HWACCMInternal.mac (r35333 → r37323)

@@ 17 @@
 %define VMX_USE_CACHED_VMCS_ACCESSES

-;Maximum number of cached entries.
+;Maximum number of cached entries.
 %define VMCSCACHE_MAX_ENTRY      128

-; Structure for storing read and write VMCS actions.
+; Structure for storing read and write VMCS actions.
 struc VMCSCACHE
 %ifdef VBOX_WITH_CRASHDUMP_MAGIC

@@ 43 @@
     .Read.aFieldVal          resq VMCSCACHE_MAX_ENTRY
 %ifdef DEBUG
-    .TestIn.pPageCpuPhys     resq 1
-    .TestIn.pVMCSPhys        resq 1
+    .TestIn.HCPhysCpuPage    resq 1
+    .TestIn.HCPhysVMCS       resq 1
     .TestIn.pCache           resq 1
     .TestIn.pCtx             resq 1
-    .TestOut.pVMCSPhys       resq 1
+    .TestOut.HCPhysVMCS      resq 1
     .TestOut.pCache          resq 1
     .TestOut.pCtx            resq 1
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp (r36946 → r37323)

@@ 390 @@
     CHECK_MEMBER_ALIGNMENT(HWACCM, StatTPRPatchSuccess, 8);
     CHECK_MEMBER_ALIGNMENT(HWACCMCPU, StatEntry, 8);
-    CHECK_MEMBER_ALIGNMENT(HWACCMCPU, vmx.pVMCSPhys, sizeof(RTHCPHYS));
+    CHECK_MEMBER_ALIGNMENT(HWACCMCPU, vmx.HCPhysVMCS, sizeof(RTHCPHYS));
    CHECK_MEMBER_ALIGNMENT(HWACCMCPU, vmx.proc_ctls, 8);
     CHECK_MEMBER_ALIGNMENT(HWACCMCPU, Event.intInfo, 8);
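The testcase only needs the member rename; the check itself verifies that the field's offset inside HWACCMCPU is a multiple of sizeof(RTHCPHYS). A rough stand-alone illustration of what such an alignment check amounts to (a sketch, not the actual CHECK_MEMBER_ALIGNMENT macro from tstVMStructSize.cpp):

    #include <stddef.h>   /* offsetof */
    #include <stdio.h>

    /* Report members whose offset is not a multiple of the required alignment. */
    #define MY_CHECK_MEMBER_ALIGNMENT(type, member, align) \
        do { \
            if (offsetof(type, member) % (align)) \
                printf("error! %s::%s offset %u is not %u-byte aligned\n", \
                       #type, #member, (unsigned)offsetof(type, member), (unsigned)(align)); \
        } while (0)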