Changeset 43469 in vbox for trunk/src/VBox
Timestamp:
    Sep 28, 2012, 3:40:14 PM (12 years ago)
Location:
    trunk/src/VBox/VMM
Files:
    3 edited

Legend:
    unchanged lines are unprefixed; removed lines are prefixed with "-", added lines with "+"
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r43468 → r43469)
@@ 294-329 @@
     if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
     {
-        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjMSRBitmap, PAGE_SIZE, false /* fExecutable */);
+        rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, PAGE_SIZE, false /* fExecutable */);
         AssertRC(rc);
         if (RT_FAILURE(rc))
             return rc;

-        pVCpu->hm.s.vmx.pvMSRBitmap     = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjMSRBitmap);
-        pVCpu->hm.s.vmx.HCPhysMSRBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjMSRBitmap, 0);
-        memset(pVCpu->hm.s.vmx.pvMSRBitmap, 0xff, PAGE_SIZE);
+        pVCpu->hm.s.vmx.pvMsrBitmap     = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjMsrBitmap);
+        pVCpu->hm.s.vmx.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjMsrBitmap, 0);
+        memset(pVCpu->hm.s.vmx.pvMsrBitmap, 0xff, PAGE_SIZE);
     }

 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
     /* Allocate one page for the guest MSR load area (for preloading guest MSRs during the world switch). */
-    rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjGuestMSR, PAGE_SIZE, false /* fExecutable */);
+    rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjGuestMsr, PAGE_SIZE, false /* fExecutable */);
     AssertRC(rc);
     if (RT_FAILURE(rc))
         return rc;

-    pVCpu->hm.s.vmx.pvGuestMSR     = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjGuestMSR);
-    pVCpu->hm.s.vmx.HCPhysGuestMSR = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjGuestMSR, 0);
-    Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMSR & 0xf));
-    memset(pVCpu->hm.s.vmx.pvGuestMSR, 0, PAGE_SIZE);
+    pVCpu->hm.s.vmx.pvGuestMsr     = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjGuestMsr);
+    pVCpu->hm.s.vmx.HCPhysGuestMsr = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjGuestMsr, 0);
+    Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf));
+    memset(pVCpu->hm.s.vmx.pvGuestMsr, 0, PAGE_SIZE);

     /* Allocate one page for the host MSR load area (for restoring host MSRs after the world switch back). */
-    rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjHostMSR, PAGE_SIZE, false /* fExecutable */);
+    rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjHostMsr, PAGE_SIZE, false /* fExecutable */);
     AssertRC(rc);
     if (RT_FAILURE(rc))
         return rc;

-    pVCpu->hm.s.vmx.pvHostMSR     = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjHostMSR);
-    pVCpu->hm.s.vmx.HCPhysHostMSR = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjHostMSR, 0);
-    Assert(!(pVCpu->hm.s.vmx.HCPhysHostMSR & 0xf));
-    memset(pVCpu->hm.s.vmx.pvHostMSR, 0, PAGE_SIZE);
+    pVCpu->hm.s.vmx.pvHostMsr     = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjHostMsr);
+    pVCpu->hm.s.vmx.HCPhysHostMsr = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjHostMsr, 0);
+    Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf));
+    memset(pVCpu->hm.s.vmx.pvHostMsr, 0, PAGE_SIZE);
 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
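The Assert(!(... & 0xf)) checks in the hunk above follow from the Intel-defined format of the auto-load/store MSR areas: each entry is 16 bytes, so the area itself only needs 16-byte alignment. A minimal sketch of one entry, assuming the layout from the Intel SDM (the struct and field names here are illustrative, not VirtualBox's; the in-tree type used further down in this diff is VMXMSR):

    #include <stdint.h>

    /* One entry in the VMX auto-load/store MSR area (illustrative names). */
    typedef struct MSRAREAENTRY
    {
        uint32_t u32Msr;       /* MSR index, e.g. 0xC0000080 for IA32_EFER */
        uint32_t u32Reserved;  /* must be zero */
        uint64_t u64Value;     /* loaded on VM-entry / stored on VM-exit */
    } MSRAREAENTRY;

A 4 KB page therefore holds PAGE_SIZE / sizeof(MSRAREAENTRY) = 256 entries, which bounds how many guest/host MSRs the world switch can load and restore automatically.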
@@ 366-390 @@
         pVCpu->hm.s.vmx.HCPhysVAPIC = 0;
     }
-    if (pVCpu->hm.s.vmx.hMemObjMSRBitmap != NIL_RTR0MEMOBJ)
+    if (pVCpu->hm.s.vmx.hMemObjMsrBitmap != NIL_RTR0MEMOBJ)
     {
-        RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjMSRBitmap, false);
-        pVCpu->hm.s.vmx.hMemObjMSRBitmap = NIL_RTR0MEMOBJ;
-        pVCpu->hm.s.vmx.pvMSRBitmap      = 0;
-        pVCpu->hm.s.vmx.HCPhysMSRBitmap  = 0;
+        RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjMsrBitmap, false);
+        pVCpu->hm.s.vmx.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
+        pVCpu->hm.s.vmx.pvMsrBitmap      = 0;
+        pVCpu->hm.s.vmx.HCPhysMsrBitmap  = 0;
     }
 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
-    if (pVCpu->hm.s.vmx.hMemObjHostMSR != NIL_RTR0MEMOBJ)
+    if (pVCpu->hm.s.vmx.hMemObjHostMsr != NIL_RTR0MEMOBJ)
     {
-        RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjHostMSR, false);
-        pVCpu->hm.s.vmx.hMemObjHostMSR = NIL_RTR0MEMOBJ;
-        pVCpu->hm.s.vmx.pvHostMSR      = 0;
-        pVCpu->hm.s.vmx.HCPhysHostMSR  = 0;
+        RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjHostMsr, false);
+        pVCpu->hm.s.vmx.hMemObjHostMsr = NIL_RTR0MEMOBJ;
+        pVCpu->hm.s.vmx.pvHostMsr      = 0;
+        pVCpu->hm.s.vmx.HCPhysHostMsr  = 0;
     }
-    if (pVCpu->hm.s.vmx.hMemObjGuestMSR != NIL_RTR0MEMOBJ)
+    if (pVCpu->hm.s.vmx.hMemObjGuestMsr != NIL_RTR0MEMOBJ)
     {
-        RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjGuestMSR, false);
-        pVCpu->hm.s.vmx.hMemObjGuestMSR = NIL_RTR0MEMOBJ;
-        pVCpu->hm.s.vmx.pvGuestMSR      = 0;
-        pVCpu->hm.s.vmx.HCPhysGuestMSR  = 0;
+        RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjGuestMsr, false);
+        pVCpu->hm.s.vmx.hMemObjGuestMsr = NIL_RTR0MEMOBJ;
+        pVCpu->hm.s.vmx.pvGuestMsr      = 0;
+        pVCpu->hm.s.vmx.HCPhysGuestMsr  = 0;
     }
 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
@@ 570-574 @@
     if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
     {
-        Assert(pVCpu->hm.s.vmx.HCPhysMSRBitmap);
+        Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
         val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS;
     }
@@ 658-664 @@
     if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
     {
-        Assert(pVCpu->hm.s.vmx.HCPhysMSRBitmap);

-        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMSRBitmap);
+        Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);

+        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
         AssertRC(rc);
@@ 684-694 @@
     /*
      * Set the guest & host MSR load/store physical addresses.
      */
-    Assert(pVCpu->hm.s.vmx.HCPhysGuestMSR);
-    rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMSR);
+    Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
+    rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
     AssertRC(rc);
-    rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMSR);
+    rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
     AssertRC(rc);
-    Assert(pVCpu->hm.s.vmx.HCPhysHostMSR);
-    rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMSR);
+    Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
+    rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
     AssertRC(rc);
 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
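Note that the guest-MSR page is wired up twice: as the VM-entry MSR-load area and as the VM-exit MSR-store area, so guest MSR values round-trip through the same page across a world switch. The address fields also come in pairs with VMCS count fields; only the entry-load count write appears later in this diff (the hunk at line 2210). A hedged sketch of the full set, where VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT is taken from this diff and the two exit-side symbol names are assumed:

    /* Counts must match the number of valid entries in each area
     * (cMsrs corresponds to what this changeset renames to cCachedMsrs). */
    rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT, cMsrs);  /* guest MSRs loaded on VM-entry */
    rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_STORE_COUNT, cMsrs);  /* guest MSRs stored on VM-exit (assumed name) */
    rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_LOAD_COUNT, cMsrs);   /* host MSRs loaded on VM-exit (assumed name) */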
@@ 808-848 @@
 {
     unsigned ulBit;
-    uint8_t *pvMSRBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMSRBitmap;
+    uint8_t *pvMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;

     /*
…
         /* AMD Sixth Generation x86 Processor MSRs */
         ulBit = (ulMSR - 0xC0000000);
-        pvMSRBitmap += 0x400;
+        pvMsrBitmap += 0x400;
     }
     else
…
     Assert(ulBit <= 0x1fff);
     if (fRead)
-        ASMBitClear(pvMSRBitmap, ulBit);
+        ASMBitClear(pvMsrBitmap, ulBit);
     else
-        ASMBitSet(pvMSRBitmap, ulBit);
+        ASMBitSet(pvMsrBitmap, ulBit);

     if (fWrite)
-        ASMBitClear(pvMSRBitmap + 0x800, ulBit);
+        ASMBitClear(pvMsrBitmap + 0x800, ulBit);
     else
-        ASMBitSet(pvMSRBitmap + 0x800, ulBit);
+        ASMBitSet(pvMsrBitmap + 0x800, ulBit);
 }
@@ 1356-1360 @@
      * the world switch back to the host.
      */
-    PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvHostMSR;
+    PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvHostMsr;
     unsigned idxMsr = 0;
@@ 2152-2156 @@
      * during VM-entry and restored into the VM-exit store area during VM-exit.
      */
-    PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMSR;
+    PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
     unsigned idxMsr = 0;
@@ 2206-2210 @@
     }

-    pVCpu->hm.s.vmx.cCachedMSRs = idxMsr;
+    pVCpu->hm.s.vmx.cCachedMsrs = idxMsr;

     rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT, idxMsr);
@@ 2405-2411 @@
      * Save the possibly changed MSRs that we automatically restore and save during a world switch.
      */
-    for (unsigned i = 0; i < pVCpu->hm.s.vmx.cCachedMSRs; i++)
+    for (unsigned i = 0; i < pVCpu->hm.s.vmx.cCachedMsrs; i++)
     {
-        PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMSR;
+        PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
         pMsr += i;
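The 0x400 and 0x800 offsets in the bitmap hunk above (lines 808-848) follow the fixed layout of the 4 KB MSR bitmap in the Intel SDM: read-intercept bits for low MSRs (0x00000000-0x00001FFF) start at offset 0x000, read bits for high MSRs (0xC0000000-0xC0001FFF) at 0x400, and the corresponding write-intercept halves sit 0x800 bytes further, at 0x800 and 0xC00. A standalone sketch of the same logic, with a hypothetical function name and plain C bit twiddling in place of the IPRT ASMBitClear/ASMBitSet helpers:

    #include <stdint.h>

    /* A clear bit means the guest may access the MSR without a VM-exit;
     * the page is memset to 0xff at allocation, i.e. intercept everything. */
    static void msrBitmapSetPermission(uint8_t *pbBitmap, uint32_t uMsr, int fAllowRead, int fAllowWrite)
    {
        uint32_t offBase;
        uint32_t iBit;
        if (uMsr <= 0x00001fff)
        {
            offBase = 0x000;                /* low-MSR bitmaps */
            iBit    = uMsr;
        }
        else if (uMsr >= 0xc0000000 && uMsr <= 0xc0001fff)
        {
            offBase = 0x400;                /* high-MSR bitmaps */
            iBit    = uMsr - 0xc0000000;
        }
        else
            return;                         /* MSRs outside both ranges always exit */

        uint8_t *pbRead  = pbBitmap + offBase;          /* read bitmap  */
        uint8_t *pbWrite = pbBitmap + offBase + 0x800;  /* write bitmap */
        if (fAllowRead)
            pbRead[iBit / 8]  &= (uint8_t)~(1u << (iBit % 8));  /* allow RDMSR */
        else
            pbRead[iBit / 8]  |= (uint8_t) (1u << (iBit % 8));  /* intercept RDMSR */
        if (fAllowWrite)
            pbWrite[iBit / 8] &= (uint8_t)~(1u << (iBit % 8));  /* allow WRMSR */
        else
            pbWrite[iBit / 8] |= (uint8_t) (1u << (iBit % 8));  /* intercept WRMSR */
    }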
trunk/src/VBox/VMM/VMMR3/HM.cpp (r43468 → r43469)
@@ 1114-1118 @@
     for (VMCPUID i = 0; i < pVM->cCpus; i++)
     {
-        LogRel(("HM: VCPU%d: MSR bitmap physaddr = %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMSRBitmap));
+        LogRel(("HM: VCPU%d: MSR bitmap physaddr = %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMsrBitmap));
         LogRel(("HM: VCPU%d: VMCS physaddr = %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysVMCS));
     }
@@ 2688-2696 @@
     if (pVM->aCpus[i].hm.s.vmx.lasterror.ulInstrError == VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS)
     {
-        LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d MSRBitmapPhys %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMSRBitmap));
+        LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d MSRBitmapPhys %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysMsrBitmap));
 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
-        LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d GuestMSRPhys %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysGuestMSR));
-        LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d HostMsrPhys %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysHostMSR));
-        LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d Cached MSRs %x\n", i, pVM->aCpus[i].hm.s.vmx.cCachedMSRs));
+        LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d GuestMSRPhys %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysGuestMsr));
+        LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d HostMsrPhys %RHp\n", i, pVM->aCpus[i].hm.s.vmx.HCPhysHostMsr));
+        LogRel(("VERR_VMX_UNABLE_TO_START_VM: Cpu%d Cached MSRs %x\n", i, pVM->aCpus[i].hm.s.vmx.cCachedMsrs));
 #endif
     }
trunk/src/VBox/VMM/include/HMInternal.h (r43468 → r43469)
@@ 336-366 @@
     R0PTRTYPE(uint8_t *)        pbApicAccess;

-    /** R0 memory object for the MSR entry load page (guest MSRs). */
-    RTR0MEMOBJ                  hMemObjMSREntryLoad;
-    /** Physical address of the MSR entry load page (guest MSRs). */
-    RTHCPHYS                    HCPhysMSREntryLoad;
-    /** Virtual address of the MSR entry load page (guest MSRs). */
-    R0PTRTYPE(void *)           pvMSREntryLoad;
-
 #ifdef VBOX_WITH_CRASHDUMP_MAGIC
     RTR0MEMOBJ                  hMemObjScratch;
…
     R0PTRTYPE(uint8_t *)        pbScratch;
 #endif
-    /** R0 memory object for the MSR exit store page (guest MSRs). */
-    RTR0MEMOBJ                  hMemObjMSRExitStore;
-    /** Physical address of the MSR exit store page (guest MSRs). */
-    RTHCPHYS                    HCPhysMSRExitStore;
-    /** Virtual address of the MSR exit store page (guest MSRs). */
-    R0PTRTYPE(void *)           pvMSRExitStore;
-
-    /** R0 memory object for the MSR exit load page (host MSRs). */
-    RTR0MEMOBJ                  hMemObjMSRExitLoad;
-    /** Physical address of the MSR exit load page (host MSRs). */
-    RTHCPHYS                    HCPhysMSRExitLoad;
-    /** Virtual address of the MSR exit load page (host MSRs). */
-    R0PTRTYPE(void *)           pvMSRExitLoad;
-
     /** Ring 0 handlers for VT-x. */
     DECLR0CALLBACKMEMBER(void, pfnSetupTaggedTLB, (PVM pVM, PVMCPU pVCpu));
@@ 605-632 @@
     RTHCPHYS                    GCPhysEPTP;

-    /** Physical address of the MSR bitmap (1 page). */
-    RTHCPHYS                    HCPhysMSRBitmap;
-    /** R0 memory object for the MSR bitmap (1 page). */
-    RTR0MEMOBJ                  hMemObjMSRBitmap;
-    /** Virtual address of the MSR bitmap (1 page). */
-    R0PTRTYPE(void *)           pvMSRBitmap;
+    /** Physical address of the MSR bitmap. */
+    RTHCPHYS                    HCPhysMsrBitmap;
+    /** R0 memory object for the MSR bitmap. */
+    RTR0MEMOBJ                  hMemObjMsrBitmap;
+    /** Virtual address of the MSR bitmap. */
+    R0PTRTYPE(void *)           pvMsrBitmap;

 #ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
-    /** Physical address of the guest MSR load area (1 page). */
-    RTHCPHYS                    HCPhysGuestMSR;
-    /** R0 memory object for the guest MSR load area (1 page). */
-    RTR0MEMOBJ                  hMemObjGuestMSR;
-    /** Virtual address of the guest MSR load area (1 page). */
-    R0PTRTYPE(void *)           pvGuestMSR;
-
-    /** Physical address of the MSR load area (1 page). */
-    RTHCPHYS                    HCPhysHostMSR;
-    /** R0 memory object for the MSR load area (1 page). */
-    RTR0MEMOBJ                  hMemObjHostMSR;
-    /** Virtual address of the MSR load area (1 page). */
-    R0PTRTYPE(void *)           pvHostMSR;
+    /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area
+     *  (used for guest MSRs). */
+    RTHCPHYS                    HCPhysGuestMsr;
+    /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area
+     *  (used for guest MSRs). */
+    RTR0MEMOBJ                  hMemObjGuestMsr;
+    /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area
+     *  (used for guest MSRs). */
+    R0PTRTYPE(void *)           pvGuestMsr;
+
+    /** Physical address of the VM-exit MSR-load area (used for host MSRs). */
+    RTHCPHYS                    HCPhysHostMsr;
+    /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */
+    RTR0MEMOBJ                  hMemObjHostMsr;
+    /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */
+    R0PTRTYPE(void *)           pvHostMsr;

     /* Number of automatically loaded/restored guest MSRs during the world switch. */
-    uint32_t                    cCachedMSRs;
+    uint32_t                    cCachedMsrs;
     uint32_t                    uAlignment;
 #endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
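The updated doxygen comments make explicit why the guest area has no separate store page (and why the old pvMSRExitStore members above could be deleted): the CPU stores the guest's current MSR values back into the very page it loaded them from, so after a VM-exit the host side only has to read that page. A minimal sketch of that consumption step, assuming the illustrative MSRAREAENTRY layout sketched earlier (it mirrors the truncated HWVMXR0.cpp hunk at lines 2405-2411; storeGuestMsr is a hypothetical helper, not a VirtualBox API):

    /* After the world switch: pull the values the CPU stored on VM-exit
     * back into the guest's CPU context. */
    MSRAREAENTRY const *paMsrs = (MSRAREAENTRY const *)pVCpu->hm.s.vmx.pvGuestMsr;
    for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cCachedMsrs; i++)
        storeGuestMsr(pVCpu, paMsrs[i].u32Msr, paMsrs[i].u64Value);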