Changeset 66276 in vbox

- Timestamp: Mar 28, 2017 7:14:16 AM (8 years ago)
- svn:sync-xref-src-repo-rev: 114200
- Location: trunk
- Files: 7 edited
Legend:

- Unmodified: no prefix
- Added: '+'
- Removed: '-'
trunk/include/VBox/vmm/cpum.mac (r66227 → r66276)

     .hwvirt.svm.fGif              resb 1
     .hwvirt.svm.abPadding0        resb 7
+    .hwvirt.svm.pvMsrBitmapR0     RTR0PTR_RES 1
+    .hwvirt.svm.pvMsrBitmapR3     RTR3PTR_RES 1
+    .hwvirt.svm.pvIoBitmapR0      RTR0PTR_RES 1
+    .hwvirt.svm.pvIoBitmapR3      RTR3PTR_RES 1
+%if HC_ARCH_BITS == 32
+    .hwvirt.svm.abPadding1        resb 16
+%endif
     .hwvirt.fLocalForcedActions   resd 1
     alignb 64
trunk/include/VBox/vmm/cpumctx.h (r66227 → r66276)

         {
             /** 728 - MSR holding physical address of the Guest's Host-state. */
-            uint64_t        uMsrHSavePa;
+            uint64_t            uMsrHSavePa;
             /** 736 - Guest physical address of the nested-guest VMCB. */
-            RTGCPHYS        GCPhysVmcb;
+            RTGCPHYS            GCPhysVmcb;
             /** 744 - Cache of the nested-guest VMCB control area. */
-            SVMVMCBCTRL     VmcbCtrl;
+            SVMVMCBCTRL         VmcbCtrl;
             /** 1000 - Guest's host-state save area. */
-            SVMHOSTSTATE    HostState;
+            SVMHOSTSTATE        HostState;
             /** 1184 - Global interrupt flag. */
-            uint8_t         fGif;
+            uint8_t             fGif;
             /** 1185 - Padding. */
-            uint8_t         abPadding0[7];
+            uint8_t             abPadding0[7];
+            /** 1192 - MSR permission bitmap - R0 ptr. */
+            R0PTRTYPE(void *)   pvMsrBitmapR0;
+            /** 1200 / 1196 - MSR permission bitmap - R3 ptr. */
+            R3PTRTYPE(void *)   pvMsrBitmapR3;
+            /** 1208 / 1200 - IO permission bitmap - R0 ptr. */
+            R0PTRTYPE(void *)   pvIoBitmapR0;
+            /** 1216 / 1204 - IO permission bitmap - R3 ptr. */
+            R3PTRTYPE(void *)   pvIoBitmapR3;
+#if HC_ARCH_BITS == 32
+            /** NA / 1208 - Padding. */
+            uint8_t             abPadding1[16];
+#endif
         } svm;
 #if 0
…
         } CPUM_UNION_NM(s);
 
-        /** 1192 - A subset of force flags that are preserved while running
+        /** 1224 - A subset of force flags that are preserved while running
          *  the nested-guest. */
         uint32_t            fLocalForcedActions;
-        /** Padding. */
-        uint8_t             abPadding1[20];
+        /** 1228 - Padding. */
+        uint8_t             abPadding1[52];
     } hwvirt;
     /** @} */
…
 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.HostState,       1000);
 AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.fGif,            1184);
-AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) fLocalForcedActions, 1192);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvMsrBitmapR0,   1192);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvMsrBitmapR3,   HC_ARCH_BITS == 64 ? 1200 : 1196);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvIoBitmapR0,    HC_ARCH_BITS == 64 ? 1208 : 1200);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) svm.pvIoBitmapR3,    HC_ARCH_BITS == 64 ? 1216 : 1204);
+AssertCompileMemberOffset(CPUMCTX, hwvirt.CPUM_UNION_NM(s.) fLocalForcedActions, 1224);
 
 AssertCompileMembersAtSameOffset(CPUMCTX, CPUM_UNION_STRUCT_NM(g,qw.) rax, CPUMCTX, CPUM_UNION_NM(g.) aGRegs);
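The offsets in the doc comments and the AssertCompileMemberOffset lines are what keep this C layout in lock-step with the hand-maintained assembly mirror in cpum.mac above: any drift fails the build instead of corrupting state at run time. A stand-alone sketch of the same technique in plain C11 (the names here are illustrative, not VirtualBox's):

#include <assert.h>   /* static_assert (C11) */
#include <stddef.h>   /* offsetof */

/* Illustrative mirror of the pattern: documented offsets enforced at
   compile time, so an assembly include file can rely on them. */
typedef struct EXAMPLEHWVIRT
{
    unsigned char fGif;           /* offset 0, like .hwvirt.svm.fGif  */
    unsigned char abPadding0[7];  /* offset 1, pads the pointer to 8  */
    void         *pvMsrBitmap;    /* offset 8 on 32- and 64-bit hosts */
} EXAMPLEHWVIRT;

/* Equivalent of AssertCompileMemberOffset: fails the build on drift. */
static_assert(offsetof(EXAMPLEHWVIRT, pvMsrBitmap) == 8, "layout drifted");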
trunk/include/VBox/vmm/hm_svm.h (r66227 → r66276)

 /** Bit 13 - AVIC - Advanced Virtual Interrupt Controller. */
 #define AMD_CPUID_SVM_FEATURE_EDX_AVIC        RT_BIT(13)
+/** @} */
+
+/** @name SVM generic / convenient defines.
+ * @{
+ */
+/** Number of pages required for the VMCB. */
+#define SVM_VMCB_PAGES      1
+/** Number of pages required for the MSR permission bitmap. */
+#define SVM_MSRPM_PAGES     2
+/** Number of pages required for the IO permission bitmap. */
+#define SVM_IOPM_PAGES      3
 /** @} */
…
         uint32_t    u1IgnoreTPR     : 1;    /* V_IGN_TPR */
         uint32_t    u3Reserved      : 3;
-        uint32_t    u1VIrqMasking   : 1;    /* V_INTR_MASKING */
+        uint32_t    u1VIntrMasking  : 1;    /* V_INTR_MASKING */
         uint32_t    u6Reserved      : 6;
         uint32_t    u1AvicEnable    : 1;
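The new page-count defines encode sizes fixed by the AMD architecture: the VMCB is a single 4K page, the MSR permission bitmap is 8 KB with two intercept bits per covered MSR, and the IO permission bitmap is 12 KB with one bit per I/O port. A minimal sketch of how a read-intercept bit would be located in an MSRPM, following the AMD APM layout; the helper name is hypothetical, not a VirtualBox API:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: returns whether reads of MSR 'idMsr' are intercepted
   per an SVM MSR permission bitmap (SVM_MSRPM_PAGES * 4K bytes). Each
   covered MSR has two bits: bit 2n intercepts reads, bit 2n+1 writes. */
static bool svmMsrpmIsReadInterceptSet(uint8_t const *pbMsrpm, uint32_t idMsr)
{
    uint32_t offRegion;
    if (idMsr <= UINT32_C(0x00001fff))
        offRegion = 0x0000;   /* MSRs 0x00000000..0x00001fff */
    else if (idMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
        offRegion = 0x0800;   /* MSRs 0xc0000000..0xc0001fff */
    else if (idMsr - UINT32_C(0xc0010000) <= UINT32_C(0x1fff))
        offRegion = 0x1000;   /* MSRs 0xc0010000..0xc0011fff */
    else
        return true;          /* MSRs outside the map always intercept. */

    uint32_t const iBit = (idMsr & UINT32_C(0x1fff)) * 2; /* 2 bits per MSR */
    return (pbMsrpm[offRegion + iBit / 8] >> (iBit % 8)) & 1;
}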
trunk/src/VBox/VMM/VMMAll/HMSVMAll.cpp (r66254 → r66276)

     }
 
+    /*
+     * Update the exit interruption info field so that if an exception occurs
+     * while delivering the event causing a #VMEXIT, we only need to update
+     * the valid bit while the rest is already in place.
+     */
+    pVmcbCtrl->ExitIntInfo.u = pVmcbCtrl->EventInject.u;
+    pVmcbCtrl->ExitIntInfo.n.u1Valid = 0;
+
     /** @todo NRIP: Software interrupts can only be pushed properly if we support
      *        NRIP for the nested-guest to calculate the instruction length
      *        below. */
-    IEMInjectTrap(pVCpu, uVector, enmType, uErrorCode, pCtx->cr2, 0 /* cbInstr */);
+    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, uVector, enmType, uErrorCode, pCtx->cr2, 0 /* cbInstr */);
+    if (rcStrict == VINF_SVM_VMEXIT)
+        return rcStrict;
 }
…
         Assert(pCtx->hwvirt.svm.VmcbCtrl.IntCtrl.n.u8VIrqVector);
     }
-    /* Save V_TPR. */
-
+    /** @todo Save V_TPR, V_IRQ. */
     /** @todo NRIP. */
…
     /*
-     * Clear event injection.
+     * Clear event injection in the VMCB.
      */
     pCtx->hwvirt.svm.VmcbCtrl.EventInject.n.u1Valid = 0;
…
 }
+
+
+#if 0
+VMM_INT_DECL(int) HMSvmNstGstGetInterrupt(PVMCPU pVCpu)
+{
+    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
+    Assert(CPUMIsGuestInNestedHwVirtMode(pCtx));
+
+    PCSVMVMCBCTRL pVmcbCtrl = &pCtx->hwvirt.svm.VmcbCtrl;
+    Assert(RT_BOOL(pVmcbCtrl->IntCtrl.n.u1VIrqValid));
+
+    if (pVmcbCtrl->IntCtrl.n.u1VIntrMasking)
+    {
+    }
+}
+#endif
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r66227 → r66276)

      */
     Assert(g_hMemObjIOBitmap == NIL_RTR0MEMOBJ);
-    int rc = RTR0MemObjAllocCont(&g_hMemObjIOBitmap, 3 << PAGE_SHIFT, false /* fExecutable */);
+    int rc = RTR0MemObjAllocCont(&g_hMemObjIOBitmap, SVM_IOPM_PAGES << X86_PAGE_4K_SHIFT, false /* fExecutable */);
     if (RT_FAILURE(rc))
         return rc;
…
     /* Set all bits to intercept all IO accesses. */
-    ASMMemFill32(g_pvIOBitmap, 3 << PAGE_SHIFT, UINT32_C(0xffffffff));
+    ASMMemFill32(g_pvIOBitmap, SVM_IOPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));
     return VINF_SUCCESS;
 }
…
      * FS, GS, Kernel GS Base, etc.) apart from the host-state save area specified in MSR_K8_VM_HSAVE_PA.
      */
-    rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcbHost, 1 << PAGE_SHIFT, false /* fExecutable */);
+    rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcbHost, SVM_VMCB_PAGES << PAGE_SHIFT, false /* fExecutable */);
     if (RT_FAILURE(rc))
         goto failure_cleanup;
…
      * Allocate one page for the guest-state VMCB.
      */
-    rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcb, 1 << PAGE_SHIFT, false /* fExecutable */);
+    rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjVmcb, SVM_VMCB_PAGES << PAGE_SHIFT, false /* fExecutable */);
     if (RT_FAILURE(rc))
         goto failure_cleanup;
…
      * SVM to not require one.
      */
-    rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMsrBitmap, 2 << PAGE_SHIFT, false /* fExecutable */);
+    rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMsrBitmap, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT,
+                             false /* fExecutable */);
     if (RT_FAILURE(rc))
         goto failure_cleanup;
…
     pVCpu->hm.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjMsrBitmap, 0 /* iPage */);
     /* Set all bits to intercept all MSR accesses (changed later on). */
-    ASMMemFill32(pVCpu->hm.s.svm.pvMsrBitmap, 2 << PAGE_SHIFT, UINT32_C(0xffffffff));
+    ASMMemFill32(pVCpu->hm.s.svm.pvMsrBitmap, SVM_MSRPM_PAGES << X86_PAGE_4K_SHIFT, UINT32_C(0xffffffff));
 }
…
     /* Virtualize masking of INTR interrupts. (reads/writes from/to CR8 go to the V_TPR register) */
-    pVmcb->ctrl.IntCtrl.n.u1VIrqMasking = 1;
+    pVmcb->ctrl.IntCtrl.n.u1VIntrMasking = 1;
 
     /* Ignore the priority in the virtual TPR. This is necessary for delivering PIC style (ExtInt) interrupts
…
     /* Assume that we need to trap all TPR accesses and thus need not check on
        every #VMEXIT if we should update the TPR. */
-    Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqMasking);
+    Assert(pVmcb->ctrl.IntCtrl.n.u1VIntrMasking);
     pVCpu->hm.s.svm.fSyncVTpr = false;
…
     bool const fIntShadow = RT_BOOL(hmR0SvmGetGuestIntrShadow(pVCpu, pCtx));
     bool const fBlockInt  = !(pCtx->eflags.u32 & X86_EFL_IF);
-    bool const fBlockNmi  = RT_BOOL(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS));
+    bool const fBlockNmi  = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS);
     PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
…
     Log4(("ctrl.IntCtrl.u1IgnoreTPR                %#x\n", pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR));
     Log4(("ctrl.IntCtrl.u3Reserved                 %#x\n", pVmcb->ctrl.IntCtrl.n.u3Reserved));
-    Log4(("ctrl.IntCtrl.u1VIrqMasking              %#x\n", pVmcb->ctrl.IntCtrl.n.u1VIrqMasking));
+    Log4(("ctrl.IntCtrl.u1VIntrMasking             %#x\n", pVmcb->ctrl.IntCtrl.n.u1VIntrMasking));
     Log4(("ctrl.IntCtrl.u6Reserved                 %#x\n", pVmcb->ctrl.IntCtrl.n.u6Reserved));
     Log4(("ctrl.IntCtrl.u8VIrqVector               %#x\n", pVmcb->ctrl.IntCtrl.n.u8VIrqVector));
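The repeated 3 << PAGE_SHIFT → SVM_IOPM_PAGES << X86_PAGE_4K_SHIFT substitutions encode an architectural size: the AMD APM defines the IO permission bitmap as 12 KB covering ports 0..0xFFFF with one intercept bit per port (the third page exists so multi-byte accesses at the top of the range can still be checked). Filling every bit, as the ASMMemFill32 call above does, therefore intercepts all port I/O. A minimal lookup sketch; the helper is hypothetical, not VirtualBox code:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: tests the intercept bit for I/O port 'uPort' in an
   SVM IO permission bitmap (SVM_IOPM_PAGES * 4K bytes; bit index == port
   number). With every bit set, all port accesses cause a #VMEXIT. */
static bool svmIopmIsInterceptSet(uint8_t const *pbIopm, uint16_t uPort)
{
    return (pbIopm[uPort / 8] >> (uPort % 8)) & 1;
}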
trunk/src/VBox/VMM/VMMR3/CPUM.cpp (r65904 → r66276)

 /**
+ * Frees memory allocated by cpumR3AllocHwVirtState().
+ *
+ * @param   pVM     The cross context VM structure.
+ */
+static void cpumR3FreeHwVirtState(PVM pVM)
+{
+    if (pVM->cpum.ro.GuestFeatures.fSvm)
+    {
+        for (VMCPUID i = 0; i < pVM->cCpus; i++)
+        {
+            PVMCPU pVCpu = &pVM->aCpus[i];
+            if (pVCpu->cpum.s.Guest.hwvirt.svm.pvMsrBitmapR3)
+            {
+                SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.svm.pvMsrBitmapR3, SVM_MSRPM_PAGES);
+                pVCpu->cpum.s.Guest.hwvirt.svm.pvMsrBitmapR3 = NULL;
+            }
+
+            if (pVCpu->cpum.s.Guest.hwvirt.svm.pvIoBitmapR3)
+            {
+                SUPR3PageFreeEx(pVCpu->cpum.s.Guest.hwvirt.svm.pvIoBitmapR3, SVM_IOPM_PAGES);
+                pVCpu->cpum.s.Guest.hwvirt.svm.pvIoBitmapR3 = NULL;
+            }
+        }
+    }
+}
+
+
+/**
+ * Allocates memory required by the hardware virtualization state.
+ *
+ * @returns VBox status code.
+ * @param   pVM     The cross context VM structure.
+ */
+static int cpumR3AllocHwVirtState(PVM pVM)
+{
+    int rc = VINF_SUCCESS;
+    if (pVM->cpum.ro.GuestFeatures.fSvm)
+    {
+        for (VMCPUID i = 0; i < pVM->cCpus; i++)
+        {
+            PVMCPU pVCpu = &pVM->aCpus[i];
+
+            /*
+             * Allocate the MSRPM (MSR Permission bitmap).
+             */
+            Assert(!pVCpu->cpum.s.Guest.hwvirt.svm.pvMsrBitmapR3);
+            rc = SUPR3PageAllocEx(SVM_MSRPM_PAGES, 0 /* fFlags */, &pVCpu->cpum.s.Guest.hwvirt.svm.pvMsrBitmapR3,
+                                  &pVCpu->cpum.s.Guest.hwvirt.svm.pvMsrBitmapR0, NULL /* paPages */);
+            if (RT_FAILURE(rc))
+            {
+                Assert(!pVCpu->cpum.s.Guest.hwvirt.svm.pvMsrBitmapR3);
+                LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's MSR permission bitmap\n", pVCpu->idCpu,
+                        SVM_MSRPM_PAGES));
+                break;
+            }
+
+            /*
+             * Allocate the IOPM (IO Permission bitmap).
+             */
+            Assert(!pVCpu->cpum.s.Guest.hwvirt.svm.pvIoBitmapR3);
+            rc = SUPR3PageAllocEx(SVM_IOPM_PAGES, 0 /* fFlags */, &pVCpu->cpum.s.Guest.hwvirt.svm.pvIoBitmapR3,
+                                  &pVCpu->cpum.s.Guest.hwvirt.svm.pvIoBitmapR0, NULL /* paPages */);
+            if (RT_FAILURE(rc))
+            {
+                Assert(!pVCpu->cpum.s.Guest.hwvirt.svm.pvIoBitmapR3);
+                LogRel(("CPUM%u: Failed to alloc %u pages for the nested-guest's IO permission bitmap\n", pVCpu->idCpu,
+                        SVM_IOPM_PAGES));
+                break;
+            }
+        }
+
+        /* On any failure, cleanup. */
+        if (RT_FAILURE(rc))
+            cpumR3FreeHwVirtState(pVM);
+    }
+
+    return rc;
+}
+
+
+/**
  * Initializes the CPUM.
  *
…
     /*
-     * Setup hypervisor startup values.
-     */
+     * Allocate memory required by the hardware virtualization state.
+     */
+    rc = cpumR3AllocHwVirtState(pVM);
+    if (RT_FAILURE(rc))
+        return rc;
 
     /*
…
         pCtx->dr[5] = 0;
     }
-#else
-    NOREF(pVM);
 #endif
+
+    cpumR3FreeHwVirtState(pVM);
     return VINF_SUCCESS;
 }
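One detail worth noting: a single SUPR3PageAllocEx call hands back both the ring-3 mapping and a ring-0 mapping of the same physical pages, which is why CPUMCTX gained the bitmap pointers in R3/R0 pairs. A hedged sketch of the call pattern, using only the signature visible in the diff (VBox headers assumed; illustrative, not a drop-in):

#include <VBox/sup.h>   /* SUPR3PageAllocEx, SUPR3PageFreeEx */
#include <iprt/err.h>   /* RT_SUCCESS */

/* Sketch: allocate the 2-page MSRPM once, getting both mappings back. */
static int exampleAllocMsrpm(void **ppvR3, PRTR0PTR pR0Ptr)
{
    int rc = SUPR3PageAllocEx(SVM_MSRPM_PAGES, 0 /* fFlags */, ppvR3, pR0Ptr, NULL /* paPages */);
    if (RT_SUCCESS(rc))
    {
        /* Ring-3 code uses *ppvR3; ring-0 (HM) code uses *pR0Ptr. Freeing is
           by ring-3 pointer plus the same page count, as the new
           cpumR3FreeHwVirtState() does:
           SUPR3PageFreeEx(*ppvR3, SVM_MSRPM_PAGES); */
    }
    return rc;
}

Nulling each pointer after SUPR3PageFreeEx makes the free routine idempotent, which is what lets cpumR3AllocHwVirtState() simply call it on any partial-allocation failure.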
trunk/src/VBox/VMM/include/CPUMInternal.mac (r66227 → r66276)

     .Guest.hwvirt.svm.fGif               resb 1
     .Guest.hwvirt.svm.abPadding0         resb 7
+    .Guest.hwvirt.svm.pvMsrBitmapR0      RTR0PTR_RES 1
+    .Guest.hwvirt.svm.pvMsrBitmapR3      RTR3PTR_RES 1
+    .Guest.hwvirt.svm.pvIoBitmapR0       RTR0PTR_RES 1
+    .Guest.hwvirt.svm.pvIoBitmapR3       RTR3PTR_RES 1
+%if HC_ARCH_BITS == 32
+    .Guest.hwvirt.svm.abPadding1         resb 16
+%endif
     .Guest.hwvirt.fLocalForcedActions    resd 1
     alignb 64
…
     .Hyper.hwvirt.svm.fGif               resb 1
     .Hyper.hwvirt.svm.abPadding0         resb 7
+    .Hyper.hwvirt.svm.pvMsrBitmapR0      RTR0PTR_RES 1
+    .Hyper.hwvirt.svm.pvMsrBitmapR3      RTR3PTR_RES 1
+    .Hyper.hwvirt.svm.pvIoBitmapR0       RTR0PTR_RES 1
+    .Hyper.hwvirt.svm.pvIoBitmapR3       RTR3PTR_RES 1
+%if HC_ARCH_BITS == 32
+    .Hyper.hwvirt.svm.abPadding1         resb 16
+%endif
     .Hyper.hwvirt.fLocalForcedActions    resd 1
     alignb 64