VirtualBox

Changeset 46530 in vbox for trunk


Timestamp: Jun 13, 2013 1:32:16 PM
Author:    vboxsync
Message:   VMM/HMSVMR0: AMD-V bits, TPR shadowing, other adjustments.
Location:  trunk/src/VBox/VMM
Files:     3 edited

Legend:

      Unmodified (no prefix)
    + Added
    - Removed
  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r46514 → r46530

          /** The #VMEXIT exit code (the EXITCODE field in the VMCB). */
          uint64_t        u64ExitCode;
    +     /** The guest's TPR value used for TPR shadowing. */
    +     uint8_t         u8GuestTpr;
      } SVMTRANSIENT, *PSVMTRANSIENT;
      /** @}  */
     
       *
       * @param   pVCpu       Pointer to the VMCPU.
    -  * @param   uMsr        The MSR.
    -  * @param   fRead       Whether reading is allowed.
    -  * @param   fWrite      Whether writing is allowed.
    +  * @param   uMsr        The MSR for which the access permissions are being set.
    +  * @param   enmRead     MSR read permissions.
    +  * @param   enmWrite    MSR write permissions.
       */
      static void hmR0SvmSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, SVMMSREXITREAD enmRead, SVMMSREXITWRITE enmWrite)
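
    Background on what hmR0SvmSetMsrPermission manipulates: per the AMD APM, the MSR permission map (MSRPM)
    holds two bits per MSR (the low bit of each pair intercepts reads, the high bit intercepts writes) across
    three 2 KB regions covering MSRs 0x0-0x1FFF, 0xC0000000-0xC0001FFF and 0xC0010000-0xC0011FFF; MSRs outside
    these ranges are always intercepted. A minimal sketch of that lookup, with a hypothetical helper name (this
    is not the VirtualBox implementation):

        #include <stdint.h>

        /* Returns the bit offset of the read-intercept bit for uMsr inside the
         * 8 KB MSRPM, or UINT32_MAX if the MSR lies outside the mapped ranges.
         * The write-intercept bit is the returned offset + 1. */
        static uint32_t svmMsrpmBitOffset(uint32_t uMsr)
        {
            if (uMsr <= UINT32_C(0x00001fff))
                return uMsr * 2;                                        /* bytes 0x0000-0x07ff */
            if (uMsr - UINT32_C(0xc0000000) <= UINT32_C(0x1fff))
                return 0x0800 * 8 + (uMsr - UINT32_C(0xc0000000)) * 2;  /* bytes 0x0800-0x0fff */
            if (uMsr - UINT32_C(0xc0010000) <= UINT32_C(0x1fff))
                return 0x1000 * 8 + (uMsr - UINT32_C(0xc0010000)) * 2;  /* bytes 0x1000-0x17ff */
            return UINT32_MAX;                         /* unmapped MSRs are always intercepted */
        }

    Clearing both bits of a pair yields the pass-through behaviour requested with SVMMSREXIT_PASSTHRU_READ /
    SVMMSREXIT_PASSTHRU_WRITE in the hunks below.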
     
               * Don't intercept guest read/write accesses to these MSRs.
               */
    -         hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    -         hmR0SvmSetMsrPermission(pVCpu, MSR_K8_CSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    -         hmR0SvmSetMsrPermission(pVCpu, MSR_K6_STAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    -         hmR0SvmSetMsrPermission(pVCpu, MSR_K8_SF_MASK, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    -         hmR0SvmSetMsrPermission(pVCpu, MSR_K8_FS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    -         hmR0SvmSetMsrPermission(pVCpu, MSR_K8_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    +         hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    +         hmR0SvmSetMsrPermission(pVCpu, MSR_K8_CSTAR,          SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    +         hmR0SvmSetMsrPermission(pVCpu, MSR_K6_STAR,           SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    +         hmR0SvmSetMsrPermission(pVCpu, MSR_K8_SF_MASK,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    +         hmR0SvmSetMsrPermission(pVCpu, MSR_K8_FS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    +         hmR0SvmSetMsrPermission(pVCpu, MSR_K8_GS_BASE,        SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
              hmR0SvmSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    -         hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    +         hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS,  SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
              hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
              hmR0SvmSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
     
       * @returns VBox status code.
       * @param   pVCpu       Pointer to the VMCPU.
    +  * @param   pVmcb       Pointer to the VMCB.
       * @param   pCtx        Pointer to the guest-CPU context.
       *
       * @remarks No-long-jump zone!!!
       */
    - static int hmR0SvmLoadGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
    + DECLINLINE(int) hmR0SvmLoadGuestControlRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
      {
          /*
     
      }

    + 
      /**
       * Loads the guest segment registers into the VMCB.

       * @returns VBox status code.
       * @param   pVCpu       Pointer to the VMCPU.
    +  * @param   pVmcb       Pointer to the VMCB.
       * @param   pCtx        Pointer to the guest-CPU context.
       *
       * @remarks No-long-jump zone!!!
       */
    - static void hmR0SvmLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
    + DECLINLINE(void) hmR0SvmLoadGuestSegmentRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
      {
          /* Guest Segment registers: CS, SS, DS, ES, FS, GS. */
     
       *
       * @param   pVCpu       Pointer to the VMCPU.
    +  * @param   pVmcb       Pointer to the VMCB.
       * @param   pCtx        Pointer to the guest-CPU context.
       *
       * @remarks No-long-jump zone!!!
       */
    - static void hmR0SvmLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pCtx)
    + DECLINLINE(void) hmR0SvmLoadGuestMsrs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
      {
          /* Guest Sysenter MSRs. */
     
       *
       * @remarks No-long-jump zone!!!
    -  */
    - static void hmR0SvmLoadGuestDebugRegs(PVMCPU pVCpu, PCPUMCTX pCtx)
    +  * @remarks Requires EFLAGS to be up-to-date in the VMCB!
    +  */
    + DECLINLINE(void) hmR0SvmLoadGuestDebugRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
      {
          if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
     
          pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
      }
    + 
    + 
    + /**
    +  * Loads the guest APIC state (currently just the TPR).
    +  *
    +  * @returns VBox status code.
    +  * @param   pVCpu   Pointer to the VMCPU.
    +  * @param   pVmcb   Pointer to the VMCB.
    +  * @param   pCtx    Pointer to the guest-CPU context.
    +  */
    + DECLINLINE(int) hmR0SvmLoadGuestApicState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx)
    + {
    +     if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_SVM_GUEST_APIC_STATE))
    +         return VINF_SUCCESS;
    + 
    +     bool    fPendingIntr;
    +     uint8_t u8Tpr;
    +     int rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPendingIntr, NULL /* pu8PendingIrq */);
    +     AssertRCReturn(rc, rc);
    + 
    +     /** Assume that we need to trap all TPR accesses and thus need not check on
    +      *  every #VMEXIT if we should update the TPR. */
    +     Assert(pVmcb->ctrl.IntCtrl.n.u1VIrqMasking);
    +     pVCpu->hm.s.svm.fSyncVTpr = false;
    + 
    +     /* 32-bit guests use the LSTAR MSR for patching guest code which touches the TPR. */
    +     if (pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive)
    +     {
    +         pCtx->msrLSTAR = u8Tpr;
    + 
    +         /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
    +         if (fPendingIntr)
    +             hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE);
    +         else
    +         {
    +             hmR0SvmSetMsrPermission(pVCpu, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE);
    +             pVCpu->hm.s.svm.fSyncVTpr = true;
    +         }
    + 
    +         pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM;
    +     }
    +     else
    +     {
    +         /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
    +         pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4);
    + 
    +         /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */
    +         if (fPendingIntr)
    +             pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8);
    +         else
    +         {
    +             pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8);
    +             pVCpu->hm.s.svm.fSyncVTpr = true;
    +         }
    + 
    +         pVmcb->ctrl.u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    +     }
    + 
    +     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_SVM_GUEST_APIC_STATE;
    +     return rc;
    + }
    + 

      /**
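
    The VTPR field is only 4 bits wide, so the new function stores just the task-priority class (bits 7:4 of
    the 8-bit APIC TPR), and the post-#VMEXIT sync further below shifts it back; the sub-class in the low
    nibble does not survive the round trip. A worked example, illustrative only:

        #include <stdint.h>

        uint8_t const u8ApicTpr = 0xB0;                    /* APIC TPR: class 0xB, sub-class 0x0 */
        uint8_t const u8VTpr    = u8ApicTpr >> 4;          /* 0x0B, what lands in IntCtrl.n.u8VTPR */
        uint8_t const u8Synced  = (uint8_t)(u8VTpr << 4);  /* 0xB0, what PDMApicSetTPR gets back   */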
     
          STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);

    -     int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pCtx);
    +     int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pVmcb, pCtx);
          AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestControlRegs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

    -     hmR0SvmLoadGuestSegmentRegs(pVCpu, pCtx);
    -     hmR0SvmLoadGuestMsrs(pVCpu, pCtx);
    +     hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcb, pCtx);
    +     hmR0SvmLoadGuestMsrs(pVCpu, pVmcb, pCtx);

          pVmcb->guest.u64RIP    = pCtx->rip;
     

          /* hmR0SvmLoadGuestDebugRegs() must be called -after- updating guest RFLAGS as the RFLAGS may need to be changed. */
    -     hmR0SvmLoadGuestDebugRegs(pVCpu, pCtx);
    +     hmR0SvmLoadGuestDebugRegs(pVCpu, pVmcb, pCtx);
    + 
    +     rc = hmR0SvmLoadGuestApicState(pVCpu, pVmcb, pCtx);
    +     AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

          rc = hmR0SvmSetupVMRunHandler(pVCpu, pCtx);
     
      #endif

    -     /* -XXX- todo TPR syncing. */
    - 
          /*
           * Re-enable nested paging (automatically disabled on every VM-exit). See AMD spec. 15.25.3 "Enabling Nested Paging".
     
          AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags =%#x\n", pVCpu->hm.s.fContextUseFlags));
          STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
    + 
    +     /*
    +      * If we're not intercepting TPR changes in the guest, save the guest TPR before the world-switch
    +      * so we can update it on the way back if the guest changed the TPR.
    +      */
    +     if (pVCpu->hm.s.svm.fSyncVTpr)
    +     {
    +         if (pVM->hm.s.fTPRPatchingActive)
    +             pSvmTransient->u8GuestTpr = pCtx->msrLSTAR;
    +         else
    +             pSvmTransient->u8GuestTpr = pVmcb->ctrl.IntCtrl.n.u8VTPR;
    +     }

          /* Flush the appropriate tagged-TLB entries. */
     

          TMNotifyEndOfExecution(pVCpu);                              /* Notify TM that the guest is no longer running. */
    +     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
    + 
          Assert(!(ASMGetFlags() & X86_EFL_IF));
    -     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
    - 
    -     /* -XXX- TPR patching? */
    - 
          ASMSetFlags(pSvmTransient->uEFlags);                        /* Enable interrupts. */

     
          hmR0SvmSaveGuestState(pVCpu, pMixedCtx);                    /* Save the guest state from the VMCB to the guest-CPU context. */

    -     /* --XXX- TPR syncing todo */
    +     if (pVCpu->hm.s.svm.fSyncVTpr)
    +     {
    +         /* TPR patching (for 32-bit guests) uses LSTAR MSR for holding the TPR value, otherwise uses the VTPR. */
    +         if (   pVM->hm.s.fTPRPatchingActive
    +             && (pCtx->msrLSTAR & 0xff) != pSvmTransient->u8GuestTpr)
    +         {
    +             int rc = PDMApicSetTPR(pVCpu, pCtx->msrLSTAR & 0xff);
    +             AssertRC(rc);
    +         }
    +         else if ((uint8_t)(pSvmTransient->u8GuestTpr >> 4) != pVmcb->ctrl.IntCtrl.n.u8VTPR)
    +         {
    +             int rc = PDMApicSetTPR(pVCpu, (pVmcb->ctrl.IntCtrl.n.u8VTPR << 4));
    +             AssertRC(rc);
    +         }
    +     }

          /* -XXX- premature interruption during event injection */
    + 
      }

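    Taken together, the HMSVMR0.cpp hunks implement the usual TPR-shadowing optimisation: intercept TPR
    (CR8 or patched-LSTAR) writes only while an interrupt is pending, let the guest write freely otherwise,
    and after the #VMEXIT push any unintercepted change back into the virtual APIC. A condensed sketch of
    that flow with hypothetical stand-in names (not literal VirtualBox code):

        #include <stdbool.h>
        #include <stdint.h>

        static uint8_t g_uVTpr;         /* stand-in for the VMCB IntCtrl.n.u8VTPR field */
        static uint8_t g_uApicTpr;      /* stand-in for the virtual APIC's 8-bit TPR    */
        static bool    g_fIntrPending;  /* stand-in for PDMApicGetTPR's fPendingIntr    */

        static void tprShadowedRun(void (*pfnRunGuest)(void))
        {
            /* VM-entry side (cf. hmR0SvmLoadGuestApicState): load the shadow and
             * only leave TPR writes unintercepted when no interrupt is pending. */
            g_uVTpr = (uint8_t)(g_uApicTpr >> 4);
            bool const    fSyncVTpr  = !g_fIntrPending;
            uint8_t const uSavedVTpr = g_uVTpr;

            pfnRunGuest();  /* guest may update the TPR shadow without exiting */

            /* #VMEXIT side: propagate an unintercepted guest TPR change back. */
            if (fSyncVTpr && g_uVTpr != uSavedVTpr)
                g_uApicTpr = (uint8_t)(g_uVTpr << 4);
        }
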
  • trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp

    r46517 → r46530

                      if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
                      {
    -                     /* If there are interrupts pending, intercept CR8 writes, otherwise don't intercept CR8 reads or writes. */
    +                     /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */
                          if (fPendingIntr)
                              hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_INTERCEPT_WRITE);
     

          /* Clear any unused and reserved bits. */
    -     pVCpu->hm.s.fContextUseFlags &= ~(  HM_CHANGED_GUEST_CR2
    -                                       | HM_CHANGED_VMX_RESERVED1
    -                                       | HM_CHANGED_VMX_RESERVED2);
    +     pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR2;

          AssertMsg(!pVCpu->hm.s.fContextUseFlags,
  • trunk/src/VBox/VMM/include/HMInternal.h

    r46444 → r46530

      # define HM_CHANGED_VMX_ENTRY_CTLS               RT_BIT(19)
      # define HM_CHANGED_VMX_EXIT_CTLS                RT_BIT(20)
    - # define HM_CHANGED_VMX_RESERVED1                RT_BIT(21)
    - # define HM_CHANGED_VMX_RESERVED2                RT_BIT(22)
      /* AMD-V specific state. */
    - # define HM_CHANGED_SVM_INTERCEPT_VECTORS        RT_BIT(16)
    - # define HM_CHANGED_SVM_IOPM_MSRPM_BITMAPS       RT_BIT(17)
    - # define HM_CHANGED_SVM_GUEST_ASID               RT_BIT(18)
    - # define HM_CHANGED_SVM_GUEST_TPR                RT_BIT(19)
    - # define HM_CHANGED_SVM_GUEST_NP                 RT_BIT(20)
    - # define HM_CHANGED_SVM_LBR                      RT_BIT(21)
    - # define HM_CHANGED_SVM_AVIC                     RT_BIT(22)
    + # define HM_CHANGED_SVM_GUEST_APIC_STATE         RT_BIT(16)
    + # define HM_CHANGED_SVM_RESERVED1                RT_BIT(17)
    + # define HM_CHANGED_SVM_RESERVED2                RT_BIT(18)
    + # define HM_CHANGED_SVM_RESERVED3                RT_BIT(19)
    + # define HM_CHANGED_SVM_RESERVED4                RT_BIT(20)

      # define HM_CHANGED_HOST_CONTEXT                 RT_BIT(23)
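
    These HM_CHANGED_* bits are dirty flags: code that touches guest state sets the relevant bit in
    pVCpu->hm.s.fContextUseFlags, the load path re-syncs only the flagged state into the VMCB/VMCS (as the
    hmR0SvmLoad* functions above do), and the bit is cleared afterwards. A minimal sketch of the idiom with
    hypothetical names, not the VirtualBox definitions:

        #include <stdint.h>

        #define MY_CHANGED_GUEST_APIC_STATE  (UINT32_C(1) << 16)  /* hypothetical dirty bit */

        static uint32_t g_fContextUseFlags;  /* stand-in for pVCpu->hm.s.fContextUseFlags */

        static void loadGuestApicStateIfDirty(void)
        {
            if (!(g_fContextUseFlags & MY_CHANGED_GUEST_APIC_STATE))
                return;                       /* state unchanged since the last run, skip */
            /* ... write the TPR shadow into the VMCB here ... */
            g_fContextUseFlags &= ~MY_CHANGED_GUEST_APIC_STATE;   /* mark clean */
        }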
     
                                                       | HM_CHANGED_VMX_GUEST_APIC_STATE     \
                                                       | HM_CHANGED_VMX_ENTRY_CTLS           \
    -                                                  | HM_CHANGED_VMX_EXIT_CTLS            \
    -                                                  | HM_CHANGED_VMX_RESERVED1            \
    -                                                  | HM_CHANGED_VMX_RESERVED2)
    +                                                  | HM_CHANGED_VMX_EXIT_CTLS)
      #endif

     
              /** Virtual address of the MSR bitmap. */
              R0PTRTYPE(void *)           pvMsrBitmap;
    + 
    +         /** Whether VTPR with V_INTR_MASKING set is in effect, indicating
    +          *  we should check if the VTPR changed on every VM-exit. */
    +         bool                        fSyncVTpr;
          } svm;
