Changeset 46500 in vbox for trunk/src/VBox/VMM


Timestamp:
Jun 11, 2013 4:00:10 PM
Author:
vboxsync
Message:

VMM/HMSVMR0: AMD-V bits.

File:
1 edited

  • trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

    r46482 r46500  
    494494        /* Set up unconditional intercepts and conditions. */
    495495        pVmcb->ctrl.u32InterceptCtrl1 =   SVM_CTRL1_INTERCEPT_INTR          /* External interrupt causes a VM-exit. */
    496                                         | SVM_CTRL1_INTERCEPT_VINTR         /* When guest enabled interrupts cause a VM-exit. */
     496                                        | SVM_CTRL1_INTERCEPT_VINTR         /* When the guest enables interrupts, cause a VM-exit. */
    497497                                        | SVM_CTRL1_INTERCEPT_NMI           /* Non-Maskable Interrupts cause a VM-exit. */
    498498                                        | SVM_CTRL1_INTERCEPT_SMI           /* A System Management Interrupt causes a VM-exit. */
     
    522522        pVmcb->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(4);
    523523
    524         /* CR0, CR4 writes must be intercepted for obvious reasons. */
     524        /* CR0, CR4 writes must be intercepted for the same reasons as above. */
    525525        pVmcb->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(4);
    526526
     
    532532        pVmcb->ctrl.IntCtrl.n.u1VIrqMasking = 1;
    533533
    534         /* Ignore the priority in the TPR; just deliver it to the guest when we tell it to. */
     534        /* Ignore the priority in the TPR; we take into account the guest TPR anyway while delivering interrupts. */
    535535        pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR   = 1;
    536536
     
    545545        pVmcb->u64VmcbCleanBits = 0;
    546546
    547         /* The ASID must start at 1; the host uses 0. */
     547        /* The guest ASID must be non-zero ("MBNZ"); set it to 1. The host uses 0. */
    548548        pVmcb->ctrl.TLBCtrl.n.u32ASID = 1;
    549549
     
    869869
    870870        /*
    871          * When Nested Paging is not available use shadow page tables and intercept #PFs (latter done in SVMR0SetupVM()).
     871         * When Nested Paging is not available use shadow page tables and intercept #PFs (the latter done in SVMR0SetupVM()).
    872872         */
    873873        if (!pVM->hm.s.fNestedPaging)
     
    914914
    915915        pVmcb->guest.u64CR0 = u64GuestCR0;
    916         pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
     916        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
    917917        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
    918918    }
     
    951951            pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu);
    952952
    953         pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
     953        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
    954954        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR3;
    955955    }
     
    996996
    997997        pVmcb->guest.u64CR4 = u64GuestCR4;
    998         pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
     998        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
    999999        pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;
    10001000    }
     
    10771077    pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp;
    10781078
    1079     /* Guest EFER MSR. */
    1080     /* AMD-V requires guest EFER.SVME to be set. Weird.
    1081        See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks". */
     1079    /*
     1080     * Guest EFER MSR.
     1081     * AMD-V requires guest EFER.SVME to be set. Weird.
     1082     * See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks".
     1083     */
    10821084    pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;
    10831085
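
For context: VMRUN loads the guest EFER from the VMCB and its consistency checks fail if EFER.SVME is clear, hence the unconditional OR above. A minimal user-mode sketch of the composition (SVME is bit 12 per the AMD manual; the macro name below is illustrative, not the VBox one):

    #include <stdint.h>
    #include <stdio.h>

    #define EFER_SVME_BIT 12 /* EFER.SVME; see AMD spec. 15.4 "Enabling SVM". */

    int main(void)
    {
        uint64_t uGuestEfer = UINT64_C(0x500); /* e.g. LME|LMA for a 64-bit guest */
        uint64_t uVmcbEfer  = uGuestEfer | (UINT64_C(1) << EFER_SVME_BIT);
        printf("guest EFER=%#llx -> VMCB EFER=%#llx\n",
               (unsigned long long)uGuestEfer, (unsigned long long)uVmcbEfer);
        return 0;
    }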
     
    10851087    if (CPUMIsGuestInLongModeEx(pCtx))
    10861088    {
    1087         pVmcb->guest.FS.u64Base      = pCtx->fs.u64Base;
    1088         pVmcb->guest.GS.u64Base      = pCtx->gs.u64Base;
     1089        pVmcb->guest.FS.u64Base = pCtx->fs.u64Base;
     1090        pVmcb->guest.GS.u64Base = pCtx->gs.u64Base;
    10891091    }
    10901092    else
     
    11031105}
    11041106
     1107
    11051108/**
    11061109 * Loads the guest debug registers into the VMCB.
     
    11421145        if (!CPUMIsHyperDebugStateActive(pVCpu))
    11431146        {
    1144             rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
     1147            rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
    11451148            AssertRC(rc);
    11461149
     
    11531156        fInterceptMovDRx = true;
    11541157    }
    1155     else if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
     1158    else if (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
    11561159    {
    11571160        if (!CPUMIsGuestDebugStateActive(pVCpu))
    11581161        {
    1159             rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
     1162            rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
    11601163            AssertRC(rc);
    11611164            STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
     
    12031206 *
    12041207 * @returns VBox status code.
    1205  * @param   pVCpu       Pointer to the VMCPU.
    1206  * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
    1207  *                      out-of-sync. Make sure to update the required fields
    1208  *                      before using them.
     1208 * @param   pVCpu   Pointer to the VMCPU.
     1209 * @param   pCtx    Pointer to the guest-CPU context.
    12091210 *
    12101211 * @remarks No-long-jump zone!!!
     
    12491250    AssertPtr(pVM);
    12501251    AssertPtr(pVCpu);
    1251     AssertPtr(pMixedCtx);
     1252    AssertPtr(pCtx);
    12521253    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    12531254
     
    12571258    STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
    12581259
    1259     int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pMixedCtx);
     1260    int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pCtx);
    12601261    AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestControlRegs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    12611262
     
    12721273    hmR0SvmLoadGuestDebugRegs(pVCpu, pCtx);
    12731274
    1274     rc = hmR0SvmSetupVMRunHandler(pVCpu, pMixedCtx);
     1275    rc = hmR0SvmSetupVMRunHandler(pVCpu, pCtx);
    12751276    AssertLogRelMsgRCReturn(rc, ("hmR0SvmSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
    12761277
     
    12991300static void hmR0SvmSetupTscOffsetting(PVMCPU pVCpu)
    13001301{
    1301     PSVMVMCB pVmcb = pVCpu->hm.s.svm.pvVmcb;
     1302    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
    13021303    if (TMCpuTickCanUseRealTSC(pVCpu, &pVmcb->ctrl.u64TSCOffset))
    13031304    {
     
    13281329
    13291330/**
    1330  * Posts a pending event (trap or external interrupt). An injected event should only
    1331  * be written to the VMCB immediately before VMRUN, otherwise we might have stale events
    1332  * injected across VM resets and suchlike. See @bugref{6220}.
     1331 * Sets an event as a pending event to be injected into the guest.
     1332 *
     1333 * @param   pVCpu               Pointer to the VMCPU.
     1334 * @param   pEvent              Pointer to the SVM event.
     1335 * @param   GCPtrFaultAddress   The fault-address (CR2) in case it's a
     1336 *                              page-fault.
     1337 */
     1338DECLINLINE(void) hmR0SvmSetPendingEvent(PVMCPU pVCpu, PSVMEVENT pEvent, RTGCUINTPTR GCPtrFaultAddress)
     1339{
     1340    Assert(!pVCpu->hm.s.Event.fPending);
     1341
     1342    pVCpu->hm.s.Event.u64IntrInfo       = pEvent->u;
     1343    pVCpu->hm.s.Event.fPending          = true;
     1344    pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
     1345
     1346#ifdef VBOX_STRICT
     1347    if (GCPtrFaultAddress)
     1348    {
     1349        AssertMsg(   pEvent->n.u8Vector == X86_XCPT_PF
     1350                  && pEvent->n.u3Type   == SVM_EVENT_EXCEPTION,
     1351                  ("hmR0SvmSetPendingEvent: Setting fault-address for non-#PF. u8Vector=%#x Type=%#RX32 GCPtrFaultAddr=%#RGx\n",
     1352                   pEvent->n.u8Vector, (uint32_t)pEvent->n.u3Type, GCPtrFaultAddress));
     1353        Assert(GCPtrFaultAddress == CPUMGetGuestCR2(pVCpu));
     1354    }
     1355#endif
     1356
     1357    Log4(("hmR0SvmSetPendingEvent: u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%#x ErrorCode=%#RX32\n", pEvent->u,
     1358          pEvent->n.u8Vector, pEvent->n.u3Type, (uint8_t)pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
     1359}
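
As a usage illustration, an eventual #PF intercept handler would queue the fault along these lines (a sketch reusing the helper above; uErrCode and uFaultAddress are placeholders for values taken from the VMCB's exit information):

    SVMEVENT Event;
    Event.u = 0;
    Event.n.u1Valid          = 1;
    Event.n.u3Type           = SVM_EVENT_EXCEPTION;
    Event.n.u8Vector         = X86_XCPT_PF;
    Event.n.u1ErrorCodeValid = 1;
    Event.n.u32ErrorCode     = uErrCode;                   /* page-fault error code */
    hmR0SvmSetPendingEvent(pVCpu, &Event, uFaultAddress);  /* fault address -> CR2 */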
     1360
     1361
     1362/**
     1363 * Injects an event into the guest upon VMRUN by updating the relevant field
     1364 * in the VMCB.
    13331365 *
    13341366 * @param   pVCpu       Pointer to the VMCPU.
    1335  * @param   pEvent      Pointer to the SVM event.
    1336  */
    1337 DECLINLINE(void) hmR0SvmSetPendingEvent(PVMCPU pVCpu, SVMEVENT *pEvent)
    1338 {
    1339     Log4(("SVM: Set pending event: intInfo=%#RX64\n", pEvent->u));
    1340 
    1341     /* If there's an event pending already, we're in trouble... */
     1367 * @param   pVmcb       Pointer to the guest VMCB.
     1368 * @param   pCtx        Pointer to the guest-CPU context.
     1369 * @param   pEvent      Pointer to the event.
     1370 *
     1371 * @remarks No-long-jump zone!!!
     1372 * @remarks Requires CR0!
     1373 */
     1374DECLINLINE(void) hmR0SvmInjectEventVmcb(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx, PSVMEVENT pEvent)
     1375{
     1376    pVmcb->ctrl.EventInject.u = pEvent->u;
     1377    STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[pEvent->n.u8Vector & MASK_INJECT_IRQ_STAT]);
     1378}
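
The pending/injected split means EventInject is written only on the way into VMRUN. A sketch of the call-site pattern, matching the flow in hmR0SvmInjectPendingEvent further down:

    if (pVCpu->hm.s.Event.fPending)
    {
        SVMEVENT Event;
        Event.u = pVCpu->hm.s.Event.u64IntrInfo;   /* recover the queued event */
        pVCpu->hm.s.Event.fPending = false;
        hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
    }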
     1379
     1380
     1381/**
     1382 * Converts any TRPM trap into a pending SVM event. This is typically used when
     1383 * entering from ring-3 (not longjmp returns).
     1384 *
     1385 * @param   pVCpu           Pointer to the VMCPU.
     1386 */
     1387static void hmR0SvmTrpmTrapToPendingEvent(PVMCPU pVCpu)
     1388{
     1389    Assert(TRPMHasTrap(pVCpu));
    13421390    Assert(!pVCpu->hm.s.Event.fPending);
    13431391
    1344     /* Set pending event state. */
    1345     pVCpu->hm.s.Event.u64IntrInfo = pEvent->u;
    1346     pVCpu->hm.s.Event.fPending    = true;
    1347 }
    1348 
     1392    uint8_t     uVector;
     1393    TRPMEVENT   enmTrpmEvent;
     1394    RTGCUINT    uErrCode;
     1395    RTGCUINTPTR GCPtrFaultAddress;
     1396    uint8_t     cbInstr;
     1397
     1398    int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
     1399    AssertRC(rc);
     1400
     1401    SVMEVENT Event;
     1402    Event.u         = 0;
     1403    Event.n.u1Valid = 1;
     1404
     1405    /* Refer AMD spec. 15.20 "Event Injection" for the format. */
     1406    Event.n.u8Vector = uVector;
     1407    if (enmTrpmEvent == TRPM_TRAP)
     1408    {
     1409        Event.n.u3Type = SVM_EVENT_EXCEPTION;
     1410        switch (uVector)
     1411        {
     1412            case X86_XCPT_PF:
     1413            case X86_XCPT_DF:
     1414            case X86_XCPT_TS:
     1415            case X86_XCPT_NP:
     1416            case X86_XCPT_SS:
     1417            case X86_XCPT_GP:
     1418            case X86_XCPT_AC:
     1419            {
     1420                Event.n.u32ErrorCode     = uErrCode;
     1421                Event.n.u1ErrorCodeValid = 1;
     1422                break;
     1423            }
     1424        }
     1425    }
     1426    else if (enmTrpmEvent == TRPM_HARDWARE_INT)
     1427    {
     1428        if (uVector == X86_XCPT_NMI)
     1429            Event.n.u3Type = SVM_EVENT_NMI;
     1430        else
     1431            Event.n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
     1432    }
     1433    else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
     1434        Event.n.u3Type = SVM_EVENT_SOFTWARE_INT;
     1435    else
     1436        AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
     1437
     1438    rc = TRPMResetTrap(pVCpu);
     1439    AssertRC(rc);
     1440
     1441    hmR0SvmSetPendingEvent(pVCpu, &Event, GCPtrFaultAddress);
     1442    Log4(("TRPM->HM event: u=%#RX64 u8Vector=%#x uErrorCodeValid=%#x uErrorCode=%#RX32\n", Event.u, Event.n.u8Vector, Event.n.u1ErrorCodeValid, Event.n.u32ErrorCode));
     1443}
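
The 64-bit value built here follows the EVENTINJ format of AMD spec. 15.20: vector in bits 7:0, type in bits 10:8, error-code-valid in bit 11, valid in bit 31, error code in bits 63:32. A stand-alone sketch of that encoding (names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* EVENTINJ: VECTOR[7:0], TYPE[10:8] (0=INTR, 2=NMI, 3=exception, 4=SW int),
       EV[11], V[31], ERRORCODE[63:32]. */
    static uint64_t svmEncodeEventInj(uint8_t bVector, unsigned uType, int fErrValid, uint32_t uErrCode)
    {
        return (uint64_t)bVector
             | ((uint64_t)(uType & 0x7) << 8)
             | ((uint64_t)(fErrValid ? 1 : 0) << 11)
             | (UINT64_C(1) << 31)
             | ((uint64_t)uErrCode << 32);
    }

    int main(void)
    {
        /* A #GP (vector 13) exception with a zero error code -> 0x80000b0d. */
        printf("EVENTINJ=%#llx\n", (unsigned long long)svmEncodeEventInj(13, 3, 1, 0));
        return 0;
    }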
     1444
     1445
     1446/**
     1447 * Converts any pending SVM event into a TRPM trap. Typically used when leaving
     1448 * AMD-V to execute any instruction.
     1449 *
      1450 * @param   pVCpu           Pointer to the VMCPU.
      1451 */
      1452static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu)
     1453{
     1454    Assert(pVCpu->hm.s.Event.fPending);
     1455    Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
     1456
      1457    SVMEVENT Event;
      1458    Event.u = pVCpu->hm.s.Event.u64IntrInfo;
      1459    uint8_t uVector = Event.n.u8Vector, uVectorType = Event.n.u3Type;
     1460
     1461    TRPMEVENT enmTrapType;
     1462    switch (uVectorType)
     1463    {
      1464        case SVM_EVENT_EXTERNAL_IRQ:
      1465        case SVM_EVENT_NMI:
      1466            enmTrapType = TRPM_HARDWARE_INT;
      1467            break;
     1468        case SVM_EVENT_SOFTWARE_INT:
     1469            enmTrapType = TRPM_SOFTWARE_INT;
     1470            break;
     1471        case SVM_EVENT_EXCEPTION:
     1472            enmTrapType = TRPM_TRAP;
     1473            break;
     1474        default:
     1475            AssertMsgFailed(("Invalid pending-event type %#x\n", uVectorType));
     1476            enmTrapType = TRPM_32BIT_HACK;
     1477            break;
     1478    }
     1479
      1480    Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
     1481
     1482    int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
     1483    AssertRC(rc);
     1484
      1485    if (Event.n.u1ErrorCodeValid)
      1486        TRPMSetErrorCode(pVCpu, Event.n.u32ErrorCode);
     1487
     1488    if (   uVectorType == SVM_EVENT_EXCEPTION
     1489        && uVector     == X86_XCPT_PF)
     1490    {
     1491        TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
     1492        Assert(pVCpu->hm.s.Event.GCPtrFaultAddress == CPUMGetGuestCR2(pVCpu));
     1493    }
     1494    else if (uVectorType == SVM_EVENT_SOFTWARE_INT)
     1495    {
      1496        AssertMsg(   uVectorType == SVM_EVENT_SOFTWARE_INT
      1497                  || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
      1498                  ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
     1499        TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
     1500    }
     1501    pVCpu->hm.s.Event.fPending = false;
     1502}
     1503
     1504
     1505/**
     1506 * Gets the guest's interrupt-shadow.
     1507 *
     1508 * @returns The guest's interrupt-shadow.
     1509 * @param   pVCpu   Pointer to the VMCPU.
     1510 * @param   pCtx    Pointer to the guest-CPU context.
     1511 *
     1512 * @remarks No-long-jump zone!!!
      1513 * @remarks May clear the VMCPU_FF_INHIBIT_INTERRUPTS force-flag as a side-effect.
     1514 */
     1515DECLINLINE(uint32_t) hmR0SvmGetGuestIntrShadow(PVMCPU pVCpu, PCPUMCTX pCtx)
     1516{
     1517    /*
     1518     * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
     1519     * inhibit interrupts or clear any existing interrupt-inhibition.
     1520     */
     1521    uint32_t uIntrState = 0;
     1522    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
     1523    {
     1524        if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
     1525        {
     1526            /*
     1527             * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in
     1528             * AMD-V, the flag's condition to be cleared is met and thus the cleared state is correct.
     1529             */
     1530            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
     1531        }
     1532        else
     1533            uIntrState = SVM_INTERRUPT_SHADOW_ACTIVE;
     1534    }
     1535    return uIntrState;
     1536}
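
The shadow is a one-instruction window: STI and MOV SS delay interrupt delivery until the next instruction retires, which EM tracks by remembering the RIP at which inhibition was requested. A stand-alone toy model of the same check (illustrative only, not VBox code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct { bool fInhibit; uint64_t uInhibitRip; } TOYCPU;

    static unsigned toyGetIntrShadow(TOYCPU *pCpu, uint64_t uRip)
    {
        if (pCpu->fInhibit)
        {
            if (uRip != pCpu->uInhibitRip)
                pCpu->fInhibit = false;  /* moved past the STI/MOV SS; shadow lapses */
            else
                return 1;                /* shadow active at this RIP */
        }
        return 0;
    }

    int main(void)
    {
        TOYCPU Cpu = { true, 0x1000 };
        printf("%u\n", toyGetIntrShadow(&Cpu, 0x1000)); /* 1: still shadowed */
        printf("%u\n", toyGetIntrShadow(&Cpu, 0x1002)); /* 0: cleared */
        return 0;
    }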
     1537
     1538
     1539/**
     1540 * Sets the virtual interrupt intercept control in the VMCB which
     1541 * instructs AMD-V to cause a #VMEXIT as soon as the guest is in a state to
     1542 * receive interrupts.
     1543 *
     1544 * @param pVmcb         Pointer to the VMCB.
     1545 */
     1546DECLINLINE(void) hmR0SvmSetVirtIntrIntercept(PSVMVMCB pVmcb)
     1547{
     1548    if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_VINTR))
     1549    {
     1550        pVmcb->ctrl.IntCtrl.n.u1VIrqValid  = 1;     /* A virtual interrupt is pending. */
     1551        pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0;     /* Not necessary as we #VMEXIT for delivering the interrupt. */
     1552        pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
     1553        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
     1554    }
     1555}
     1556
     1557
     1558/**
     1559 * Injects any pending events into the guest if the guest is in a state to
     1560 * receive them.
     1561 *
     1562 * @param   pVCpu       Pointer to the VMCPU.
     1563 * @param   pCtx        Pointer to the guest-CPU context.
     1564 */
     1565static void hmR0SvmInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
     1566{
     1567    Assert(!TRPMHasTrap(pVCpu));
     1568
     1569    const bool fIntShadow = !!hmR0SvmGetGuestIntrShadow(pVCpu, pCtx);
     1570    PSVMVMCB pVmcb        = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     1571
     1572    SVMEVENT Event;
     1573    Event.u = 0;
     1574    if (pVCpu->hm.s.Event.fPending)                            /* First, inject any pending HM events. */
     1575    {
     1576        Event.u = pVCpu->hm.s.Event.u64IntrInfo;
     1577        bool fInject = true;
     1578        if (   fIntShadow
     1579            && (   Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ
     1580                || Event.n.u3Type == SVM_EVENT_NMI))
     1581        {
     1582            fInject = false;
     1583        }
     1584
     1585        if (   fInject
     1586            && Event.n.u1Valid)
     1587        {
     1588            pVCpu->hm.s.Event.fPending = false;
      1589            hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
     1590        }
     1591        else
     1592            hmR0SvmSetVirtIntrIntercept(pVmcb);
     1593    }                                                          /** @todo SMI. SMIs take priority over NMIs. */
      1594    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))   /* NMI. NMIs take priority over regular interrupts. */
     1595    {
     1596        if (!fIntShadow)
     1597        {
     1598            Log4(("Injecting NMI\n"));
     1599            Event.n.u1Valid      = 1;
     1600            Event.n.u8Vector     = X86_XCPT_NMI;
     1601            Event.n.u3Type       = SVM_EVENT_NMI;
     1602
      1603            hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
     1604            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
     1605        }
     1606        else
     1607            hmR0SvmSetVirtIntrIntercept(pVmcb);
     1608    }
     1609    else if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
     1610    {
     1611        /* Check if there are guest external interrupts (PIC/APIC) pending and inject them if the guest can receive them. */
     1612        const bool fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
     1613        if (   !fBlockInt
     1614            && !fIntShadow)
     1615        {
     1616            uint8_t u8Interrupt;
      1617            int rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
     1618            if (RT_SUCCESS(rc))
     1619            {
     1620                Log4(("Injecting interrupt u8Interrupt=%#x\n", u8Interrupt));
     1621
     1622                Event.n.u1Valid  = 1;
     1623                Event.n.u8Vector = u8Interrupt;
     1624                Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
     1625
      1626                hmR0SvmInjectEventVmcb(pVCpu, pVmcb, pCtx, &Event);
     1627                STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
     1628            }
     1629            else
     1630            {
     1631                /** @todo Does this actually happen? If not turn it into an assertion. */
     1632                Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
     1633                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
     1634            }
     1635        }
     1636        else
     1637            hmR0SvmSetVirtIntrIntercept(pVmcb);
     1638    }
     1639
     1640    /* Update the guest interrupt shadow in the VMCB. */
     1641    pVmcb->ctrl.u64IntShadow = !!fIntShadow;
     1642}
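
Note the dispatch order above: an already-queued HM event first, then NMIs, then PIC/APIC interrupts (SMIs are a noted todo), mirroring the architectural event priorities. The same walk in a tiny stand-alone sketch (illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    enum { SRC_NONE, SRC_HM_EVENT, SRC_NMI, SRC_EXTINT };

    static int toyPickEventSource(bool fHmEvent, bool fNmi, bool fExtInt)
    {
        if (fHmEvent) return SRC_HM_EVENT; /* previously queued events win */
        if (fNmi)     return SRC_NMI;      /* NMIs beat maskable interrupts */
        if (fExtInt)  return SRC_EXTINT;
        return SRC_NONE;
    }

    int main(void)
    {
        printf("%d\n", toyPickEventSource(false, true, true)); /* 2: the NMI goes first */
        return 0;
    }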
     1643
     1644
     1645/**
      1646 * Checks per-VM and per-VCPU force flag actions that require us to go back to
     1647 * ring-3 for one reason or another.
     1648 *
     1649 * @returns VBox status code (information status code included).
     1650 * @retval VINF_SUCCESS if we don't have any actions that require going back to
     1651 *         ring-3.
     1652 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
     1653 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
      1654 *         interrupts).
     1655 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
     1656 *         all EMTs to be in ring-3.
      1657 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
     1658 * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
     1659 *         to the EM loop.
     1660 *
     1661 * @param   pVM         Pointer to the VM.
     1662 * @param   pVCpu       Pointer to the VMCPU.
     1663 * @param   pCtx        Pointer to the guest-CPU context.
     1664 */
     1665static int hmR0SvmCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     1666{
      1667    Assert(VMMRZCallRing3IsEnabled(pVCpu));
      1668    int rc = VINF_SUCCESS;
     1669    if (   VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
     1670        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
     1671                               | VMCPU_FF_REQUEST | VMCPU_FF_HM_UPDATE_CR3))
     1672    {
     1673        /* Pending HM CR3 sync. No PAE PDPEs (VMCPU_FF_HM_UPDATE_PAE_PDPES) on AMD-V. */
     1674        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
     1675        {
     1676            rc = PGMUpdateCR3(pVCpu, pCtx->cr3);
     1677            Assert(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3);
     1678            Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
     1679        }
     1680
      1681        /* Pending PGM CR3 sync. */
      1682        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
     1683        {
     1684            rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
     1685            if (rc != VINF_SUCCESS)
     1686            {
     1687                AssertRC(rc);
     1688                Log4(("hmR0SvmCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
     1689                return rc;
     1690            }
     1691        }
     1692
     1693        /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
     1694        /* -XXX- what was that about single stepping?  */
     1695        if (   VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
     1696            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
     1697        {
     1698            STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
     1699            rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
     1700            Log4(("hmR0SvmCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
     1701            return rc;
     1702        }
     1703
     1704        /* Pending VM request packets, such as hardware interrupts. */
     1705        if (   VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
     1706            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
     1707        {
     1708            Log4(("hmR0SvmCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
     1709            return VINF_EM_PENDING_REQUEST;
     1710        }
     1711
     1712        /* Pending PGM pool flushes. */
     1713        if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
     1714        {
     1715            Log4(("hmR0SvmCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
     1716            return VINF_PGM_POOL_FLUSH_PENDING;
     1717        }
     1718
     1719        /* Pending DMA requests. */
     1720        if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
     1721        {
     1722            Log4(("hmR0SvmCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
     1723            return VINF_EM_RAW_TO_R3;
     1724        }
     1725    }
     1726
     1727    /* Paranoia. */
     1728    Assert(rc != VERR_EM_INTERPRETER);
     1729    return VINF_SUCCESS;
     1730}
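
The function is structured as one cheap combined test up front, with the individual checks done only when something is actually pending. The same pattern in miniature (stand-alone, with illustrative flag names):

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_FF_TIMER   UINT32_C(0x01)
    #define TOY_FF_REQUEST UINT32_C(0x02)
    #define TOY_FF_DMA     UINT32_C(0x04)

    static int toyCheckForceFlags(uint32_t fFlags)
    {
        /* One cheap test covering everything; detailed checks only if it hits. */
        if (fFlags & (TOY_FF_TIMER | TOY_FF_REQUEST | TOY_FF_DMA))
        {
            if (fFlags & TOY_FF_TIMER)   return 1; /* back to ring-3: timers */
            if (fFlags & TOY_FF_REQUEST) return 2; /* back to ring-3: requests */
            if (fFlags & TOY_FF_DMA)     return 3; /* back to ring-3: DMA */
        }
        return 0; /* the VINF_SUCCESS equivalent */
    }

    int main(void)
    {
        printf("%d\n", toyCheckForceFlags(TOY_FF_REQUEST | TOY_FF_DMA)); /* 2 */
        return 0;
    }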
     1731
     1732
     1733/**
     1734 * Does the preparations before executing guest code in AMD-V.
     1735 *
     1736 * This may cause longjmps to ring-3 and may even result in rescheduling to the
     1737 * recompiler. We must be cautious what we do here regarding committing
      1738 * guest-state information into the VMCB assuming we will definitely execute the
     1739 * guest in AMD-V. If we fall back to the recompiler after updating the VMCB and
     1740 * clearing the common-state (TRPM/forceflags), we must undo those changes so
     1741 * that the recompiler can (and should) use them when it resumes guest
     1742 * execution. Otherwise such operations must be done when we can no longer
     1743 * exit to ring-3.
     1744 *
     1745 * @returns VBox status code (informational status codes included).
     1746 * @retval VINF_SUCCESS if we can proceed with running the guest.
     1747 * @retval VINF_* scheduling changes, we have to go back to ring-3.
      1748 * @param   pVM         Pointer to the VM.
      1749 * @param   pVCpu       Pointer to the VMCPU.
      1750 * @param   pCtx        Pointer to the guest-CPU context.
     1751 */
      1752DECLINLINE(int) hmR0SvmPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     1753{
     1754    /* Check force flag actions that might require us to go back to ring-3. */
      1755    int rc = hmR0SvmCheckForceFlags(pVM, pVCpu, pCtx);
     1756    if (rc != VINF_SUCCESS)
     1757        return rc;
     1758
     1759#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     1760    /* We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.) */
      1761    RTCCUINTREG uEFlags = ASMIntDisableFlags();
     1762    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
     1763    {
      1764        ASMSetFlags(uEFlags);
     1765        STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
     1766        /* Don't use VINF_EM_RAW_INTERRUPT_HYPER as we can't assume the host does kernel preemption. Maybe some day? */
     1767        return VINF_EM_RAW_INTERRUPT;
     1768    }
     1769    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
     1770    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
     1771#endif
     1772
     1773    /** @todo -XXX- TPR patching. */
     1774
     1775    /* Convert any pending TRPM traps to HM events for injection. */
     1776    if (TRPMHasTrap(pVCpu))
     1777        hmR0SvmTrpmTrapToPendingEvent(pVCpu);
     1778
     1779    hmR0SvmInjectPendingEvent(pVCpu, pCtx);
     1780    return VINF_SUCCESS;
     1781}
     1782
     1783
     1784/**
      1785 * Prepares to run guest code in AMD-V once we have committed to doing so. This
     1786 * means there is no backing out to ring-3 or anywhere else at this
     1787 * point.
     1788 *
     1789 * @param   pVM             Pointer to the VM.
     1790 * @param   pVCpu           Pointer to the VMCPU.
     1791 * @param   pCtx            Pointer to the guest-CPU context.
     1792 *
     1793 * @remarks Called with preemption disabled.
     1794 * @remarks No-long-jump zone!!!
     1795 */
      1796DECLINLINE(void) hmR0SvmPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     1797{
     1798    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
     1799    Assert(VMMR0IsLogFlushDisabled(pVCpu));
     1800
     1801#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
     1802    /** @todo I don't see the point of this, VMMR0EntryFast() already disables interrupts for the entire period. */
      1803    ASMIntDisableFlags();
     1804    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
     1805#endif
      1806    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     1807    /*
     1808     * Re-enable nested paging (automatically disabled on every VM-exit). See AMD spec. 15.25.3 "Enabling Nested Paging".
     1809     * We avoid changing the corresponding VMCB Clean Bit as we're not changing it to a different value since the previous run.
     1810     */
     1811    /** @todo The above assumption could be wrong. It's not documented what
      1812 *        should be done wrt the VMCB Clean Bit, but we'll find out the
     1813     *        hard way. */
     1814    pVmcb->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;
     1815
     1816    /* Load the guest state. */
     1817    int rc = SVMR0LoadGuestState(pVM, pVCpu, pCtx);
     1818    AssertRC(rc);
      1819    AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
     1820
     1821}
     1822
     1823
     1824/**
     1825 * Wrapper for running the guest code in AMD-V.
     1826 *
     1827 * @returns VBox strict status code.
     1828 * @param   pVM         Pointer to the VM.
     1829 * @param   pVCpu       Pointer to the VMCPU.
     1830 * @param   pCtx        Pointer to the guest-CPU context.
     1831 *
     1832 * @remarks No-long-jump zone!!!
     1833 */
     1834DECLINLINE(int) hmR0SvmRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     1835{
     1836    /*
     1837     * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
     1838     * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved and thus the need for this XMM wrapper.
     1839     * Refer MSDN docs. "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
     1840     */
     1841#ifdef VBOX_WITH_KERNEL_USING_XMM
     1842    return HMR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
     1843                             pVCpu->hm.s.svm.pfnVMRun);
     1844#else
      1845    return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
     1846#endif
     1847}
     1848
     1849
     1850/**
     1851 * Runs the guest code using AMD-V.
     1852 *
     1853 * @returns VBox status code.
     1854 * @param   pVM         Pointer to the VM.
     1855 * @param   pVCpu       Pointer to the VMCPU.
     1856 * @param   pCtx        Pointer to the guest CPU context.
     1857 */
     1858VMMR0DECL(int) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
     1859{
     1860    Assert(VMMRZCallRing3IsEnabled(pVCpu));
     1861    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     1862
     1863    uint32_t cLoops = 0;
     1864    PSVMVMCB pVmcb  = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     1865    int      rc     = VERR_INTERNAL_ERROR_5;
     1866
     1867    for (;; cLoops++)
     1868    {
     1869        Assert(!HMR0SuspendPending());
     1870        AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
     1871                  ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
     1872                  (unsigned)RTMpCpuId(), cLoops));
     1873
     1874        /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
     1875        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
      1876        rc = hmR0SvmPreRunGuest(pVM, pVCpu, pCtx);
     1877        if (rc != VINF_SUCCESS)
     1878            break;
     1879
     1880        /*
     1881         * No longjmps to ring-3 from this point on!!!
     1882         * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
     1883         * This also disables flushing of the R0-logger instance (if any).
     1884         */
     1885        VMMRZCallRing3Disable(pVCpu);
     1886        VMMRZCallRing3RemoveNotification(pVCpu);
      1887        hmR0SvmPreRunGuestCommitted(pVM, pVCpu, pCtx);
     1888
     1889        rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
     1890
     1891        /** -XXX- todo  */
     1892    }
     1893
     1894    return rc;
     1895}