Changeset 46500 in vbox for trunk/src/VBox/VMM
Timestamp: Jun 11, 2013 4:00:10 PM
File: 1 edited
Legend: lines prefixed with '-' were removed (only in r46482), lines prefixed with '+' were added (only in r46500); all other lines are unmodified context.
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp

--- r46482
+++ r46500

@@ -494 +494 @@
     /* Set up unconditional intercepts and conditions. */
     pVmcb->ctrl.u32InterceptCtrl1 = SVM_CTRL1_INTERCEPT_INTR         /* External interrupt causes a VM-exit. */
-                                  | SVM_CTRL1_INTERCEPT_VINTR        /* When guest enabled interrupts cause a VM-exit. */
+                                  | SVM_CTRL1_INTERCEPT_VINTR        /* When guest enables interrupts cause a VM-exit. */
                                   | SVM_CTRL1_INTERCEPT_NMI          /* Non-Maskable Interrupts causes a VM-exit. */
                                   | SVM_CTRL1_INTERCEPT_SMI          /* System Management Interrupt cause a VM-exit. */
@@ -522 +522 @@
     pVmcb->ctrl.u16InterceptRdCRx = RT_BIT(0) | RT_BIT(4);

-    /* CR0, CR4 writes must be intercepted for obvious reasons. */
+    /* CR0, CR4 writes must be intercepted for the same reasons as above. */
     pVmcb->ctrl.u16InterceptWrCRx = RT_BIT(0) | RT_BIT(4);

@@ -532 +532 @@
     pVmcb->ctrl.IntCtrl.n.u1VIrqMasking = 1;

-    /* Ignore the priority in the TPR; just deliver it to the guest when we tell it to. */
+    /* Ignore the priority in the TPR; we take into account the guest TPR anyway while delivering interrupts. */
     pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR = 1;

@@ -545 +545 @@
     pVmcb->u64VmcbCleanBits = 0;

-    /* The ASID must start at 1; the host uses 0. */
+    /* The guest ASID MBNZ, set it to 1. The host uses 0. */
     pVmcb->ctrl.TLBCtrl.n.u32ASID = 1;

@@ -869 +869 @@

     /*
-     * When Nested Paging is not available use shadow page tables and intercept #PFs (latter done in SVMR0SetupVM()).
+     * When Nested Paging is not available use shadow page tables and intercept #PFs (the latter done in SVMR0SetupVM()).
      */
     if (!pVM->hm.s.fNestedPaging)
@@ -914 +914 @@

         pVmcb->guest.u64CR0 = u64GuestCR0;
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
+        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
     }
@@ -951 +951 @@
         pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu);

-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
+        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
         pVCpu->hm.s.fContextUseFlags &= HM_CHANGED_GUEST_CR3;
     }
@@ -996 +996 @@

         pVmcb->guest.u64CR4 = u64GuestCR4;
-        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2;
+        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX;
         pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;
     }
@@ -1077 +1077 @@
     pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp;

-    /* Guest EFER MSR. */
-    /* AMD-V requires guest EFER.SVME to be set. Weird.
-       See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks". */
+    /*
+     * Guest EFER MSR.
+     * AMD-V requires guest EFER.SVME to be set. Weird. .
+     * See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks".
+     */
     pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME;

@@ -1085 +1087 @@
     if (CPUMIsGuestInLongModeEx(pCtx))
     {
-        pVmcb->guest.FS.u64Base  = pCtx->fs.u64Base;
-        pVmcb->guest.GS.u64Base  = pCtx->gs.u64Base;
+        pVmcb->guest.FS.u64Base = pCtx->fs.u64Base;
+        pVmcb->guest.GS.u64Base = pCtx->gs.u64Base;
     }
     else
@@ -1103 +1105 @@
 }

+
 /**
  * Loads the guest debug registers into the VMCB.
@@ -1142 +1145 @@
         if (!CPUMIsHyperDebugStateActive(pVCpu))
         {
-            rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
+            rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
             AssertRC(rc);

@@ -1153 +1156 @@
         fInterceptMovDRx = true;
     }
-    else if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
+    else if (pCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD))
     {
         if (!CPUMIsGuestDebugStateActive(pVCpu))
         {
-            rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pMixedCtx, true /* include DR6 */);
+            rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
             AssertRC(rc);
             STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
@@ -1203 +1206 @@
  *
  * @returns VBox status code.
- * @param   pVCpu       Pointer to the VMCPU.
- * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
- *                      out-of-sync. Make sure to update the required fields
- *                      before using them.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
  *
  * @remarks No-long-jump zone!!!
@@ -1249 +1250 @@
     AssertPtr(pVM);
     AssertPtr(pVCpu);
-    AssertPtr(pMixedCtx);
+    AssertPtr(pCtx);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

@@ -1257 +1258 @@
     STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);

-    int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pMixedCtx);
+    int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestControlRegs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

@@ -1272 +1273 @@
     hmR0SvmLoadGuestDebugRegs(pVCpu, pCtx);

-    rc = hmR0SvmSetupVMRunHandler(pVCpu, pMixedCtx);
+    rc = hmR0SvmSetupVMRunHandler(pVCpu, pCtx);
     AssertLogRelMsgRCReturn(rc, ("hmR0SvmSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);

@@ -1299 +1300 @@
 static void hmR0SvmSetupTscOffsetting(PVMCPU pVCpu)
 {
-    PSVMVMCB pVmcb = pVCpu->hm.s.svm.pvVmcb;
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
     if (TMCpuTickCanUseRealTSC(pVCpu, &pVmcb->ctrl.u64TSCOffset))
     {
@@ -1328 +1329 @@

 /**
- * Posts a pending event (trap or external interrupt). An injected event should only
- * be written to the VMCB immediately before VMRUN, otherwise we might have stale events
- * injected across VM resets and suchlike. See @bugref{6220}.
+ * Sets an event as a pending event to be injected into the guest.
+ *
+ * @param   pVCpu               Pointer to the VMCPU.
+ * @param   pEvent              Pointer to the SVM event.
+ * @param   GCPtrFaultAddress   The fault-address (CR2) in case it's a
+ *                              page-fault.
+ */
+DECLINLINE(void) hmR0SvmSetPendingEvent(PVMCPU pVCpu, PSVMEVENT pEvent, RTGCUINTPTR GCPtrFaultAddress)
+{
+    Assert(!pVCpu->hm.s.Event.fPending);
+
+    pVCpu->hm.s.Event.u64IntrInfo       = pEvent->u;
+    pVCpu->hm.s.Event.fPending          = true;
+    pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
+
+#ifdef VBOX_STRICT
+    if (GCPtrFaultAddress)
+    {
+        AssertMsg(   pEvent->n.u8Vector == X86_XCPT_PF
+                  && pEvent->n.u3Type == SVM_EVENT_EXCEPTION,
+                  ("hmR0SvmSetPendingEvent: Setting fault-address for non-#PF. u8Vector=%#x Type=%#RX32 GCPtrFaultAddr=%#RGx\n",
+                   pEvent->n.u8Vector, (uint32_t)pEvent->n.u3Type, GCPtrFaultAddress));
+        Assert(GCPtrFaultAddress == CPUMGetGuestCR2(pVCpu));
+    }
+#endif
+
+    Log4(("hmR0SvmSetPendingEvent: u=%#RX64 u8Vector=%#x ErrorCodeValid=%#x ErrorCode=%#RX32\n", pEvent->u,
+          pEvent->n.u8Vector, pEvent->n.u3Type, (uint8_t)pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
+}
+
+
+/**
+ * Injects an event into the guest upon VMRUN by updating the relevant field
+ * in the VMCB.
  *
  * @param   pVCpu       Pointer to the VMCPU.
- * @param   pEvent      Pointer to the SVM event.
- */
-DECLINLINE(void) hmR0SvmSetPendingEvent(PVMCPU pVCpu, SVMEVENT *pEvent)
-{
-    Log4(("SVM: Set pending event: intInfo=%#RX64\n", pEvent->u));
-
-    /* If there's an event pending already, we're in trouble... */
+ * @param   pVmcb       Pointer to the guest VMCB.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   pEvent      Pointer to the event.
+ *
+ * @remarks No-long-jump zone!!!
+ * @remarks Requires CR0!
+ */
+DECLINLINE(void) hmR0SvmInjectEventVmcb(PVMCPU pVCpu, PSVMVMCB pVmcb, PCPUMCTX pCtx, PSVMEVENT pEvent)
+{
+    pVmcb->ctrl.EventInject.u = pEvent->u;
+    STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[pEvent->n.u8Vector & MASK_INJECT_IRQ_STAT]);
+}
+
+
+/**
+ * Converts any TRPM trap into a pending SVM event. This is typically used when
+ * entering from ring-3 (not longjmp returns).
+ *
+ * @param   pVCpu       Pointer to the VMCPU.
+ */
+static void hmR0SvmTrpmTrapToPendingEvent(PVMCPU pVCpu)
+{
+    Assert(TRPMHasTrap(pVCpu));
     Assert(!pVCpu->hm.s.Event.fPending);

-    /* Set pending event state. */
-    pVCpu->hm.s.Event.u64IntrInfo = pEvent->u;
-    pVCpu->hm.s.Event.fPending = true;
-}
-
+    uint8_t     uVector;
+    TRPMEVENT   enmTrpmEvent;
+    RTGCUINT    uErrCode;
+    RTGCUINTPTR GCPtrFaultAddress;
+    uint8_t     cbInstr;
+
+    int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
+    AssertRC(rc);
+
+    PSVMEVENT pEvent = &pVCpu->hm.s.Event;
+    pEvent->u = 0;
+    pEvent->n.u1Valid = 1;
+
+    /* Refer AMD spec. 15.20 "Event Injection" for the format. */
+    uint32_t u32IntrInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
+    if (enmTrpmEvent == TRPM_TRAP)
+    {
+        pEvent->n.u3Type = SVM_EVENT_EXCEPTION;
+        switch (uVector)
+        {
+            case X86_XCPT_PF:
+            case X86_XCPT_DF:
+            case X86_XCPT_TS:
+            case X86_XCPT_NP:
+            case X86_XCPT_SS:
+            case X86_XCPT_GP:
+            case X86_XCPT_AC:
+            {
+                pEvent->n.u32ErrorCode     = uErrCode;
+                pEvent->n.u1ErrorCodeValid = 1;
+                break;
+            }
+        }
+    }
+    else if (enmTrpmEvent == TRPM_HARDWARE_INT)
+    {
+        if (uVector == X86_XCPT_NMI)
+            pEvent->n.u3Type = SVM_EVENT_NMI;
+        else
+            pEvent->n.u3Type = SVM_EVENT_EXTERNAL_IRQ;
+    }
+    else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
+        pEvent->n.u3Type = SVM_EVENT_SOFTWARE_INT;
+    else
+        AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
+
+    rc = TRPMResetTrap(pVCpu);
+    AssertRC(rc);
+
+    Log4(("TRPM->HM event: u=%#RX64 u8Vector=%#x uErrorCodeValid=%#x uErrorCode=%#RX32\n", pEvent->u, pEvent->n.u8Vector,
+          pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));
+}
+
+
+/**
+ * Converts any pending SVM event into a TRPM trap. Typically used when leaving
+ * AMD-V to execute any instruction.
+ *
+ * @param   pvCpu       Pointer to the VMCPU.
+ */
+static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
+{
+    Assert(pVCpu->hm.s.Event.fPending);
+    Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
+
+    PSVMEVENT pEvent      = &pVCpu->hm.s.Event;
+    uint8_t   uVector     = pEvent->n.u8Vector;
+    uint8_t   uVectorType = pEvent->n.u3Type;
+
+    TRPMEVENT enmTrapType;
+    switch (uVectorType)
+    {
+        case SVM_EVENT_EXTERNAL_IRQ:
+        case SVM_EVENT_NMI:
+            enmTrapType = TRPM_HARDWARE_INT;
+            break;
+        case SVM_EVENT_SOFTWARE_INT:
+            enmTrapType = TRPM_SOFTWARE_INT;
+            break;
+        case SVM_EVENT_EXCEPTION:
+            enmTrapType = TRPM_TRAP;
+            break;
+        default:
+            AssertMsgFailed(("Invalid pending-event type %#x\n", uVectorType));
+            enmTrapType = TRPM_32BIT_HACK;
+            break;
+    }
+
+    Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, uVectorType));
+
+    int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
+    AssertRC(rc);
+
+    if (pEvent->n.u1ErrorCodeValid)
+        TRPMSetErrorCode(pVCpu, pEvent->n.u32ErrorCode);
+
+    if (   uVectorType == SVM_EVENT_EXCEPTION
+        && uVector == X86_XCPT_PF)
+    {
+        TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
+        Assert(pVCpu->hm.s.Event.GCPtrFaultAddress == CPUMGetGuestCR2(pVCpu));
+    }
+    else if (uVectorType == SVM_EVENT_SOFTWARE_INT)
+    {
+        AssertMsg(   uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
+                  || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
+                  ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
+        TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
+    }
+    pVCpu->hm.s.Event.fPending = false;
+}
+
+
+/**
+ * Gets the guest's interrupt-shadow.
+ *
+ * @returns The guest's interrupt-shadow.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ *
+ * @remarks No-long-jump zone!!!
+ * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
+ */
+DECLINLINE(uint32_t) hmR0SvmGetGuestIntrShadow(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    /*
+     * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
+     * inhibit interrupts or clear any existing interrupt-inhibition.
+     */
+    uint32_t uIntrState = 0;
+    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
+    {
+        if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
+        {
+            /*
+             * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in
+             * AMD-V, the flag's condition to be cleared is met and thus the cleared state is correct.
+             */
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
+        }
+        else
+            uIntrState = SVM_INTERRUPT_SHADOW_ACTIVE;
+    }
+    return uIntrState;
+}
+
+
+/**
+ * Sets the virtual interrupt intercept control in the VMCB which
+ * instructs AMD-V to cause a #VMEXIT as soon as the guest is in a state to
+ * receive interrupts.
+ *
+ * @param   pVmcb       Pointer to the VMCB.
+ */
+DECLINLINE(void) hmR0SvmSetVirtIntrIntercept(PSVMVMCB pVmcb)
+{
+    if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_VINTR))
+    {
+        pVmcb->ctrl.IntCtrl.n.u1VIrqValid  = 1;     /* A virtual interrupt is pending. */
+        pVmcb->ctrl.IntCtrl.n.u8VIrqVector = 0;     /* Not necessary as we #VMEXIT for delivering the interrupt. */
+        pVmcb->ctrl.u32InterceptCtrl1 |= SVM_CTRL1_INTERCEPT_VINTR;
+        pVmcb->u64VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
+    }
+}
+
+
+/**
+ * Injects any pending events into the guest if the guest is in a state to
+ * receive them.
+ *
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ */
+static void hmR0SvmInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    Assert(!TRPMHasTrap(pVCpu));
+
+    const bool fIntShadow = !!hmR0SvmGetGuestIntrShadow(pVCpu, pCtx);
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+
+    SVMEVENT Event;
+    Event.u = 0;
+    if (pVCpu->hm.s.Event.fPending)                                /* First, inject any pending HM events. */
+    {
+        Event.u = pVCpu->hm.s.Event.u64IntrInfo;
+        bool fInject = true;
+        if (   fIntShadow
+            && (   Event.n.u3Type == SVM_EVENT_EXTERNAL_IRQ
+                || Event.n.u3Type == SVM_EVENT_NMI))
+        {
+            fInject = false;
+        }
+
+        if (   fInject
+            && Event.n.u1Valid)
+        {
+            pVCpu->hm.s.Event.fPending = false;
+            hmR0SvmInjectEvent(pVCpu, pVmcb, pCtx, &Event);
+        }
+        else
+            hmR0SvmSetVirtIntrIntercept(pVmcb);
+    }                                                              /** @todo SMI. SMIs take priority over NMIs. */
+    else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI))       /* NMI. NMIs take priority over regular interrupts. */
+    {
+        if (!fIntShadow)
+        {
+            Log4(("Injecting NMI\n"));
+            Event.n.u1Valid  = 1;
+            Event.n.u8Vector = X86_XCPT_NMI;
+            Event.n.u3Type   = SVM_EVENT_NMI;
+
+            hmR0SvmInjectEvent(pVCpu, pVmcb, pCtx, &Event);
+            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
+        }
+        else
+            hmR0SvmSetVirtIntrIntercept(pVmcb);
+    }
+    else if (VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)))
+    {
+        /* Check if there are guest external interrupts (PIC/APIC) pending and inject them if the guest can receive them. */
+        const bool fBlockInt = !(pCtx->eflags.u32 & X86_EFL_IF);
+        if (   !fBlockInt
+            && !fIntShadow)
+        {
+            uint8_t u8Interrupt;
+            rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
+            if (RT_SUCCESS(rc))
+            {
+                Log4(("Injecting interrupt u8Interrupt=%#x\n", u8Interrupt));
+
+                Event.n.u1Valid  = 1;
+                Event.n.u8Vector = u8Interrupt;
+                Event.n.u3Type   = SVM_EVENT_EXTERNAL_IRQ;
+
+                hmR0SvmInjectEvent(pVCpu, pVmcb, pCtx, &Event);
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
+            }
+            else
+            {
+                /** @todo Does this actually happen? If not turn it into an assertion. */
+                Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
+                STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
+            }
+        }
+        else
+            hmR0SvmSetVirtIntrIntercept(pVmcb);
+    }
+
+    /* Update the guest interrupt shadow in the VMCB. */
+    pVmcb->ctrl.u64IntShadow = !!fIntShadow;
+}
+
+
+/**
+ * Check per-VM and per-VCPU force flag actions that require us to go back to
+ * ring-3 for one reason or another.
+ *
+ * @returns VBox status code (information status code included).
+ * @retval VINF_SUCCESS if we don't have any actions that require going back to
+ *         ring-3.
+ * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
+ * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
+ *         interrupts)
+ * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
+ *         all EMTs to be in ring-3.
+ * @retval VINF_EM_RAW_TO_R3 if there is pending DMA requests.
+ * @retval VINF_EM_NO_MEMORY PGM is out of memory, we need to return
+ *         to the EM loop.
+ *
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ */
+static int hmR0SvmCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    Assert(VMMRZCallRing3IsEnabled(pVCpu));
+
+    if (   VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
+        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
+                                      | VMCPU_FF_REQUEST | VMCPU_FF_HM_UPDATE_CR3))
+    {
+        /* Pending HM CR3 sync. No PAE PDPEs (VMCPU_FF_HM_UPDATE_PAE_PDPES) on AMD-V. */
+        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
+        {
+            rc = PGMUpdateCR3(pVCpu, pCtx->cr3);
+            Assert(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3);
+            Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
+        }
+
+        /* Pending PGM C3 sync. */
+        if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
+        {
+            rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
+            if (rc != VINF_SUCCESS)
+            {
+                AssertRC(rc);
+                Log4(("hmR0SvmCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));
+                return rc;
+            }
+        }
+
+        /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
+        /* -XXX- what was that about single stepping? */
+        if (   VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
+            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
+        {
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
+            rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
+            Log4(("hmR0SvmCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));
+            return rc;
+        }
+
+        /* Pending VM request packets, such as hardware interrupts. */
+        if (   VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
+            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
+        {
+            Log4(("hmR0SvmCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
+            return VINF_EM_PENDING_REQUEST;
+        }
+
+        /* Pending PGM pool flushes. */
+        if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
+        {
+            Log4(("hmR0SvmCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
+            return VINF_PGM_POOL_FLUSH_PENDING;
+        }
+
+        /* Pending DMA requests. */
+        if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
+        {
+            Log4(("hmR0SvmCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
+            return VINF_EM_RAW_TO_R3;
+        }
+    }
+
+    /* Paranoia. */
+    Assert(rc != VERR_EM_INTERPRETER);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Does the preparations before executing guest code in AMD-V.
+ *
+ * This may cause longjmps to ring-3 and may even result in rescheduling to the
+ * recompiler. We must be cautious what we do here regarding committing
+ * guest-state information into the the VMCB assuming we assuredly execute the
+ * guest in AMD-V. If we fall back to the recompiler after updating the VMCB and
+ * clearing the common-state (TRPM/forceflags), we must undo those changes so
+ * that the recompiler can (and should) use them when it resumes guest
+ * execution. Otherwise such operations must be done when we can no longer
+ * exit to ring-3.
+ *
+ * @returns VBox status code (informational status codes included).
+ * @retval VINF_SUCCESS if we can proceed with running the guest.
+ * @retval VINF_* scheduling changes, we have to go back to ring-3.
+ *
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ */
+DECLINLINE(int) hmR0SvmPreRunGuest(PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    /* Check force flag actions that might require us to go back to ring-3. */
+    int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pCtx);
+    if (rc != VINF_SUCCESS)
+        return rc;
+
+#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+    /* We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.) */
+    pVmxTransient->uEFlags = ASMIntDisableFlags();
+    if (RTThreadPreemptIsPending(NIL_RTTHREAD))
+    {
+        ASMSetFlags(pVmxTransient->uEFlags);
+        STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
+        /* Don't use VINF_EM_RAW_INTERRUPT_HYPER as we can't assume the host does kernel preemption. Maybe some day? */
+        return VINF_EM_RAW_INTERRUPT;
+    }
+    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
+    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
+#endif
+
+    /** @todo -XXX- TPR patching. */
+
+    /* Convert any pending TRPM traps to HM events for injection. */
+    if (TRPMHasTrap(pVCpu))
+        hmR0SvmTrpmTrapToPendingEvent(pVCpu);
+
+    hmR0SvmInjectPendingEvent(pVCpu, pCtx);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Prepares to run guest code in VT-x and we've committed to doing so. This
+ * means there is no backing out to ring-3 or anywhere else at this
+ * point.
+ *
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ *
+ * @remarks Called with preemption disabled.
+ * @remarks No-long-jump zone!!!
+ */
+DECLINLINE(void) hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
+    Assert(VMMR0IsLogFlushDisabled(pVCpu));
+
+#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
+    /** @todo I don't see the point of this, VMMR0EntryFast() already disables interrupts for the entire period. */
+    pVmxTransient->uEFlags = ASMIntDisableFlags();
+    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
+#endif
+
+    /*
+     * Re-enable nested paging (automatically disabled on every VM-exit). See AMD spec. 15.25.3 "Enabling Nested Paging".
+     * We avoid changing the corresponding VMCB Clean Bit as we're not changing it to a different value since the previous run.
+     */
+    /** @todo The above assumption could be wrong. It's not documented what
+     *        should be done wrt to the VMCB Clean Bit, but we'll find out the
+     *        hard way. */
+    pVmcb->ctrl.NestedPaging.n.u1NestedPaging = pVM->hm.s.fNestedPaging;
+
+    /* Load the guest state. */
+    int rc = SVMR0LoadGuestState(pVM, pVCpu, pCtx);
+    AssertRC(rc);
+    AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags =%#x\n", pVCpu->hm.s.fContextUseFlags));

+}
+
+
+/**
+ * Wrapper for running the guest code in AMD-V.
+ *
+ * @returns VBox strict status code.
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+DECLINLINE(int) hmR0SvmRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    /*
+     * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
+     * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved and thus the need for this XMM wrapper.
+     * Refer MSDN docs. "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
+     */
+#ifdef VBOX_WITH_KERNEL_USING_XMM
+    return HMR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
+                             pVCpu->hm.s.svm.pfnVMRun);
+#else
+    return pVCpu->hm.s.svm.pfnStartVM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
+#endif
+}
+
+
+/**
+ * Runs the guest code using AMD-V.
+ *
+ * @returns VBox status code.
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest CPU context.
+ */
+VMMR0DECL(int) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    Assert(VMMRZCallRing3IsEnabled(pVCpu));
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+    uint32_t cLoops = 0;
+    PSVMVMCB pVmcb  = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+    int      rc     = VERR_INTERNAL_ERROR_5;
+
+    for (;; cLoops++)
+    {
+        Assert(!HMR0SuspendPending());
+        AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
+                  ("Illegal migration! Entered on CPU %u Current %u cLoops=%u\n", (unsigned)pVCpu->hm.s.idEnteredCpu,
+                   (unsigned)RTMpCpuId(), cLoops));
+
+        /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
+        STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
+        rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx);
+        if (rc != VINF_SUCCESS)
+            break;
+
+        /*
+         * No longjmps to ring-3 from this point on!!!
+         * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
+         * This also disables flushing of the R0-logger instance (if any).
+         */
+        VMMRZCallRing3Disable(pVCpu);
+        VMMRZCallRing3RemoveNotification(pVCpu);
+        hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx);
+
+        rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
+
+        /** -XXX- todo */
+    }
+
+    return rc;
+}
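
As a quick orientation aid (not part of the changeset), here is a minimal, hypothetical caller-side sketch of the pending-event API added above. It only uses the SVMEVENT fields and the hmR0SvmSetPendingEvent() helper that appear in the diff; the function name svmQueuePageFaultExample and its parameters are illustrative assumptions, not VirtualBox code.

/* Hypothetical example: queue a guest #PF as a pending HM event. The event is
   only copied into the VMCB by hmR0SvmInjectPendingEvent() right before VMRUN. */
static void svmQueuePageFaultExample(PVMCPU pVCpu, RTGCUINTPTR GCPtrFault, uint32_t uErrCode)
{
    SVMEVENT Event;
    Event.u                  = 0;
    Event.n.u1Valid          = 1;
    Event.n.u3Type           = SVM_EVENT_EXCEPTION;
    Event.n.u8Vector         = X86_XCPT_PF;
    Event.n.u1ErrorCodeValid = 1;
    Event.n.u32ErrorCode     = uErrCode;

    /* hmR0SvmSetPendingEvent() asserts (in strict builds) that CR2 already holds
       the fault address, so the caller is expected to have updated CR2 first. */
    hmR0SvmSetPendingEvent(pVCpu, &Event, GCPtrFault);
}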