- Timestamp: Jun 12, 2013 1:39:25 PM (12 years ago)
- Location: trunk
- Files: 2 edited
Legend: unchanged lines are prefixed with a space, added lines with '+', removed lines with '-'; '…' marks skipped context between hunks.
trunk/include/VBox/err.h (r45875 → r46508)

 /** Unexpected SVM patch type. */
 #define VERR_SVM_UNEXPECTED_PATCH_TYPE          (-4058)
+/** Unable to start VM execution due to an invalid guest state. */
+#define VERR_SVM_INVALID_GUEST_STATE            (-4059)
 /** @} */
 
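The new status code is consumed by the HMSVMR0.cpp changes below: when VMRUN fails, or when the CPU refuses to run the guest and reports the invalid exit code, the run loop converts that condition into VERR_SVM_INVALID_GUEST_STATE and bails out to ring-3. A minimal, self-contained sketch of that mapping follows; the constant values and the helper name svmCheckRunResult are stand-ins for illustration, not the actual VirtualBox definitions.

    #include <stdint.h>

    /* Stand-in definitions; the real ones live in VBox/err.h and the SVM headers. */
    #define VINF_SUCCESS                    0
    #define VERR_SVM_INVALID_GUEST_STATE    (-4059)
    #define SVM_EXIT_INVALID                UINT64_C(0xffffffffffffffff)

    /* Hypothetical helper mirroring the pattern used in SVMR0RunGuestCode below:
       map a failed VMRUN, or an invalid-guest-state #VMEXIT, to the new error code. */
    static int svmCheckRunResult(int rcVMRun, uint64_t u64ExitCode)
    {
        if (rcVMRun != VINF_SUCCESS)
            return rcVMRun;                         /* VMRUN itself failed. */
        if (u64ExitCode == SVM_EXIT_INVALID)
            return VERR_SVM_INVALID_GUEST_STATE;    /* The CPU rejected the guest state. */
        return VINF_SUCCESS;                        /* Normal #VMEXIT; handle it. */
    }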
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp (r46504 → r46508)

 /** @} */
 
-/** @name VMCB Clean Bits used for VMCB-state caching. */
+/** @name VMCB Clean Bits.
+ *
+ * These flags are used for VMCB-state caching. A set VMCB Clean Bit indicates
+ * AMD-V doesn't need to reload the corresponding value(s) from the VMCB in
+ * memory.
+ *
+ * @{ */
 /** All intercepts vectors, TSC offset, PAUSE filter counter. */
 #define HMSVM_VMCB_CLEAN_INTERCEPTS             RT_BIT(0)
…
 /** @} */
 
-/** @name SVM -transient.
+/** @name SVM transient.
  *
  * A state structure for holding miscellaneous information across AMD-V
…
     /** The host's rflags/eflags. */
     RTCCUINTREG     uEFlags;
+#if HC_ARCH_BITS == 32
+    uint32_t        u32Alignment0;
+#endif
+
+    /** The #VMEXIT exit code. */
+    uint64_t        u64ExitCode;
 } SVMTRANSIENT, *PSVMTRANSIENT;
 /** @} */
…
 
 
-/**
- * Saves the host state.
- *
- * @returns VBox status code.
- * @param   pVM         Pointer to the VM.
- * @param   pVCpu       Pointer to the VMCPU.
- *
- * @remarks No-long-jump zone!!!
- */
-VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu)
-{
-    NOREF(pVM);
-    NOREF(pVCpu);
-    /* Nothing to do here. AMD-V does this for us automatically during the world-switch. */
-    return VINF_SUCCESS;
-}
-
-
 DECLINLINE(void) hmR0SvmAddXcptIntercept(uint32_t u32Xcpt)
 {
…
 
 /**
+ * Enters the AMD-V session.
+ *
+ * @returns VBox status code.
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCpu        Pointer to the CPU info struct.
+ */
+VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
+{
+    AssertPtr(pVM);
+    AssertPtr(pVCpu);
+    Assert(pVM->hm.s.svm.fSupported);
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    NOREF(pCpu);
+
+    LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
+
+    /* Nothing to do here. */
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Leaves the AMD-V session.
+ *
+ * @returns VBox status code.
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ */
+VMMR0DECL(int) SVMR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
+{
+    AssertPtr(pVCpu);
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    NOREF(pVM);
+    NOREF(pCtx);
+
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+
+    /** -xxx- todo. */
+
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Saves the host state.
+ *
+ * @returns VBox status code.
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ *
+ * @remarks No-long-jump zone!!!
+ */
+VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu)
+{
+    NOREF(pVM);
+    NOREF(pVCpu);
+    /* Nothing to do here. AMD-V does this for us automatically during the world-switch. */
+    return VINF_SUCCESS;
+}
+
+
+/**
  * Loads the guest state.
  *
…
 
     return rc;
+}
+
+
+/**
+ * Saves the entire guest state from the VMCB into the
+ * guest-CPU context. Currently there is no residual state left in the CPU that
+ * is not updated in the VMCB.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
+ *                      out-of-sync. Make sure to update the required fields
+ *                      before using them.
+ */
+static void hmR0SvmSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
+{
+    Assert(VMMRZCallRing3IsEnabled(pVCpu));
+
+    PSVMVMCB pVmcb = (PSVMVMCB)pVCpu->hm.s.svm.pvVmcb;
+
+    pMixedCtx->rip        = pVmcb->guest.u64RIP;
+    pMixedCtx->rsp        = pVmcb->guest.u64RSP;
+    pMixedCtx->eflags.u32 = pVmcb->guest.u64RFlags;
+    pMixedCtx->rax        = pVmcb->guest.u64RAX;
+
+    /*
+     * Save all the MSRs that can be changed by the guest without causing a world switch.
+     * FS & GS base are saved with HMSVM_SAVE_SEG_REG.
+     */
+    pMixedCtx->msrSTAR         = pVmcb->guest.u64STAR;            /* legacy syscall eip, cs & ss */
+    pMixedCtx->msrLSTAR        = pVmcb->guest.u64LSTAR;           /* 64-bit mode syscall rip */
+    pMixedCtx->msrCSTAR        = pVmcb->guest.u64CSTAR;           /* compatibility mode syscall rip */
+    pMixedCtx->msrSFMASK       = pVmcb->guest.u64SFMASK;          /* syscall flag mask */
+    pMixedCtx->msrKERNELGSBASE = pVmcb->guest.u64KernelGSBase;    /* swapgs exchange value */
+    pMixedCtx->SysEnter.cs     = pVmcb->guest.u64SysEnterCS;
+    pMixedCtx->SysEnter.eip    = pVmcb->guest.u64SysEnterEIP;
+    pMixedCtx->SysEnter.esp    = pVmcb->guest.u64SysEnterESP;
+
+    /* Can be updated behind our back in the nested paging case. */
+    pMixedCtx->cr2 = pVmcb->guest.u64CR2;
+
+    /* Segment registers: CS, SS, DS, ES, FS, GS. */
+    HMSVM_SAVE_SEG_REG(CS, ss);
+    HMSVM_SAVE_SEG_REG(SS, cs);
+    HMSVM_SAVE_SEG_REG(DS, ds);
+    HMSVM_SAVE_SEG_REG(ES, es);
+    HMSVM_SAVE_SEG_REG(FS, fs);
+    HMSVM_SAVE_SEG_REG(GS, gs);
+
+    /*
+     * Correct the hidden CS granularity flag. Haven't seen it being wrong in any other
+     * register (yet).
+     */
+    /** @todo Verify this. */
+    if (   !pMixedCtx->cs.Attr.n.u1Granularity
+        &&  pMixedCtx->cs.Attr.n.u1Present
+        &&  pMixedCtx->cs.u32Limit > UINT32_C(0xfffff))
+    {
+        Assert((pMixedCtx->cs.u32Limit & 0xfff) == 0xfff);
+        pMixedCtx->cs.Attr.n.u1Granularity = 1;
+    }
+#ifdef VBOX_STRICT
+# define HMSVM_ASSERT_SEL_GRANULARITY(reg) \
+    AssertMsg(   !pMixedCtx->reg.Attr.n.u1Present \
+              || (   pMixedCtx->reg.Attr.n.u1Granularity \
+                  ?  (pMixedCtx->reg.u32Limit & 0xfff) == 0xfff \
+                  :   pMixedCtx->reg.u32Limit <= 0xfffff), \
+              ("Invalid Segment Attributes %#x %#x %#llx\n", pMixedCtx->reg.u32Limit, \
+               pMixedCtx->reg.Attr.u, pMixedCtx->reg.u64Base))
+    HMSVM_ASSERT_SEG_GRANULARITY(ss);
+    HMSVM_ASSERT_SEG_GRANULARITY(cs);
+    HMSVM_ASSERT_SEG_GRANULARITY(ds);
+    HMSVM_ASSERT_SEG_GRANULARITY(es);
+    HMSVM_ASSERT_SEG_GRANULARITY(fs);
+    HMSVM_ASSERT_SEG_GRANULARITY(gs);
+# undef HMSVM_ASSERT_SEL_GRANULARITY
+#endif
+
+    /*
+     * Correct the hidden SS DPL field. It can be wrong on certain CPUs sometimes (seen on
+     * AMD Fusion CPUs with 64-bit guests). The CPU always uses the CPL field in the VMCB
+     * instead of the DPL in the hidden SS. See AMD spec. 15.5.1 "Basic operation".
+     */
+    /** @todo Verify this. */
+    Assert(!(pVmcb->guest.u8CPL & ~0x3));
+    pMixedCtx->ss.Attr.n.u2Dpl = pVmcb->guest.u8CPL & 0x3;
+
+    /*
+     * Descriptor Table Registers: TR, IDTR, GDTR, LDTR.
+     */
+    HMSVM_SAVE_SEG_REG(TR, tr);
+    HMSVM_SAVE_SEG_REG(LDTR, ldtr);
+
+    pMixedCtx->gdtr.cbGdt = pVmcb->guest.GDTR.u32Limit;
+    pMixedCtx->gdtr.pGdt  = pVmcb->guest.GDTR.u64Base;
+
+    pMixedCtx->idtr.cbIdt = pVmcb->guest.IDTR.u32Limit;
+    pMixedCtx->idtr.pIdt  = pVmcb->guest.IDTR.u64Base;
+
+    /*
+     * Debug registers.
+     */
+    pMixedCtx->dr[6] = pVmcb->guest.u64DR6;
+    pMixedCtx->dr[7] = pVmcb->guest.u64DR7;
+
+    /*
+     * With Nested Paging, CR3 changes are not intercepted. Therefore, sync. it now.
+     */
+    if (   pVM->hm.s.fNestedPaging
+        && pMixedCtx->cr3 != pVmcb->guest.u64CR3)
+    {
+        CPUMSetGuestCR3(pVCpu, pVmcb->guest.u64CR3);
+        PGMUpdateCR3(pVCpu, pVmcb->guest.u64CR3);   /* This may longjmp to ring-3 hence done at the very end. */
+    }
+}
+
+
+/**
+ * Does the necessary state syncing before doing a longjmp to ring-3.
+ *
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   rcExit      The reason for exiting to ring-3. Can be
+ *                      VINF_VMM_UNKNOWN_RING3_CALL.
+ *
+ * @remarks No-long-jmp zone!!!
+ */
+static void hmR0SvmLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rcExit)
+{
+    Assert(!VMMRZCallRing3IsEnabled(pVCpu));
+    Assert(VMMR0IsLogFlushDisabled(pVCpu));
+
+    /* Restore FPU state if necessary and resync on next R0 reentry .*/
+    if (CPUMIsGuestFPUStateActive(pVCpu))
+    {
+        CPUMR0SaveGuestFPU(pVM, pVCpu, pCtx);
+        Assert(!CPUMIsGuestFPUStateActive(pVCpu));
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
+    }
+
+    /* Restore debug registers if necessary and resync on next R0 reentry. */
+    if (CPUMIsGuestDebugStateActive(pVCpu))
+    {
+        CPUMR0SaveGuestDebugState(pVM, pVCpu, pCtx, true /* save DR6 */);
+        Assert(!CPUMIsGuestDebugStateActive(pVCpu));
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
+    }
+    else if (CPUMIsHyperDebugStateActive(pVCpu))
+    {
+        CPUMR0LoadHostDebugState(pVM, pVCpu);
+        Assert(!CPUMIsHyperDebugStateActive(pVCpu));
+        Assert(pVmcb->ctrl.u16InterceptRdDRx == 0xffff);
+        Assert(pVmcb->ctrl.u16InterceptWrDRx == 0xffff);
+    }
+
+    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
+    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
+}
+
+
+/**
+ * An action requires us to go back to ring-3. This function does the necessary
+ * steps before we can safely return to ring-3. This is not the same as longjmps
+ * to ring-3, this is voluntary.
+ *
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   pCtx        Pointer to the guest-CPU context.
+ * @param   rcExit      The reason for exiting to ring-3. Can be
+ *                      VINF_VMM_UNKNOWN_RING3_CALL.
+ */
+static void hmR0SvmExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rcExit)
+{
+    Assert(pVM);
+    Assert(pVCpu);
+    Assert(pCtx);
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+
+    if (RT_UNLIKELY(rcExit == VERR_SVM_INVALID_GUEST_STATE))
+    {
+        /* We don't need to do any syncing here, we're not going to come back to execute anything again. */
+        return;
+    }
+
+    /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
+    VMMRZCallRing3Disable(pVCpu);
+    Log4(("hmR0SvmExitToRing3: rcExit=%d\n", rcExit));
+
+    /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */
+    if (pVCpu->hm.s.Event.fPending)
+    {
+        hmR0SvmPendingEventToTrpmTrap(pVCpu);
+        Assert(!pVCpu->hm.s.Event.fPending);
+    }
+
+    /* Sync. the guest state. */
+    hmR0SvmLongJmpToRing3(pVM, pVCpu, pCtx, rcExit);
+    STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
+
+    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
+    CPUMSetChangedFlags(pVCpu,  CPUM_CHANGED_SYSENTER_MSR
+                              | CPUM_CHANGED_LDTR
+                              | CPUM_CHANGED_GDTR
+                              | CPUM_CHANGED_IDTR
+                              | CPUM_CHANGED_TR
+                              | CPUM_CHANGED_HIDDEN_SEL_REGS);
+
+    /* On our way back from ring-3 the following needs to be done. */
+    /** @todo This can change with preemption hooks. */
+    if (rcExit == VINF_EM_RAW_INTERRUPT)
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT;
+    else
+        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
+
+    STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
+    VMMRZCallRing3Enable(pVCpu);
 }
 
…
 
 /**
+ * Reports world-switch error and dumps some useful debug info.
+ *
+ * @param   pVM         Pointer to the VM.
+ * @param   pVCpu       Pointer to the VMCPU.
+ * @param   rcVMRun     The return code from VMRUN (or
+ *                      VERR_SVM_INVALID_GUEST_STATE for invalid
+ *                      guest-state).
+ * @param   pCtx        Pointer to the guest-CPU context.
+ */
+static void hmR0SvmReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx)
+{
+    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
+    if (rcVMRun == VERR_SVM_INVALID_GUEST_STATE)
+    {
+        HMDumpRegs(pVM, pVCpu, pCtx);
+#ifdef VBOX_STRICT
+        Log4(("ctrl.u16InterceptRdCRx             %#x\n",    pVmcb->ctrl.u16InterceptRdCRx));
+        Log4(("ctrl.u16InterceptWrCRx             %#x\n",    pVmcb->ctrl.u16InterceptWrCRx));
+        Log4(("ctrl.u16InterceptRdDRx             %#x\n",    pVmcb->ctrl.u16InterceptRdDRx));
+        Log4(("ctrl.u16InterceptWrDRx             %#x\n",    pVmcb->ctrl.u16InterceptWrDRx));
+        Log4(("ctrl.u32InterceptException         %#x\n",    pVmcb->ctrl.u32InterceptException));
+        Log4(("ctrl.u32InterceptCtrl1             %#x\n",    pVmcb->ctrl.u32InterceptCtrl1));
+        Log4(("ctrl.u32InterceptCtrl2             %#x\n",    pVmcb->ctrl.u32InterceptCtrl2));
+        Log4(("ctrl.u64IOPMPhysAddr               %#RX64\n", pVmcb->ctrl.u64IOPMPhysAddr));
+        Log4(("ctrl.u64MSRPMPhysAddr              %#RX64\n", pVmcb->ctrl.u64MSRPMPhysAddr));
+        Log4(("ctrl.u64TSCOffset                  %#RX64\n", pVmcb->ctrl.u64TSCOffset));
+
+        Log4(("ctrl.TLBCtrl.u32ASID               %#x\n",    pVmcb->ctrl.TLBCtrl.n.u32ASID));
+        Log4(("ctrl.TLBCtrl.u8TLBFlush            %#x\n",    pVmcb->ctrl.TLBCtrl.n.u8TLBFlush));
+        Log4(("ctrl.TLBCtrl.u24Reserved           %#x\n",    pVmcb->ctrl.TLBCtrl.n.u24Reserved));
+
+        Log4(("ctrl.IntCtrl.u8VTPR                %#x\n",    pVmcb->ctrl.IntCtrl.n.u8VTPR));
+        Log4(("ctrl.IntCtrl.u1VIrqValid           %#x\n",    pVmcb->ctrl.IntCtrl.n.u1VIrqValid));
+        Log4(("ctrl.IntCtrl.u7Reserved            %#x\n",    pVmcb->ctrl.IntCtrl.n.u7Reserved));
+        Log4(("ctrl.IntCtrl.u4VIrqPriority        %#x\n",    pVmcb->ctrl.IntCtrl.n.u4VIrqPriority));
+        Log4(("ctrl.IntCtrl.u1IgnoreTPR           %#x\n",    pVmcb->ctrl.IntCtrl.n.u1IgnoreTPR));
+        Log4(("ctrl.IntCtrl.u3Reserved            %#x\n",    pVmcb->ctrl.IntCtrl.n.u3Reserved));
+        Log4(("ctrl.IntCtrl.u1VIrqMasking         %#x\n",    pVmcb->ctrl.IntCtrl.n.u1VIrqMasking));
+        Log4(("ctrl.IntCtrl.u6Reserved            %#x\n",    pVmcb->ctrl.IntCtrl.n.u6Reserved));
+        Log4(("ctrl.IntCtrl.u8VIrqVector          %#x\n",    pVmcb->ctrl.IntCtrl.n.u8VIrqVector));
+        Log4(("ctrl.IntCtrl.u24Reserved           %#x\n",    pVmcb->ctrl.IntCtrl.n.u24Reserved));
+
+        Log4(("ctrl.u64IntShadow                  %#RX64\n", pVmcb->ctrl.u64IntShadow));
+        Log4(("ctrl.u64ExitCode                   %#RX64\n", pVmcb->ctrl.u64ExitCode));
+        Log4(("ctrl.u64ExitInfo1                  %#RX64\n", pVmcb->ctrl.u64ExitInfo1));
+        Log4(("ctrl.u64ExitInfo2                  %#RX64\n", pVmcb->ctrl.u64ExitInfo2));
+        Log4(("ctrl.ExitIntInfo.u8Vector          %#x\n",    pVmcb->ctrl.ExitIntInfo.n.u8Vector));
+        Log4(("ctrl.ExitIntInfo.u3Type            %#x\n",    pVmcb->ctrl.ExitIntInfo.n.u3Type));
+        Log4(("ctrl.ExitIntInfo.u1ErrorCodeValid  %#x\n",    pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid));
+        Log4(("ctrl.ExitIntInfo.u19Reserved       %#x\n",    pVmcb->ctrl.ExitIntInfo.n.u19Reserved));
+        Log4(("ctrl.ExitIntInfo.u1Valid           %#x\n",    pVmcb->ctrl.ExitIntInfo.n.u1Valid));
+        Log4(("ctrl.ExitIntInfo.u32ErrorCode      %#x\n",    pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode));
+        Log4(("ctrl.NestedPaging                  %#RX64\n", pVmcb->ctrl.NestedPaging.u));
+        Log4(("ctrl.EventInject.u8Vector          %#x\n",    pVmcb->ctrl.EventInject.n.u8Vector));
+        Log4(("ctrl.EventInject.u3Type            %#x\n",    pVmcb->ctrl.EventInject.n.u3Type));
+        Log4(("ctrl.EventInject.u1ErrorCodeValid  %#x\n",    pVmcb->ctrl.EventInject.n.u1ErrorCodeValid));
+        Log4(("ctrl.EventInject.u19Reserved       %#x\n",    pVmcb->ctrl.EventInject.n.u19Reserved));
+        Log4(("ctrl.EventInject.u1Valid           %#x\n",    pVmcb->ctrl.EventInject.n.u1Valid));
+        Log4(("ctrl.EventInject.u32ErrorCode      %#x\n",    pVmcb->ctrl.EventInject.n.u32ErrorCode));
+
+        Log4(("ctrl.u64NestedPagingCR3            %#RX64\n", pVmcb->ctrl.u64NestedPagingCR3));
+        Log4(("ctrl.u64LBRVirt                    %#RX64\n", pVmcb->ctrl.u64LBRVirt));
+
+        Log4(("guest.CS.u16Sel                    %RTsel\n", pVmcb->guest.CS.u16Sel));
+        Log4(("guest.CS.u16Attr                   %#x\n",    pVmcb->guest.CS.u16Attr));
+        Log4(("guest.CS.u32Limit                  %#RX32\n", pVmcb->guest.CS.u32Limit));
+        Log4(("guest.CS.u64Base                   %#RX64\n", pVmcb->guest.CS.u64Base));
+        Log4(("guest.DS.u16Sel                    %#RTsel\n", pVmcb->guest.DS.u16Sel));
+        Log4(("guest.DS.u16Attr                   %#x\n",    pVmcb->guest.DS.u16Attr));
+        Log4(("guest.DS.u32Limit                  %#RX32\n", pVmcb->guest.DS.u32Limit));
+        Log4(("guest.DS.u64Base                   %#RX64\n", pVmcb->guest.DS.u64Base));
+        Log4(("guest.ES.u16Sel                    %RTsel\n", pVmcb->guest.ES.u16Sel));
+        Log4(("guest.ES.u16Attr                   %#x\n",    pVmcb->guest.ES.u16Attr));
+        Log4(("guest.ES.u32Limit                  %#RX32\n", pVmcb->guest.ES.u32Limit));
+        Log4(("guest.ES.u64Base                   %#RX64\n", pVmcb->guest.ES.u64Base));
+        Log4(("guest.FS.u16Sel                    %RTsel\n", pVmcb->guest.FS.u16Sel));
+        Log4(("guest.FS.u16Attr                   %#x\n",    pVmcb->guest.FS.u16Attr));
+        Log4(("guest.FS.u32Limit                  %#RX32\n", pVmcb->guest.FS.u32Limit));
+        Log4(("guest.FS.u64Base                   %#RX64\n", pVmcb->guest.FS.u64Base));
+        Log4(("guest.GS.u16Sel                    %RTsel\n", pVmcb->guest.GS.u16Sel));
+        Log4(("guest.GS.u16Attr                   %#x\n",    pVmcb->guest.GS.u16Attr));
+        Log4(("guest.GS.u32Limit                  %#RX32\n", pVmcb->guest.GS.u32Limit));
+        Log4(("guest.GS.u64Base                   %#RX64\n", pVmcb->guest.GS.u64Base));
+
+        Log4(("guest.GDTR.u32Limit                %#RX32\n", pVmcb->guest.GDTR.u32Limit));
+        Log4(("guest.GDTR.u64Base                 %#RX64\n", pVmcb->guest.GDTR.u64Base));
+
+        Log4(("guest.LDTR.u16Sel                  %RTsel\n", pVmcb->guest.LDTR.u16Sel));
+        Log4(("guest.LDTR.u16Attr                 %#x\n",    pVmcb->guest.LDTR.u16Attr));
+        Log4(("guest.LDTR.u32Limit                %#RX32\n", pVmcb->guest.LDTR.u32Limit));
+        Log4(("guest.LDTR.u64Base                 %#RX64\n", pVmcb->guest.LDTR.u64Base));
+
+        Log4(("guest.IDTR.u32Limit                %#RX32\n", pVmcb->guest.IDTR.u32Limit));
+        Log4(("guest.IDTR.u64Base                 %#RX64\n", pVmcb->guest.IDTR.u64Base));
+
+        Log4(("guest.TR.u16Sel                    %RTsel\n", pVmcb->guest.TR.u16Sel));
+        Log4(("guest.TR.u16Attr                   %#x\n",    pVmcb->guest.TR.u16Attr));
+        Log4(("guest.TR.u32Limit                  %#RX32\n", pVmcb->guest.TR.u32Limit));
+        Log4(("guest.TR.u64Base                   %#RX64\n", pVmcb->guest.TR.u64Base));
+
+        Log4(("guest.u8CPL                        %#x\n",    pVmcb->guest.u8CPL));
+        Log4(("guest.u64CR0                       %#RX64\n", pVmcb->guest.u64CR0));
+        Log4(("guest.u64CR2                       %#RX64\n", pVmcb->guest.u64CR2));
+        Log4(("guest.u64CR3                       %#RX64\n", pVmcb->guest.u64CR3));
+        Log4(("guest.u64CR4                       %#RX64\n", pVmcb->guest.u64CR4));
+        Log4(("guest.u64DR6                       %#RX64\n", pVmcb->guest.u64DR6));
+        Log4(("guest.u64DR7                       %#RX64\n", pVmcb->guest.u64DR7));
+
+        Log4(("guest.u64RIP                       %#RX64\n", pVmcb->guest.u64RIP));
+        Log4(("guest.u64RSP                       %#RX64\n", pVmcb->guest.u64RSP));
+        Log4(("guest.u64RAX                       %#RX64\n", pVmcb->guest.u64RAX));
+        Log4(("guest.u64RFlags                    %#RX64\n", pVmcb->guest.u64RFlags));
+
+        Log4(("guest.u64SysEnterCS                %#RX64\n", pVmcb->guest.u64SysEnterCS));
+        Log4(("guest.u64SysEnterEIP               %#RX64\n", pVmcb->guest.u64SysEnterEIP));
+        Log4(("guest.u64SysEnterESP               %#RX64\n", pVmcb->guest.u64SysEnterESP));
+
+        Log4(("guest.u64EFER                      %#RX64\n", pVmcb->guest.u64EFER));
+        Log4(("guest.u64STAR                      %#RX64\n", pVmcb->guest.u64STAR));
+        Log4(("guest.u64LSTAR                     %#RX64\n", pVmcb->guest.u64LSTAR));
+        Log4(("guest.u64CSTAR                     %#RX64\n", pVmcb->guest.u64CSTAR));
+        Log4(("guest.u64SFMASK                    %#RX64\n", pVmcb->guest.u64SFMASK));
+        Log4(("guest.u64KernelGSBase              %#RX64\n", pVmcb->guest.u64KernelGSBase));
+        Log4(("guest.u64GPAT                      %#RX64\n", pVmcb->guest.u64GPAT));
+        Log4(("guest.u64DBGCTL                    %#RX64\n", pVmcb->guest.u64DBGCTL));
+        Log4(("guest.u64BR_FROM                   %#RX64\n", pVmcb->guest.u64BR_FROM));
+        Log4(("guest.u64BR_TO                     %#RX64\n", pVmcb->guest.u64BR_TO));
+        Log4(("guest.u64LASTEXCPFROM              %#RX64\n", pVmcb->guest.u64LASTEXCPFROM));
+        Log4(("guest.u64LASTEXCPTO                %#RX64\n", pVmcb->guest.u64LASTEXCPTO));
+#endif
+    }
+    else
+        Log4(("hmR0SvmReportWorldSwitchError: rcVMRun=%d\n", rcVMRun));
+}
+
+
+/**
  * Check per-VM and per-VCPU force flag actions that require us to go back to
  * ring-3 for one reason or another.
…
 #endif
 
-    /** @todo -XXX- TPR patching. */
-
     /* Convert any pending TRPM traps to HM events for injection. */
+    /** @todo Optimization: move this before disabling interrupts, restore state
+     *        using pVmcb->ctrl.EventInject.u. */
     if (TRPMHasTrap(pVCpu))
         hmR0SvmTrpmTrapToPendingEvent(pVCpu);
 
     hmR0SvmInjectPendingEvent(pVCpu, pCtx);
+
+    /** @todo -XXX- TPR patching. */
     return VINF_SUCCESS;
 }
…
     VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
 #endif
+
+    /* -XXX- todo TPR syncing. */
 
     /*
…
      * This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc).
      */
-    u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX;
-    if (   (u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
+    if (   (pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
         && !(pVmcb->ctrl.u32InterceptCtrl2 & SVM_CTRL2_INTERCEPT_RDTSCP))
     {
…
      */
 #ifdef VBOX_WITH_KERNEL_USING_XMM
-    HMR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
-                      pVCpu->hm.s.svm.pfnVMRun);
+    return HMR0SVMRunWrapXMM(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu,
+                             pVCpu->hm.s.svm.pfnVMRun);
 #else
-    pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
+    return pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVmcbHost, pVCpu->hm.s.svm.HCPhysVmcb, pCtx, pVM, pVCpu);
 #endif
 }
…
     if (!(pVmcb->ctrl.u32InterceptCtrl1 & SVM_CTRL1_INTERCEPT_RDTSC))
     {
-        if (u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
+        if (pVM->hm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
             ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
 
…
 
     ASMSetFlags(pSvmTransient->uEFlags);        /* Enable interrupts. */
-
-    /* --XXX- todo */
-}
-
+    VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pMixedCtx);
+    VMMRZCallRing3Enable(pVCpu);                /* It is now safe to do longjmps to ring-3!!! */
+
+    pSvmTransient->u64ExitCode = pVmcb->ctrl.u64ExitCode;
+    hmR0SvmSaveGuestState(pVCpu, pMixedCtx);    /* Save the guest state from the VMCB to the guest-CPU context. */
+
+    /* --XXX- TPR syncing todo */
+
+    /* -XXX- premature interruption during event injection */
+}
 
 
…
  * @param   pVM         Pointer to the VM.
  * @param   pVCpu       Pointer to the VMCPU.
- * @param   pCtx        Pointer to the guest CPU context.
+ * @param   pCtx        Pointer to the guest-CPU context.
  */
 VMMR0DECL(int) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
…
         /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
         STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
-        rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &SvmTransient);
+        rc = hmR0SvmPreRunGuest(pVM, pVCpu, pCtx, &SvmTransient);
         if (rc != VINF_SUCCESS)
             break;
…
         rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx);
 
-        /** -XXX- todo */
-    }
-
+        /*
+         * Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
+         * This will also re-enable longjmps to ring-3 when it has reached a safe point!!!
+         */
+        hmR0SvmPostRunGuest(pVM, pVCpu, pCtx, &SvmTransient, rc);
+        if (RT_UNLIKELY(   rc != VINF_SUCCESS                               /* Check for errors with running the VM (VMRUN). */
+                        || SvmTransient.u64ExitCode == SVM_EXIT_INVALID))   /* Check for errors due to invalid guest state. */
+        {
+            if (rc == VINF_SUCCESS)
+                rc = VERR_SVM_INVALID_GUEST_STATE;
+            hmR0SvmReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &SvmTransient);
+            return rc;
+        }
+
+        /* Handle the #VMEXIT. */
+        AssertMsg(SvmTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
+        STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
+
+        /* -xxx- todo. */
+
+        if (rc != VINF_SUCCESS)
+            break;
+        else if (cLoops > pVM->hm.s.cMaxResumeLoops)
+        {
+            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
+            rc = VINF_EM_RAW_INTERRUPT;
+            break;
+        }
+    }
+
+    if (rc == VERR_EM_INTERPRETER)
+        rc = VINF_EM_RAW_EMULATE_INSTR;
+    else if (rc == VINF_EM_RESET)
+        rc = VINF_EM_TRIPLE_FAULT;
+    hmR0SvmExitToRing3(pVM, pVCpu, pCtx, rc);
     return rc;
 }
+
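The rewritten VMCB Clean Bits comment near the top of this changeset describes a caching contract: a set clean bit tells AMD-V it may keep its cached copy of the corresponding VMCB fields, so ring-0 code has to clear the bit whenever it modifies those fields in memory. Below is a rough sketch of that pattern built around the HMSVM_VMCB_CLEAN_INTERCEPTS bit named in the diff; the struct layout, the RT_BIT stand-in, and the helper svmSetXcptIntercept are made up for illustration and are not the actual VirtualBox definitions.

    #include <stdint.h>

    #define RT_BIT(b)                       (1U << (b))     /* Local stand-in for the IPRT RT_BIT macro. */
    #define HMSVM_VMCB_CLEAN_INTERCEPTS     RT_BIT(0)       /* Intercept vectors, TSC offset, PAUSE filter counter. */

    /* Minimal stand-in for the two VMCB control-area fields touched here. */
    typedef struct SVMVMCBCTRL
    {
        uint32_t u32InterceptException;     /* Exception intercept bitmap. */
        uint32_t u32VmcbCleanBits;          /* VMCB Clean Bits field. */
    } SVMVMCBCTRL;

    /* Hypothetical helper: add an exception intercept and clear the matching clean bit
       so AMD-V reloads the intercept state from the VMCB on the next VMRUN. */
    static void svmSetXcptIntercept(SVMVMCBCTRL *pCtrl, uint32_t uXcpt)
    {
        pCtrl->u32InterceptException |= RT_BIT(uXcpt);
        pCtrl->u32VmcbCleanBits      &= ~HMSVM_VMCB_CLEAN_INTERCEPTS;
    }

The same reasoning explains why the newly added u64ExitCode field in SVMTRANSIENT is captured right after the world switch: the #VMEXIT code is read out of the VMCB once and then consulted by the run loop without touching the VMCB again.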