VirtualBox

Changeset 74510 in vbox for trunk/src/VBox/VMM/VMMAll


Timestamp:
Sep 28, 2018 5:51:48 AM
Author:
vboxsync
Message:

VMM/IEM: Nested VMX: bugref:9180 VM-exit handlers for VM-exits caused by instruction execution. INS/OUTS still a todo.

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r74470 r74510  
    12411241
    12421242/**
     1243 * Sets the VM-exit guest-linear address VMCS field.
     1244 *
     1245 * @param   pVCpu               The cross context virtual CPU structure.
     1246 * @param   uGuestLinearAddr    The VM-exit guest-linear address field.
     1247 */
     1248DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestLinearAddr(PVMCPU pVCpu, uint64_t uGuestLinearAddr)
     1249{
     1250    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     1251    pVmcs->u64RoGuestLinearAddr.u = uGuestLinearAddr;
     1252}
     1253
     1254
     1255/**
     1256 * Sets the VM-exit guest-physical address VMCS field.
     1257 *
     1258 * @param   pVCpu           The cross context virtual CPU structure.
     1259 * @param   uGuestPhysAddr  The VM-exit guest-physical address field.
     1260 */
     1261DECL_FORCE_INLINE(void) iemVmxVmcsSetExitGuestPhysAddr(PVMCPU pVCpu, uint64_t uGuestPhysAddr)
     1262{
     1263    PVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     1264    pVmcs->u64RoGuestPhysAddr.u = uGuestPhysAddr;
     1265}
     1266
     1267
     1268/**
    12431269 * Sets the VM-exit instruction length VMCS field.
    12441270 *
     
    13651391    iemVmxVmSucceed(pVCpu);
    13661392    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1367 }
    1368 
    1369 
    1370 /**
    1371  * VMREAD common (memory/register) instruction execution worker
    1372  *
    1373  * @returns Strict VBox status code.
    1374  * @param   pVCpu           The cross context virtual CPU structure.
    1375  * @param   cbInstr         The instruction length.
    1376  * @param   pu64Dst         Where to write the VMCS value (only updated when
    1377  *                          VINF_SUCCESS is returned).
    1378  * @param   u64FieldEnc     The VMCS field encoding.
    1379  * @param   pExitInfo       Pointer to the VM-exit information struct. Optional, can
    1380  *                          be NULL.
    1381  */
    1382 IEM_STATIC VBOXSTRICTRC iemVmxVmreadCommon(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
    1383                                            PCVMXVEXITINFO pExitInfo)
    1384 {
    1385     if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
    1386     {
    1387         RT_NOREF(pExitInfo); RT_NOREF(cbInstr);
    1388         /** @todo NSTVMX: intercept. */
    1389         /** @todo NSTVMX: VMCS shadowing intercept (VMREAD bitmap). */
    1390     }
    1391 
    1392     /* CPL. */
    1393     if (pVCpu->iem.s.uCpl > 0)
    1394     {
    1395         Log(("vmread: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
    1396         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_Cpl;
    1397         return iemRaiseGeneralProtectionFault0(pVCpu);
    1398     }
    1399 
    1400     /* VMCS pointer in root mode. */
    1401     if (    IEM_IS_VMX_ROOT_MODE(pVCpu)
    1402         && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
    1403     {
    1404         Log(("vmread: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
    1405         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrInvalid;
    1406         iemVmxVmFailInvalid(pVCpu);
    1407         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1408         return VINF_SUCCESS;
    1409     }
    1410 
    1411     /* VMCS-link pointer in non-root mode. */
    1412     if (    IEM_IS_VMX_NON_ROOT_MODE(pVCpu)
    1413         && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
    1414     {
    1415         Log(("vmread: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
    1416         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_LinkPtrInvalid;
    1417         iemVmxVmFailInvalid(pVCpu);
    1418         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1419         return VINF_SUCCESS;
    1420     }
    1421 
    1422     /* Supported VMCS field. */
    1423     if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
    1424     {
    1425         Log(("vmread: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
    1426         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_FieldInvalid;
    1427         iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
    1428         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1429         return VINF_SUCCESS;
    1430     }
    1431 
    1432     /*
    1433      * Set up reading from the current or shadow VMCS.
    1434      */
    1435     uint8_t *pbVmcs;
    1436     if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
    1437         pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
    1438     else
    1439         pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    1440     Assert(pbVmcs);
    1441 
    1442     VMXVMCSFIELDENC FieldEnc;
    1443     FieldEnc.u = RT_LO_U32(u64FieldEnc);
    1444     uint8_t  const uWidth     = FieldEnc.n.u2Width;
    1445     uint8_t  const uType      = FieldEnc.n.u2Type;
    1446     uint8_t  const uWidthType = (uWidth << 2) | uType;
    1447     uint8_t  const uIndex     = FieldEnc.n.u8Index;
    1448     AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
    1449     uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];
    1450 
    1451     /*
    1452      * Read the VMCS component based on the field's effective width.
    1453      *
    1454      * A 64-bit field's effective width is reduced to 32 bits when the access-type
    1455      * indicates the high part of the field (little endian).
    1456      *
    1457      * Note! The caller is responsible for trimming the result and updating registers
    1458      * or memory locations as required. Here we just zero-extend to the largest
    1459      * type (i.e. 64-bits).
    1460      */
    1461     uint8_t      *pbField = pbVmcs + offField;
    1462     uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
    1463     switch (uEffWidth)
    1464     {
    1465         case VMX_VMCS_ENC_WIDTH_64BIT:
    1466         case VMX_VMCS_ENC_WIDTH_NATURAL: *pu64Dst = *(uint64_t *)pbField; break;
    1467         case VMX_VMCS_ENC_WIDTH_32BIT:   *pu64Dst = *(uint32_t *)pbField; break;
    1468         case VMX_VMCS_ENC_WIDTH_16BIT:   *pu64Dst = *(uint16_t *)pbField; break;
    1469     }
    1470     return VINF_SUCCESS;
    1471 }
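The width/type decode above is easier to follow with a concrete field. A minimal sketch, assuming the Intel SDM encodings (width: 0 = 16-bit, 1 = 64-bit, 2 = 32-bit, 3 = natural width; type: 0 = control, 1 = read-only, 2 = guest-state, 3 = host-state); the field chosen is illustrative, not taken from this change:

    /* GUEST_RIP is a natural-width (3), guest-state (2) field. */
    uint8_t  const uWidth     = 3;
    uint8_t  const uType      = 2;
    uint8_t  const uWidthType = (uWidth << 2) | uType;              /* row 14 of g_aoffVmcsMap */
    uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];  /* uIndex = field's index bits */
    /* offField is then the byte offset of the guest-RIP member within the virtual VMCS (VMXVVMCS). */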
    1472 
    1473 
    1474 /**
    1475  * VMREAD (64-bit register) instruction execution worker.
    1476  *
    1477  * @returns Strict VBox status code.
    1478  * @param   pVCpu           The cross context virtual CPU structure.
    1479  * @param   cbInstr         The instruction length.
    1480  * @param   pu64Dst         Where to store the VMCS field's value.
    1481  * @param   u64FieldEnc     The VMCS field encoding.
    1482  * @param   pExitInfo       Pointer to the VM-exit information struct. Optional, can
    1483  *                          be NULL.
    1484  */
    1485 IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg64(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
    1486                                           PCVMXVEXITINFO pExitInfo)
    1487 {
    1488     VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
    1489     if (rcStrict == VINF_SUCCESS)
    1490     {
    1491         iemVmxVmreadSuccess(pVCpu, cbInstr);
    1492         return VINF_SUCCESS;
    1493     }
    1494 
    1495     Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    1496     return rcStrict;
    1497 }
    1498 
    1499 
    1500 /**
    1501  * VMREAD (32-bit register) instruction execution worker.
    1502  *
    1503  * @returns Strict VBox status code.
    1504  * @param   pVCpu           The cross context virtual CPU structure.
    1505  * @param   cbInstr         The instruction length.
    1506  * @param   pu32Dst         Where to store the VMCS field's value.
    1507  * @param   u32FieldEnc     The VMCS field encoding.
    1508  * @param   pExitInfo       Pointer to the VM-exit information struct. Optional, can
    1509  *                          be NULL.
    1510  */
    1511 IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg32(PVMCPU pVCpu, uint8_t cbInstr, uint32_t *pu32Dst, uint64_t u32FieldEnc,
    1512                                           PCVMXVEXITINFO pExitInfo)
    1513 {
    1514     uint64_t u64Dst;
    1515     VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u32FieldEnc, pExitInfo);
    1516     if (rcStrict == VINF_SUCCESS)
    1517     {
    1518         *pu32Dst = u64Dst;
    1519         iemVmxVmreadSuccess(pVCpu, cbInstr);
    1520         return VINF_SUCCESS;
    1521     }
    1522 
    1523     Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    1524     return rcStrict;
    1525 }
    1526 
    1527 
    1528 /**
    1529  * VMREAD (memory) instruction execution worker.
    1530  *
    1531  * @returns Strict VBox status code.
    1532  * @param   pVCpu           The cross context virtual CPU structure.
    1533  * @param   cbInstr         The instruction length.
    1534  * @param   iEffSeg         The effective segment register to use with @a
    1535  *                          GCPtrDst.
    1536  * @param   enmEffAddrMode  The effective addressing mode (only used with memory
    1537  *                          operand).
    1538  * @param   GCPtrDst        The guest linear address to store the VMCS field's
    1539  *                          value.
    1540  * @param   u64FieldEnc     The VMCS field encoding.
    1541  * @param   pExitInfo       Pointer to the VM-exit information struct. Optional, can
    1542  *                          be NULL.
    1543  */
    1544 IEM_STATIC VBOXSTRICTRC iemVmxVmreadMem(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode,
    1545                                         RTGCPTR GCPtrDst, uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
    1546 {
    1547     uint64_t u64Dst;
    1548     VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u64FieldEnc, pExitInfo);
    1549     if (rcStrict == VINF_SUCCESS)
    1550     {
    1551         /*
    1552          * Write the VMCS field's value to the location specified in guest-memory.
    1553          *
    1554          * The pointer size depends on the address size (address-size prefix allowed).
    1555          * The operand size depends on IA-32e mode (operand-size prefix not allowed).
    1556          */
    1557         static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
    1558         Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
    1559         GCPtrDst &= s_auAddrSizeMasks[enmEffAddrMode];
    1560 
    1561         if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    1562             rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrDst, u64Dst);
    1563         else
    1564             rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrDst, u64Dst);
    1565         if (rcStrict == VINF_SUCCESS)
    1566         {
    1567             iemVmxVmreadSuccess(pVCpu, cbInstr);
    1568             return VINF_SUCCESS;
    1569         }
    1570 
    1571         Log(("vmread/mem: Failed to write to memory operand at %#RGv, rc=%Rrc\n", GCPtrDst, VBOXSTRICTRC_VAL(rcStrict)));
    1572         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrMap;
    1573         return rcStrict;
    1574     }
    1575 
    1576     Log(("vmread/mem: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    1577     return rcStrict;
    1578 }
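The address-size masking above merits a worked example. A sketch with assumed values (IEMMODE_16BIT/32BIT/64BIT are assumed to be 0, 1 and 2, matching the three mask entries):

    uint64_t const auAddrMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
    uint64_t GCPtrDst = UINT64_C(0x0000000112345678);
    GCPtrDst &= auAddrMasks[1];   /* 32-bit addressing: -> 0x0000000012345678 */
    GCPtrDst &= auAddrMasks[0];   /* 16-bit addressing: -> 0x0000000000005678 */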
    1579 
    1580 
    1581 /**
    1582  * VMWRITE instruction execution worker.
    1583  *
    1584  * @returns Strict VBox status code.
    1585  * @param   pVCpu           The cross context virtual CPU structure.
    1586  * @param   cbInstr         The instruction length.
    1587  * @param   iEffSeg         The effective segment register to use with @a u64Val.
    1588  *                          Pass UINT8_MAX if it is a register access.
    1589  * @param   enmEffAddrMode  The effective addressing mode (only used with memory
    1590  *                          operand).
    1591  * @param   u64Val          The value to write (or guest linear address to the
    1592  *                          value), @a iEffSeg will indicate if it's a memory
    1593  *                          operand.
    1594  * @param   u64FieldEnc     The VMCS field encoding.
    1595  * @param   pExitInfo       Pointer to the VM-exit information struct. Optional, can
    1596  *                          be NULL.
    1597  */
    1598 IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode, uint64_t u64Val,
    1599                                       uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
    1600 {
    1601     if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
    1602     {
    1603         RT_NOREF(pExitInfo);
    1604         /** @todo NSTVMX: intercept. */
    1605         /** @todo NSTVMX: VMCS shadowing intercept (VMWRITE bitmap). */
    1606     }
    1607 
    1608     /* CPL. */
    1609     if (pVCpu->iem.s.uCpl > 0)
    1610     {
    1611         Log(("vmwrite: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
    1612         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_Cpl;
    1613         return iemRaiseGeneralProtectionFault0(pVCpu);
    1614     }
    1615 
    1616     /* VMCS pointer in root mode. */
    1617     if (    IEM_IS_VMX_ROOT_MODE(pVCpu)
    1618         && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
    1619     {
    1620         Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
    1621         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrInvalid;
    1622         iemVmxVmFailInvalid(pVCpu);
    1623         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1624         return VINF_SUCCESS;
    1625     }
    1626 
    1627     /* VMCS-link pointer in non-root mode. */
    1628     if (    IEM_IS_VMX_NON_ROOT_MODE(pVCpu)
    1629         && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
    1630     {
    1631         Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
    1632         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_LinkPtrInvalid;
    1633         iemVmxVmFailInvalid(pVCpu);
    1634         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1635         return VINF_SUCCESS;
    1636     }
    1637 
    1638     /* If the VMWRITE instruction references memory, access the specified memory operand. */
    1639     bool const fIsRegOperand = iEffSeg == UINT8_MAX;
    1640     if (!fIsRegOperand)
    1641     {
    1642         static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
    1643         Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
    1644         RTGCPTR const GCPtrVal = u64Val & s_auAddrSizeMasks[enmEffAddrMode];
    1645 
    1646         /* Read the value from the specified guest memory location. */
    1647         VBOXSTRICTRC rcStrict;
    1648         if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
    1649             rcStrict = iemMemFetchDataU64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
    1650         else
    1651         {
    1652             uint32_t u32Val;
    1653             rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, iEffSeg, GCPtrVal);
    1654             u64Val = u32Val;
    1655         }
    1656         if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
    1657         {
    1658             Log(("vmwrite: Failed to read value from memory operand at %#RGv, rc=%Rrc\n", GCPtrVal, VBOXSTRICTRC_VAL(rcStrict)));
    1659             pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrMap;
    1660             return rcStrict;
    1661         }
    1662     }
    1663     else
    1664         Assert(!pExitInfo || pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand);
    1665 
    1666     /* Supported VMCS field. */
    1667     if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
    1668     {
    1669         Log(("vmwrite: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
    1670         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldInvalid;
    1671         iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_INVALID_COMPONENT);
    1672         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1673         return VINF_SUCCESS;
    1674     }
    1675 
    1676     /* Read-only VMCS field. */
    1677     bool const fReadOnlyField = HMVmxIsVmcsFieldReadOnly(u64FieldEnc);
    1678     if (   fReadOnlyField
    1679         && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
    1680     {
    1681         Log(("vmwrite: Write to read-only VMCS component %#RX64 -> VMFail\n", u64FieldEnc));
    1682         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldRo;
    1683         iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_RO_COMPONENT);
    1684         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1685         return VINF_SUCCESS;
    1686     }
    1687 
    1688     /*
    1689      * Set up writing to the current or shadow VMCS.
    1690      */
    1691     uint8_t *pbVmcs;
    1692     if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
    1693         pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
    1694     else
    1695         pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
    1696     Assert(pbVmcs);
    1697 
    1698     VMXVMCSFIELDENC FieldEnc;
    1699     FieldEnc.u = RT_LO_U32(u64FieldEnc);
    1700     uint8_t  const uWidth     = FieldEnc.n.u2Width;
    1701     uint8_t  const uType      = FieldEnc.n.u2Type;
    1702     uint8_t  const uWidthType = (uWidth << 2) | uType;
    1703     uint8_t  const uIndex     = FieldEnc.n.u8Index;
    1704     AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
    1705     uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];
    1706 
    1707     /*
    1708      * Write the VMCS component based on the field's effective width.
    1709      *
    1710      * A 64-bit field's effective width is reduced to 32 bits when the access-type
    1711      * indicates the high part of the field (little endian).
    1712      */
    1713     uint8_t      *pbField = pbVmcs + offField;
    1714     uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
    1715     switch (uEffWidth)
    1716     {
    1717         case VMX_VMCS_ENC_WIDTH_64BIT:
    1718         case VMX_VMCS_ENC_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
    1719         case VMX_VMCS_ENC_WIDTH_32BIT:   *(uint32_t *)pbField = u64Val; break;
    1720         case VMX_VMCS_ENC_WIDTH_16BIT:   *(uint16_t *)pbField = u64Val; break;
    1721     }
    1722 
    1723     iemVmxVmSucceed(pVCpu);
    1724     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1725     return VINF_SUCCESS;
    1726 }
    1727 
    1728 
    1729 /**
    1730  * VMCLEAR instruction execution worker.
    1731  *
    1732  * @returns Strict VBox status code.
    1733  * @param   pVCpu           The cross context virtual CPU structure.
    1734  * @param   cbInstr         The instruction length.
    1735  * @param   iEffSeg         The effective segment register to use with @a GCPtrVmcs.
    1736  * @param   GCPtrVmcs       The linear address of the VMCS pointer.
    1737  * @param   pExitInfo       Pointer to the VM-exit information struct. Optional, can
    1738  *                          be NULL.
    1739  *
    1740  * @remarks Common VMX instruction checks are expected to have already been done by
    1741  *          the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
    1742  */
    1743 IEM_STATIC VBOXSTRICTRC iemVmxVmclear(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmcs,
    1744                                       PCVMXVEXITINFO pExitInfo)
    1745 {
    1746     if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
    1747     {
    1748         RT_NOREF(pExitInfo);
    1749         /** @todo NSTVMX: intercept. */
    1750     }
    1751     Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
    1752 
    1753     /* CPL. */
    1754     if (pVCpu->iem.s.uCpl > 0)
    1755     {
    1756         Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
    1757         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_Cpl;
    1758         return iemRaiseGeneralProtectionFault0(pVCpu);
    1759     }
    1760 
    1761     /* Get the VMCS pointer from the location specified by the source memory operand. */
    1762     RTGCPHYS GCPhysVmcs;
    1763     VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
    1764     if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
    1765     {
    1766         Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
    1767         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrMap;
    1768         return rcStrict;
    1769     }
    1770 
    1771     /* VMCS pointer alignment. */
    1772     if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
    1773     {
    1774         Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));
    1775         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAlign;
    1776         iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
    1777         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1778         return VINF_SUCCESS;
    1779     }
    1780 
    1781     /* VMCS physical-address width limits. */
    1782     if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
    1783     {
    1784         Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
    1785         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrWidth;
    1786         iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
    1787         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1788         return VINF_SUCCESS;
    1789     }
    1790 
    1791     /* VMCS is not the VMXON region. */
    1792     if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
    1793     {
    1794         Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
    1795         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrVmxon;
    1796         iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_VMXON_PTR);
    1797         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1798         return VINF_SUCCESS;
    1799     }
    1800 
    1801     /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
    1802        restriction imposed by our implementation. */
    1803     if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
    1804     {
    1805         Log(("vmclear: VMCS not normal memory -> VMFail()\n"));
    1806         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAbnormal;
    1807         iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
    1808         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1809         return VINF_SUCCESS;
    1810     }
    1811 
    1812     /*
    1813      * VMCLEAR allows committing and clearing any valid VMCS pointer.
    1814      *
    1815      * If the current VMCS is the one being cleared, set its state to 'clear' and commit
    1816      * to guest memory. Otherwise, set the state of the VMCS referenced in guest memory
    1817      * to 'clear'.
    1818      */
    1819     uint8_t const fVmcsStateClear = VMX_V_VMCS_STATE_CLEAR;
    1820     if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs)
    1821     {
    1822         Assert(GCPhysVmcs != NIL_RTGCPHYS);                     /* Paranoia. */
    1823         Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
    1824         pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = fVmcsStateClear;
    1825         iemVmxCommitCurrentVmcsToMemory(pVCpu);
    1826         Assert(!IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
    1827     }
    1828     else
    1829     {
    1830         rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + RT_OFFSETOF(VMXVVMCS, fVmcsState),
    1831                                             (const void *)&fVmcsStateClear, sizeof(fVmcsStateClear));
    1832     }
    1833 
    1834     iemVmxVmSucceed(pVCpu);
    1835     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1836     return rcStrict;
    1837 }
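The physical-address width test used above (and again in VMPTRLD and VMXON below) works by shifting out exactly the architecturally valid bits. A worked example, assuming a hypothetical 36-bit cVmxMaxPhysAddrWidth:

    /* cVmxMaxPhysAddrWidth == 36 (assumption for illustration):              */
    /*   GCPhysVmcs == 0x0000000ffffff000:  GCPhysVmcs >> 36 == 0 -> accepted */
    /*   GCPhysVmcs == 0x0000001000000000:  GCPhysVmcs >> 36 == 1 -> VMFail() */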
    1838 
    1839 
    1840 /**
    1841  * VMPTRST instruction execution worker.
    1842  *
    1843  * @returns Strict VBox status code.
    1844  * @param   pVCpu           The cross context virtual CPU structure.
    1845  * @param   cbInstr         The instruction length.
    1846  * @param   iEffSeg         The effective segment register to use with @a GCPtrVmcs.
    1847  * @param   GCPtrVmcs       The linear address of where to store the current VMCS
    1848  *                          pointer.
    1849  * @param   pExitInfo       Pointer to the VM-exit information struct. Optional, can
    1850  *                          be NULL.
    1851  *
    1852  * @remarks Common VMX instruction checks are expected to have already been done by
    1853  *          the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
    1854  */
    1855 IEM_STATIC VBOXSTRICTRC iemVmxVmptrst(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmcs,
    1856                                       PCVMXVEXITINFO pExitInfo)
    1857 {
    1858     if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
    1859     {
    1860         RT_NOREF(pExitInfo);
    1861         /** @todo NSTVMX: intercept. */
    1862     }
    1863     Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
    1864 
    1865     /* CPL. */
    1866     if (pVCpu->iem.s.uCpl > 0)
    1867     {
    1868         Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
    1869         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_Cpl;
    1870         return iemRaiseGeneralProtectionFault0(pVCpu);
    1871     }
    1872 
    1873     /* Set the VMCS pointer to the location specified by the destination memory operand. */
    1874     AssertCompile(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
    1875     VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrVmcs, IEM_VMX_GET_CURRENT_VMCS(pVCpu));
    1876     if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    1877     {
    1878         iemVmxVmSucceed(pVCpu);
    1879         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1880         return rcStrict;
    1881     }
    1882 
    1883     Log(("vmptrst: Failed to store VMCS pointer to memory at destination operand %#Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    1884     pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_PtrMap;
    1885     return rcStrict;
    1886 }
    1887 
    1888 
    1889 /**
    1890  * VMPTRLD instruction execution worker.
    1891  *
    1892  * @returns Strict VBox status code.
    1893  * @param   pVCpu           The cross context virtual CPU structure.
    1894  * @param   cbInstr         The instruction length.
          * @param   iEffSeg         The effective segment register to use with @a GCPtrVmcs.
    1895  * @param   GCPtrVmcs       The linear address of the current VMCS pointer.
    1896  * @param   pExitInfo       Pointer to the VM-exit information struct. Optional, can
    1897  *                          be NULL.
    1898  *
    1899  * @remarks Common VMX instruction checks are expected to have already been done by
    1900  *          the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
    1901  */
    1902 IEM_STATIC VBOXSTRICTRC iemVmxVmptrld(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmcs,
    1903                                       PCVMXVEXITINFO pExitInfo)
    1904 {
    1905     if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
    1906     {
    1907         RT_NOREF(pExitInfo);
    1908         /** @todo NSTVMX: intercept. */
    1909     }
    1910     Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
    1911 
    1912     /* CPL. */
    1913     if (pVCpu->iem.s.uCpl > 0)
    1914     {
    1915         Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
    1916         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_Cpl;
    1917         return iemRaiseGeneralProtectionFault0(pVCpu);
    1918     }
    1919 
    1920     /* Get the VMCS pointer from the location specified by the source memory operand. */
    1921     RTGCPHYS GCPhysVmcs;
    1922     VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
    1923     if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
    1924     {
    1925         Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
    1926         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrMap;
    1927         return rcStrict;
    1928     }
    1929 
    1930     /* VMCS pointer alignment. */
    1931     if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
    1932     {
    1933         Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
    1934         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAlign;
    1935         iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
    1936         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1937         return VINF_SUCCESS;
    1938     }
    1939 
    1940     /* VMCS physical-address width limits. */
    1941     if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
    1942     {
    1943         Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
    1944         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrWidth;
    1945         iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
    1946         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1947         return VINF_SUCCESS;
    1948     }
    1949 
    1950     /* VMCS is not the VMXON region. */
    1951     if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
    1952     {
    1953         Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
    1954         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrVmxon;
    1955         iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_VMXON_PTR);
    1956         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1957         return VINF_SUCCESS;
    1958     }
    1959 
    1960     /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
    1961        restriction imposed by our implementation. */
    1962     if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
    1963     {
    1964         Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
    1965         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAbnormal;
    1966         iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
    1967         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1968         return VINF_SUCCESS;
    1969     }
    1970 
    1971     /* Read the VMCS revision ID from the VMCS. */
    1972     VMXVMCSREVID VmcsRevId;
    1973     int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
    1974     if (RT_FAILURE(rc))
    1975     {
    1976         Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
    1977         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrReadPhys;
    1978         return rc;
    1979     }
    1980 
    1981     /* Verify the VMCS revision specified by the guest matches what we reported to the guest,
    1982        and also check the VMCS shadowing feature. */
    1983     if (   VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID
    1984         || (   VmcsRevId.n.fIsShadowVmcs
    1985             && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
    1986     {
    1987         if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
    1988         {
    1989             Log(("vmptrld: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFail()\n", VMX_V_VMCS_REVISION_ID,
    1990                  VmcsRevId.n.u31RevisionId));
    1991             pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_VmcsRevId;
    1992             iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
    1993             iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    1994             return VINF_SUCCESS;
    1995         }
    1996 
    1997         Log(("vmptrld: Shadow VMCS -> VMFail()\n"));
    1998         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_ShadowVmcs;
    1999         iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
    2000         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    2001         return VINF_SUCCESS;
    2002     }
    2003 
    2004     /*
    2005      * We only maintain the current VMCS in our virtual CPU context (CPUMCTX). Therefore,
    2006      * VMPTRLD shall always flush any existing current VMCS back to guest memory before loading
    2007      * a new VMCS as current.
    2008      */
    2009     if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
    2010     {
    2011         iemVmxCommitCurrentVmcsToMemory(pVCpu);
    2012         IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
    2013     }
    2014 
    2015     iemVmxVmSucceed(pVCpu);
    2016     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    2017     return VINF_SUCCESS;
    2018 }
    2019 
    2020 
    2021 /**
    2022  * VMXON instruction execution worker.
    2023  *
    2024  * @returns Strict VBox status code.
    2025  * @param   pVCpu           The cross context virtual CPU structure.
    2026  * @param   cbInstr         The instruction length.
    2027  * @param   iEffSeg         The effective segment register to use with @a
    2028  *                          GCPtrVmxon.
    2029  * @param   GCPtrVmxon      The linear address of the VMXON pointer.
    2030  * @param   pExitInfo       Pointer to the VM-exit instruction information struct.
    2031  *                          Optional, can be NULL.
    2032  *
    2033  * @remarks Common VMX instruction checks are expected to have already been done by
    2034  *          the caller, i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
    2035  */
    2036 IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmxon,
    2037                                     PCVMXVEXITINFO pExitInfo)
    2038 {
    2039 #if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
    2040     RT_NOREF5(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
    2041     return VINF_EM_RAW_EMULATE_INSTR;
    2042 #else
    2043     if (!IEM_IS_VMX_ROOT_MODE(pVCpu))
    2044     {
    2045         /* CPL. */
    2046         if (pVCpu->iem.s.uCpl > 0)
    2047         {
    2048             Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
    2049             pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cpl;
    2050             return iemRaiseGeneralProtectionFault0(pVCpu);
    2051         }
    2052 
    2053         /* A20M (A20 Masked) mode. */
    2054         if (!PGMPhysIsA20Enabled(pVCpu))
    2055         {
    2056             Log(("vmxon: A20M mode -> #GP(0)\n"));
    2057             pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_A20M;
    2058             return iemRaiseGeneralProtectionFault0(pVCpu);
    2059         }
    2060 
    2061         /* CR0 MB1 bits. */
    2062         uint64_t const uCr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
    2063         if (~pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0)
    2064         {
    2065             Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
    2066             pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed0;
    2067             return iemRaiseGeneralProtectionFault0(pVCpu);
    2068         }
    2069 
    2070         /* CR4 MB1 bits. */
    2071         uint64_t const uCr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
    2072         if (~pVCpu->cpum.GstCtx.cr4 & uCr4Fixed0)
    2073         {
    2074             Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
    2075             pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed0;
    2076             return iemRaiseGeneralProtectionFault0(pVCpu);
    2077         }
    2078 
    2079         /* Feature control MSR's LOCK and VMXON bits. */
    2080         uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatureControl(pVCpu);
    2081         if (!(uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON)))
    2082         {
    2083             Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
    2084             pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_MsrFeatCtl;
    2085             return iemRaiseGeneralProtectionFault0(pVCpu);
    2086         }
    2087 
    2088         /* Get the VMXON pointer from the location specified by the source memory operand. */
    2089         RTGCPHYS GCPhysVmxon;
    2090         VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, iEffSeg, GCPtrVmxon);
    2091         if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
    2092         {
    2093             Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
    2094             pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrMap;
    2095             return rcStrict;
    2096         }
    2097 
    2098         /* VMXON region pointer alignment. */
    2099         if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
    2100         {
    2101             Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
    2102             pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAlign;
    2103             iemVmxVmFailInvalid(pVCpu);
    2104             iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    2105             return VINF_SUCCESS;
    2106         }
    2107 
    2108         /* VMXON physical-address width limits. */
    2109         if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
    2110         {
    2111             Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
    2112             pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrWidth;
    2113             iemVmxVmFailInvalid(pVCpu);
    2114             iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    2115             return VINF_SUCCESS;
    2116         }
    2117 
    2118         /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
    2119            restriction imposed by our implementation. */
    2120         if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
    2121         {
    2122             Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
    2123             pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAbnormal;
    2124             iemVmxVmFailInvalid(pVCpu);
    2125             iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    2126             return VINF_SUCCESS;
    2127         }
    2128 
    2129         /* Read the VMCS revision ID from the VMXON region. */
    2130         VMXVMCSREVID VmcsRevId;
    2131         int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
    2132         if (RT_FAILURE(rc))
    2133         {
    2134             Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
    2135             pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrReadPhys;
    2136             return rc;
    2137         }
    2138 
    2139         /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
    2140         if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
    2141         {
    2142             /* Revision ID mismatch. */
    2143             if (!VmcsRevId.n.fIsShadowVmcs)
    2144             {
    2145                 Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
    2146                      VmcsRevId.n.u31RevisionId));
    2147                 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmcsRevId;
    2148                 iemVmxVmFailInvalid(pVCpu);
    2149                 iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    2150                 return VINF_SUCCESS;
    2151             }
    2152 
    2153             /* Shadow VMCS disallowed. */
    2154             Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
    2155             pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_ShadowVmcs;
    2156             iemVmxVmFailInvalid(pVCpu);
    2157             iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    2158             return VINF_SUCCESS;
    2159         }
    2160 
    2161         /*
    2162          * Record that we're in VMX operation, block INIT, block and disable A20M.
    2163          */
    2164         pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon    = GCPhysVmxon;
    2165         IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
    2166         pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
    2167 
    2168         /* Clear address-range monitoring. */
    2169         EMMonitorWaitClear(pVCpu);
    2170         /** @todo NSTVMX: Intel PT. */
    2171 
    2172         iemVmxVmSucceed(pVCpu);
    2173         iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    2174 # if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
    2175         return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
    2176 # else
    2177         return VINF_SUCCESS;
    2178 # endif
    2179     }
    2180     else if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
    2181     {
    2182         RT_NOREF(pExitInfo);
    2183         /** @todo NSTVMX: intercept. */
    2184     }
    2185 
    2186     Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
    2187 
    2188     /* CPL. */
    2189     if (pVCpu->iem.s.uCpl > 0)
    2190     {
    2191         Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
    2192         pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxRootCpl;
    2193         return iemRaiseGeneralProtectionFault0(pVCpu);
    2194     }
    2195 
    2196     /* VMXON when already in VMX root mode. */
    2197     iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
    2198     pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxAlreadyRoot;
    2199     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    2200     return VINF_SUCCESS;
    2201 #endif
    22021393}
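The fixed-0 checks near the top of this worker use ~CRx & fixed0 to find required-1 bits that the guest has clear. A small sketch with assumed MSR contents (real values come from CPUMGetGuestIa32VmxCr0Fixed0):

    uint64_t const uCr0Fixed0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;   /* assumed MB1 bits */
    uint64_t const uGuestCr0  = X86_CR0_PE | X86_CR0_PG;                /* CR0.NE is clear  */
    if (~uGuestCr0 & uCr0Fixed0)            /* evaluates to X86_CR0_NE, i.e. non-zero       */
        return iemRaiseGeneralProtectionFault0(pVCpu);                  /* -> #GP(0)        */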
    22031394
     
    33292520    pVmcs->u32RoExitReason = uExitReason;
    33302521
    3331     /** @todo NSTVMX: Update VM-exit instruction length for instruction VM-exits. */
    33322522    /** @todo NSTVMX: IEMGetCurrentXcpt will be VM-exit interruption info. */
    33332523    /** @todo NSTVMX: The source event should be recorded in IDT-vectoring info
     
    33702560 * VMX VM-exit handler for VM-exits due to instruction execution.
    33712561 *
    3372  * The instructions covered here have a ModR/M byte and update the VM-exit
    3373  * instruction info. field.
     2562 * This is intended for instructions where the caller provides all the relevant
     2563 * VM-exit information.
     2564 *
     2565 * @param   pVCpu           The cross context virtual CPU structure.
     2566 * @param   pExitInfo       Pointer to the VM-exit instruction information struct.
     2567 */
     2568DECLINLINE(VBOXSTRICTRC) iemVmxVmexitInstrWithInfo(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
     2569{
     2570    /*
     2571     * Update all the VMCS fields from the VM-exit instruction information struct.
     2572     * Handling of fields that do not apply to every instruction:
     2573     *
     2574     *   - VM-exit instruction info. is undefined where it does not apply.
     2575     *   - VM-exit qualification must be cleared where it does not apply.
     2576     *   - VM-exit guest-linear address is undefined where it does not apply.
     2577     *   - VM-exit guest-physical address is undefined where it does not apply.
     2578     *   - VM-exit instruction length is always mandatory; we assert for basic sanity.
     2579     *
     2580     * See Intel spec. 27.2.1 "Basic VM-Exit Information".
     2581     */
     2582    Assert(pExitInfo);
     2583    AssertMsg(pExitInfo->uReason <= VMX_EXIT_MAX, ("uReason=%u\n", pExitInfo->uReason));
     2584    AssertMsg(pExitInfo->cbInstr >= 1 && pExitInfo->cbInstr <= 15,
     2585              ("uReason=%u cbInstr=%u\n", pExitInfo->uReason, pExitInfo->cbInstr));
     2586
     2587    iemVmxVmcsSetExitInstrInfo(pVCpu, pExitInfo->InstrInfo.u);
     2588    iemVmxVmcsSetExitQual(pVCpu, pExitInfo->u64Qual);
     2589    iemVmxVmcsSetExitGuestLinearAddr(pVCpu, pExitInfo->u64GuestLinearAddr);
     2590    iemVmxVmcsSetExitGuestPhysAddr(pVCpu, pExitInfo->u64GuestPhysAddr);
     2591    iemVmxVmcsSetExitInstrLen(pVCpu, pExitInfo->cbInstr);
     2592
     2593    return iemVmxVmexit(pVCpu, pExitInfo->uReason);
     2594}
     2595
     2596
     2597/**
     2598 * VMX VM-exit handler for VM-exits due to instruction execution.
     2599 *
     2600 * This is intended for instructions that only provide the VM-exit instruction
     2601 * length.
     2602 *
     2603 * @param   pVCpu           The cross context virtual CPU structure.
     2604 * @param   uExitReason     The VM-exit reason.
     2605 * @param   cbInstr         The instruction length (in bytes).
     2606 */
     2607IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstr(PVMCPU pVCpu, uint32_t uExitReason, uint8_t cbInstr)
     2608{
     2609    VMXVEXITINFO ExitInfo;
     2610    RT_ZERO(ExitInfo);
     2611    ExitInfo.uReason = uExitReason;
     2612    ExitInfo.cbInstr = cbInstr;
     2613    return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
     2614}
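A usage sketch for this length-only variant; the HLT intercept is hypothetical here (not part of this changeset), though VMX_EXIT_HLT is a standard exit reason:

    /* Hypothetical caller: an intercepted HLT needs only the exit reason and length. */
    if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu) /* && HLT-exiting control set */)
        return iemVmxVmexitInstr(pVCpu, VMX_EXIT_HLT, cbInstr);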
     2615
     2616
     2617/**
     2618 * VMX VM-exit handler for VM-exits due to instruction execution.
     2619 *
     2620 * This is intended for instructions that have a ModR/M byte and update the VM-exit
     2621 * instruction information and VM-exit qualification fields.
    33742622 *
    33752623 * @param   pVCpu           The cross context virtual CPU structure.
     
    33772625 * @param   uInstrId        The instruction identity (VMXINSTRID_XXX).
    33782626 * @param   cbInstr         The instruction length (in bytes).
    3379  */
    3380 IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstr(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, uint8_t cbInstr)
    3381 {
    3382     /* Construct the VM-exit instruction information. */
    3383     RTGCPTR GCPtrDisp;
    3384     uint32_t const uExitInstrInfo = iemVmxGetExitInstrInfo(pVCpu, uExitReason, uInstrId, &GCPtrDisp);
    3385 
    3386     /* Update the VM-exit instruction information. */
    3387     iemVmxVmcsSetExitInstrInfo(pVCpu, uExitInstrInfo);
     2627 *
     2628 * @remarks Do not use this for INS/OUTS instructions.
     2629 */
     2630IEM_STATIC VBOXSTRICTRC iemVmxVmexitInstrNeedsInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID uInstrId, uint8_t cbInstr)
     2631{
     2632    VMXVEXITINFO ExitInfo;
     2633    RT_ZERO(ExitInfo);
     2634    ExitInfo.uReason = uExitReason;
     2635    ExitInfo.cbInstr = cbInstr;
    33882636
    33892637    /*
     
    34072655        case VMX_EXIT_RDRAND:
    34082656        case VMX_EXIT_RDSEED:
    3409             iemVmxVmcsSetExitQual(pVCpu, GCPtrDisp);
     2657        {
     2658            /* Construct the VM-exit instruction information. */
     2659            RTGCPTR GCPtrDisp;
     2660            uint32_t const uInstrInfo = iemVmxGetExitInstrInfo(pVCpu, uExitReason, uInstrId, &GCPtrDisp);
     2661
     2662            /* Update the VM-exit instruction information. */
     2663            ExitInfo.InstrInfo.u = uInstrInfo;
     2664
     2665            /* Update the VM-exit qualification. */
     2666            ExitInfo.u64Qual = GCPtrDisp;
    34102667            break;
     2668        }
    34112669
    34122670        default:
    34132671            AssertMsgFailedReturn(("Use instruction-specific handler\n"), VERR_IEM_IPE_5);
    3414     }
    3415 
    3416     /* Update the VM-exit instruction length field. */
    3417     Assert(cbInstr <= 15);
    3418     iemVmxVmcsSetExitInstrLen(pVCpu, cbInstr);
    3419 
    3420     /* Perform the VM-exit. */
    3421     return iemVmxVmexit(pVCpu, uExitReason);
     2672            break;
     2673    }
     2674
     2675    return iemVmxVmexitInstrWithInfo(pVCpu, &ExitInfo);
    34222676}
    34232677
     
    53824636           || uInstrId == VMXINSTRID_VMRESUME);
    53834637
    5384     const char *pszInstr = uInstrId == VMXINSTRID_VMLAUNCH ? "vmlaunch" : "vmresume";
     4638    const char *pszInstr = uInstrId == VMXINSTRID_VMRESUME ? "vmresume" : "vmlaunch";
    53854639    if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
    53864640    {
    5387         RT_NOREF(pExitInfo);
    5388         /** @todo NSTVMX: intercept. */
    5389     }
     4641        if (pExitInfo)
     4642            return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
     4643        uint32_t const uExitReason = uInstrId == VMXINSTRID_VMRESUME ? VMX_EXIT_VMRESUME : VMX_EXIT_VMLAUNCH;
     4644        return iemVmxVmexitInstrNeedsInfo(pVCpu, uExitReason, uInstrId, cbInstr);
     4645    }
     4646
    53904647    Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
    53914648
     
    55394796
    55404797/**
    5541  * Implements 'VMXON'.
    5542  */
    5543 IEM_CIMPL_DEF_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon)
    5544 {
    5545     return iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, NULL /* pExitInfo */);
     4798 * VMREAD common (memory/register) instruction execution worker
     4799 *
     4800 * @returns Strict VBox status code.
     4801 * @param   pVCpu           The cross context virtual CPU structure.
     4802 * @param   cbInstr         The instruction length.
     4803 * @param   pu64Dst         Where to write the VMCS value (only updated when
     4804 *                          VINF_SUCCESS is returned).
     4805 * @param   u64FieldEnc     The VMCS field encoding.
     4806 * @param   pExitInfo       Pointer to the VM-exit information struct. Optional, can
     4807 *                          be NULL.
     4808 */
     4809IEM_STATIC VBOXSTRICTRC iemVmxVmreadCommon(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
     4810                                           PCVMXVEXITINFO pExitInfo)
     4811{
     4812    if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
     4813    {
     4814        RT_NOREF(pExitInfo); RT_NOREF(cbInstr);
     4815        /** @todo NSTVMX: intercept. */
     4816        /** @todo NSTVMX: VMCS shadowing intercept (VMREAD bitmap). */
     4817    }
     4818
     4819    /* CPL. */
     4820    if (pVCpu->iem.s.uCpl > 0)
     4821    {
     4822        Log(("vmread: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
     4823        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_Cpl;
     4824        return iemRaiseGeneralProtectionFault0(pVCpu);
     4825    }
     4826
     4827    /* VMCS pointer in root mode. */
     4828    if (    IEM_IS_VMX_ROOT_MODE(pVCpu)
     4829        && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
     4830    {
     4831        Log(("vmread: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
     4832        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrInvalid;
     4833        iemVmxVmFailInvalid(pVCpu);
     4834        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     4835        return VINF_SUCCESS;
     4836    }
     4837
     4838    /* VMCS-link pointer in non-root mode. */
     4839    if (    IEM_IS_VMX_NON_ROOT_MODE(pVCpu)
     4840        && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
     4841    {
     4842        Log(("vmread: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
     4843        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_LinkPtrInvalid;
     4844        iemVmxVmFailInvalid(pVCpu);
     4845        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     4846        return VINF_SUCCESS;
     4847    }
     4848
     4849    /* Supported VMCS field. */
     4850    if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
     4851    {
     4852        Log(("vmread: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
     4853        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_FieldInvalid;
     4854        iemVmxVmFail(pVCpu, VMXINSTRERR_VMREAD_INVALID_COMPONENT);
     4855        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     4856        return VINF_SUCCESS;
     4857    }
     4858
     4859    /*
     4860     * Set up reading from the current or shadow VMCS.
     4861     */
     4862    uint8_t *pbVmcs;
     4863    if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
     4864        pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
     4865    else
     4866        pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     4867    Assert(pbVmcs);
     4868
     4869    VMXVMCSFIELDENC FieldEnc;
     4870    FieldEnc.u = RT_LO_U32(u64FieldEnc);
     4871    uint8_t  const uWidth     = FieldEnc.n.u2Width;
     4872    uint8_t  const uType      = FieldEnc.n.u2Type;
     4873    uint8_t  const uWidthType = (uWidth << 2) | uType;
     4874    uint8_t  const uIndex     = FieldEnc.n.u8Index;
     4875    AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
     4876    uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];
     4877
     4878    /*
     4879     * Read the VMCS component based on the field's effective width.
     4880     *
     4881     * A 64-bit field's effective width is reduced to 32 bits when the access-type
     4882     * indicates the high part of the field (little endian).
     4883     *
     4884     * Note! The caller is responsible for trimming the result and updating
     4885     * registers or memory locations as required. Here we just zero-extend to
     4886     * the largest type (i.e. 64-bits).
     4887     */
     4888    uint8_t      *pbField = pbVmcs + offField;
     4889    uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
     4890    switch (uEffWidth)
     4891    {
     4892        case VMX_VMCS_ENC_WIDTH_64BIT:
     4893        case VMX_VMCS_ENC_WIDTH_NATURAL: *pu64Dst = *(uint64_t *)pbField; break;
     4894        case VMX_VMCS_ENC_WIDTH_32BIT:   *pu64Dst = *(uint32_t *)pbField; break;
     4895        case VMX_VMCS_ENC_WIDTH_16BIT:   *pu64Dst = *(uint16_t *)pbField; break;
     4896    }
     4897    return VINF_SUCCESS;
     4898}
     4899
     4900
     4901/**
     4902 * VMREAD (64-bit register) instruction execution worker.
     4903 *
     4904 * @returns Strict VBox status code.
     4905 * @param   pVCpu           The cross context virtual CPU structure.
     4906 * @param   cbInstr         The instruction length.
     4907 * @param   pu64Dst         Where to store the VMCS field's value.
     4908 * @param   u64FieldEnc     The VMCS field encoding.
     4909 * @param   pExitInfo       Pointer to the VM-exit information struct. Optional, can
     4910 *                          be NULL.
     4911 */
     4912IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg64(PVMCPU pVCpu, uint8_t cbInstr, uint64_t *pu64Dst, uint64_t u64FieldEnc,
     4913                                          PCVMXVEXITINFO pExitInfo)
     4914{
     4915    VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
     4916    if (rcStrict == VINF_SUCCESS)
     4917    {
     4918        iemVmxVmreadSuccess(pVCpu, cbInstr);
     4919        return VINF_SUCCESS;
     4920    }
     4921
     4922    Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     4923    return rcStrict;
     4924}
     4925
     4926
     4927/**
     4928 * VMREAD (32-bit register) instruction execution worker.
     4929 *
     4930 * @returns Strict VBox status code.
     4931 * @param   pVCpu           The cross context virtual CPU structure.
     4932 * @param   cbInstr         The instruction length.
     4933 * @param   pu32Dst         Where to store the VMCS field's value.
     4934 * @param   u32FieldEnc     The VMCS field encoding.
     4935 * @param   pExitInfo       Pointer to the VM-exit information struct. Optional, can
     4936 *                          be NULL.
     4937 */
     4938IEM_STATIC VBOXSTRICTRC iemVmxVmreadReg32(PVMCPU pVCpu, uint8_t cbInstr, uint32_t *pu32Dst, uint64_t u32FieldEnc,
     4939                                          PCVMXVEXITINFO pExitInfo)
     4940{
     4941    uint64_t u64Dst;
     4942    VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u32FieldEnc, pExitInfo);
     4943    if (rcStrict == VINF_SUCCESS)
     4944    {
     4945        *pu32Dst = u64Dst;
     4946        iemVmxVmreadSuccess(pVCpu, cbInstr);
     4947        return VINF_SUCCESS;
     4948    }
     4949
     4950    Log(("vmread/reg: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     4951    return rcStrict;
     4952}
     4953
     4954
     4955/**
     4956 * VMREAD (memory) instruction execution worker.
     4957 *
     4958 * @returns Strict VBox status code.
     4959 * @param   pVCpu           The cross context virtual CPU structure.
     4960 * @param   cbInstr         The instruction length.
      4961 * @param   iEffSeg         The effective segment register to use with @a
      4962 *                          GCPtrDst.
      4963 * @param   enmEffAddrMode  The effective addressing mode of the memory
      4964 *                          operand.
     4965 * @param   GCPtrDst        The guest linear address to store the VMCS field's
     4966 *                          value.
     4967 * @param   u64FieldEnc     The VMCS field encoding.
     4968 * @param   pExitInfo       Pointer to the VM-exit information struct. Optional, can
     4969 *                          be NULL.
     4970 */
     4971IEM_STATIC VBOXSTRICTRC iemVmxVmreadMem(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode,
     4972                                        RTGCPTR GCPtrDst, uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
     4973{
     4974    uint64_t u64Dst;
     4975    VBOXSTRICTRC rcStrict = iemVmxVmreadCommon(pVCpu, cbInstr, &u64Dst, u64FieldEnc, pExitInfo);
     4976    if (rcStrict == VINF_SUCCESS)
     4977    {
     4978        /*
     4979         * Write the VMCS field's value to the location specified in guest-memory.
     4980         *
     4981         * The pointer size depends on the address size (address-size prefix allowed).
      4982         * The operand size depends on IA-32e mode (operand-size prefix is ignored).
     4983         */
     4984        static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
     4985        Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
     4986        GCPtrDst &= s_auAddrSizeMasks[enmEffAddrMode];
     4987
     4988        if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
     4989            rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrDst, u64Dst);
     4990        else
     4991            rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrDst, u64Dst);
     4992        if (rcStrict == VINF_SUCCESS)
     4993        {
     4994            iemVmxVmreadSuccess(pVCpu, cbInstr);
     4995            return VINF_SUCCESS;
     4996        }
     4997
     4998        Log(("vmread/mem: Failed to write to memory operand at %#RGv, rc=%Rrc\n", GCPtrDst, VBOXSTRICTRC_VAL(rcStrict)));
     4999        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmread_PtrMap;
     5000        return rcStrict;
     5001    }
     5002
     5003    Log(("vmread/mem: iemVmxVmreadCommon failed rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     5004    return rcStrict;
     5005}
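
/* Editor's illustration (not part of the changeset): the address-size masking
 * above relies on IEMMODE_16BIT/32BIT/64BIT being 0/1/2 so they can index the
 * mask table directly.  A 16-bit address-size override thus truncates the
 * effective address to its low word: */
#if 0 /* sketch only */
    RTGCPTR GCPtrDst = UINT64_C(0x00123456);
    GCPtrDst &= UINT64_C(0xffff);               /* s_auAddrSizeMasks[IEMMODE_16BIT] */
    Assert(GCPtrDst == UINT64_C(0x3456));
#endif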
     5006
     5007
     5008/**
     5009 * VMWRITE instruction execution worker.
     5010 *
     5011 * @returns Strict VBox status code.
     5012 * @param   pVCpu           The cross context virtual CPU structure.
     5013 * @param   cbInstr         The instruction length.
     5014 * @param   iEffSeg         The effective segment register to use with @a u64Val.
     5015 *                          Pass UINT8_MAX if it is a register access.
     5016 * @param   enmEffAddrMode  The effective addressing mode (only used with memory
     5017 *                          operand).
      5018 * @param   u64Val          The value to write (or the guest linear address of
      5019 *                          the value); @a iEffSeg indicates whether it is a
      5020 *                          memory operand.
     5021 * @param   u64FieldEnc     The VMCS field encoding.
     5022 * @param   pExitInfo       Pointer to the VM-exit information struct. Optional, can
     5023 *                          be NULL.
     5024 */
     5025IEM_STATIC VBOXSTRICTRC iemVmxVmwrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, IEMMODE enmEffAddrMode, uint64_t u64Val,
     5026                                      uint64_t u64FieldEnc, PCVMXVEXITINFO pExitInfo)
     5027{
     5028    if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
     5029    {
     5030        RT_NOREF(pExitInfo);
     5031        /** @todo NSTVMX: intercept. */
     5032        /** @todo NSTVMX: VMCS shadowing intercept (VMWRITE bitmap). */
     5033    }
     5034
     5035    /* CPL. */
     5036    if (pVCpu->iem.s.uCpl > 0)
     5037    {
     5038        Log(("vmwrite: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
     5039        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_Cpl;
     5040        return iemRaiseGeneralProtectionFault0(pVCpu);
     5041    }
     5042
     5043    /* VMCS pointer in root mode. */
     5044    if (    IEM_IS_VMX_ROOT_MODE(pVCpu)
     5045        && !IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
     5046    {
     5047        Log(("vmwrite: VMCS pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_CURRENT_VMCS(pVCpu)));
     5048        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrInvalid;
     5049        iemVmxVmFailInvalid(pVCpu);
     5050        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5051        return VINF_SUCCESS;
     5052    }
     5053
     5054    /* VMCS-link pointer in non-root mode. */
     5055    if (    IEM_IS_VMX_NON_ROOT_MODE(pVCpu)
     5056        && !IEM_VMX_HAS_SHADOW_VMCS(pVCpu))
     5057    {
     5058        Log(("vmwrite: VMCS-link pointer %#RGp invalid -> VMFailInvalid\n", IEM_VMX_GET_SHADOW_VMCS(pVCpu)));
     5059        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_LinkPtrInvalid;
     5060        iemVmxVmFailInvalid(pVCpu);
     5061        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5062        return VINF_SUCCESS;
     5063    }
     5064
     5065    /* If the VMWRITE instruction references memory, access the specified memory operand. */
     5066    bool const fIsRegOperand = iEffSeg == UINT8_MAX;
     5067    if (!fIsRegOperand)
     5068    {
     5069        static uint64_t const s_auAddrSizeMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_C(0xffffffffffffffff) };
     5070        Assert(enmEffAddrMode < RT_ELEMENTS(s_auAddrSizeMasks));
     5071        RTGCPTR const GCPtrVal = u64Val & s_auAddrSizeMasks[enmEffAddrMode];
     5072
     5073        /* Read the value from the specified guest memory location. */
     5074        VBOXSTRICTRC rcStrict;
     5075        if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
     5076            rcStrict = iemMemFetchDataU64(pVCpu, &u64Val, iEffSeg, GCPtrVal);
     5077        else
     5078        {
     5079            uint32_t u32Val;
     5080            rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, iEffSeg, GCPtrVal);
     5081            u64Val = u32Val;
     5082        }
     5083        if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
     5084        {
     5085            Log(("vmwrite: Failed to read value from memory operand at %#RGv, rc=%Rrc\n", GCPtrVal, VBOXSTRICTRC_VAL(rcStrict)));
     5086            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_PtrMap;
     5087            return rcStrict;
     5088        }
     5089    }
     5090    else
     5091        Assert(!pExitInfo || pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand);
     5092
     5093    /* Supported VMCS field. */
     5094    if (!iemVmxIsVmcsFieldValid(pVCpu, u64FieldEnc))
     5095    {
     5096        Log(("vmwrite: VMCS field %#RX64 invalid -> VMFail\n", u64FieldEnc));
     5097        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldInvalid;
     5098        iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_INVALID_COMPONENT);
     5099        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5100        return VINF_SUCCESS;
     5101    }
     5102
     5103    /* Read-only VMCS field. */
     5104    bool const fReadOnlyField = HMVmxIsVmcsFieldReadOnly(u64FieldEnc);
     5105    if (   fReadOnlyField
     5106        && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmwriteAll)
     5107    {
     5108        Log(("vmwrite: Write to read-only VMCS component %#RX64 -> VMFail\n", u64FieldEnc));
     5109        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmwrite_FieldRo;
     5110        iemVmxVmFail(pVCpu, VMXINSTRERR_VMWRITE_RO_COMPONENT);
     5111        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5112        return VINF_SUCCESS;
     5113    }
     5114
     5115    /*
      5116     * Set up writing to the current or shadow VMCS.
     5117     */
     5118    uint8_t *pbVmcs;
     5119    if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
     5120        pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pShadowVmcs);
     5121    else
     5122        pbVmcs = (uint8_t *)pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
     5123    Assert(pbVmcs);
     5124
     5125    VMXVMCSFIELDENC FieldEnc;
     5126    FieldEnc.u = RT_LO_U32(u64FieldEnc);
     5127    uint8_t  const uWidth     = FieldEnc.n.u2Width;
     5128    uint8_t  const uType      = FieldEnc.n.u2Type;
     5129    uint8_t  const uWidthType = (uWidth << 2) | uType;
     5130    uint8_t  const uIndex     = FieldEnc.n.u8Index;
     5131    AssertReturn(uIndex <= VMX_V_VMCS_MAX_INDEX, VERR_IEM_IPE_2);
     5132    uint16_t const offField   = g_aoffVmcsMap[uWidthType][uIndex];
     5133
     5134    /*
     5135     * Write the VMCS component based on the field's effective width.
     5136     *
      5137     * The effective width is the field's width, with 64-bit fields adjusted to
      5138     * 32 bits when the access-type indicates the high part (little endian).
     5139     */
     5140    uint8_t      *pbField = pbVmcs + offField;
     5141    uint8_t const uEffWidth = HMVmxGetVmcsFieldWidthEff(FieldEnc.u);
     5142    switch (uEffWidth)
     5143    {
     5144        case VMX_VMCS_ENC_WIDTH_64BIT:
     5145        case VMX_VMCS_ENC_WIDTH_NATURAL: *(uint64_t *)pbField = u64Val; break;
     5146        case VMX_VMCS_ENC_WIDTH_32BIT:   *(uint32_t *)pbField = u64Val; break;
     5147        case VMX_VMCS_ENC_WIDTH_16BIT:   *(uint16_t *)pbField = u64Val; break;
     5148    }
     5149
     5150    iemVmxVmSucceed(pVCpu);
     5151    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5152    return VINF_SUCCESS;
     5153}
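
/* Editor's illustration (not part of the changeset): the effective-width
 * switch above silently truncates the source value for narrow fields, so a
 * VMWRITE from a 64-bit GPR to a 32-bit field keeps only the low dword: */
#if 0 /* sketch only */
    uint64_t const u64Src   = UINT64_C(0xdeadbeef00c0ffee);
    uint32_t const u32Field = (uint32_t)u64Src;  /* what the 32-bit case stores */
    Assert(u32Field == UINT32_C(0x00c0ffee));
#endif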
     5154
     5155
     5156/**
     5157 * VMCLEAR instruction execution worker.
     5158 *
     5159 * @returns Strict VBox status code.
     5160 * @param   pVCpu           The cross context virtual CPU structure.
     5161 * @param   cbInstr         The instruction length.
     5162 * @param   iEffSeg         The effective segment register to use with @a GCPtrVmcs.
     5163 * @param   GCPtrVmcs       The linear address of the VMCS pointer.
     5164 * @param   pExitInfo       Pointer to the VM-exit information struct. Optional, can
     5165 *                          be NULL.
     5166 *
      5167 * @remarks Common VMX instruction checks are already expected to have been done
      5168 *          by the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
     5169 */
      5170IEM_STATIC VBOXSTRICTRC iemVmxVmclear(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmcs,
     5171                                      PCVMXVEXITINFO pExitInfo)
     5172{
     5173    if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
     5174    {
     5175        if (pExitInfo)
     5176            return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
     5177        return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMCLEAR, VMXINSTRID_NONE, cbInstr);
     5178    }
     5179
     5180    Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
     5181
     5182    /* CPL. */
     5183    if (pVCpu->iem.s.uCpl > 0)
     5184    {
     5185        Log(("vmclear: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
     5186        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_Cpl;
     5187        return iemRaiseGeneralProtectionFault0(pVCpu);
     5188    }
     5189
     5190    /* Get the VMCS pointer from the location specified by the source memory operand. */
     5191    RTGCPHYS GCPhysVmcs;
     5192    VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
     5193    if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
     5194    {
     5195        Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
     5196        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrMap;
     5197        return rcStrict;
     5198    }
     5199
     5200    /* VMCS pointer alignment. */
     5201    if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
     5202    {
     5203        Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));
     5204        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAlign;
     5205        iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
     5206        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5207        return VINF_SUCCESS;
     5208    }
     5209
     5210    /* VMCS physical-address width limits. */
     5211    if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
     5212    {
     5213        Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
     5214        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrWidth;
     5215        iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
     5216        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5217        return VINF_SUCCESS;
     5218    }
     5219
     5220    /* VMCS is not the VMXON region. */
     5221    if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
     5222    {
     5223        Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
     5224        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrVmxon;
     5225        iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_VMXON_PTR);
     5226        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5227        return VINF_SUCCESS;
     5228    }
     5229
     5230    /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
     5231       restriction imposed by our implementation. */
     5232    if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
     5233    {
     5234        Log(("vmclear: VMCS not normal memory -> VMFail()\n"));
     5235        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmclear_PtrAbnormal;
     5236        iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
     5237        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5238        return VINF_SUCCESS;
     5239    }
     5240
     5241    /*
     5242     * VMCLEAR allows committing and clearing any valid VMCS pointer.
     5243     *
     5244     * If the current VMCS is the one being cleared, set its state to 'clear' and commit
     5245     * to guest memory. Otherwise, set the state of the VMCS referenced in guest memory
     5246     * to 'clear'.
     5247     */
     5248    uint8_t const fVmcsStateClear = VMX_V_VMCS_STATE_CLEAR;
     5249    if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs)
     5250    {
     5251        Assert(GCPhysVmcs != NIL_RTGCPHYS);                     /* Paranoia. */
     5252        Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs));
     5253        pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = fVmcsStateClear;
     5254        iemVmxCommitCurrentVmcsToMemory(pVCpu);
     5255        Assert(!IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
     5256    }
     5257    else
     5258    {
      5259        rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + RT_OFFSETOF(VMXVVMCS, fVmcsState),
     5260                                            (const void *)&fVmcsStateClear, sizeof(fVmcsStateClear));
     5261    }
     5262
     5263    iemVmxVmSucceed(pVCpu);
     5264    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5265    return rcStrict;
     5266}
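
/* Editor's illustration (not part of the changeset): clearing the launch
 * state of a VMCS that is not the current one boils down to a single byte
 * write into guest RAM, assuming fVmcsState is a uint8_t member of VMXVVMCS
 * as the RT_OFFSETOF arithmetic above implies; pVM stands in for
 * pVCpu->CTX_SUFF(pVM) and GCPhysVmcs for the validated VMCS address: */
#if 0 /* sketch only */
    uint8_t const fState = VMX_V_VMCS_STATE_CLEAR;
    PGMPhysSimpleWriteGCPhys(pVM, GCPhysVmcs + RT_OFFSETOF(VMXVVMCS, fVmcsState),
                             &fState, sizeof(fState));
#endif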
     5267
     5268
     5269/**
     5270 * VMPTRST instruction execution worker.
     5271 *
     5272 * @returns Strict VBox status code.
     5273 * @param   pVCpu           The cross context virtual CPU structure.
     5274 * @param   cbInstr         The instruction length.
     5275 * @param   iEffSeg         The effective segment register to use with @a GCPtrVmcs.
     5276 * @param   GCPtrVmcs       The linear address of where to store the current VMCS
     5277 *                          pointer.
     5278 * @param   pExitInfo       Pointer to the VM-exit information struct. Optional, can
     5279 *                          be NULL.
     5280 *
      5281 * @remarks Common VMX instruction checks are already expected to have been done
      5282 *          by the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
     5283 */
      5284IEM_STATIC VBOXSTRICTRC iemVmxVmptrst(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmcs,
     5285                                      PCVMXVEXITINFO pExitInfo)
     5286{
     5287    if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
     5288    {
     5289        if (pExitInfo)
     5290            return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
     5291        return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRST, VMXINSTRID_NONE, cbInstr);
     5292    }
     5293
     5294    Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
     5295
     5296    /* CPL. */
     5297    if (pVCpu->iem.s.uCpl > 0)
     5298    {
     5299        Log(("vmptrst: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
     5300        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_Cpl;
     5301        return iemRaiseGeneralProtectionFault0(pVCpu);
     5302    }
     5303
      5304    /* Store the current VMCS pointer to the location specified by the destination memory operand. */
     5305    AssertCompile(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
     5306    VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, iEffSeg, GCPtrVmcs, IEM_VMX_GET_CURRENT_VMCS(pVCpu));
     5307    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
     5308    {
     5309        iemVmxVmSucceed(pVCpu);
     5310        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5311        return rcStrict;
     5312    }
     5313
      5314    Log(("vmptrst: Failed to store VMCS pointer to memory at destination operand, rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     5315    pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrst_PtrMap;
     5316    return rcStrict;
     5317}
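
/* Editor's note (not part of the changeset): the AssertCompile above matters
 * because, with no current VMCS, IEM_VMX_GET_CURRENT_VMCS returns
 * NIL_RTGCPHYS and VMPTRST then stores all-ones -- the same value real
 * hardware reports for an invalid current-VMCS pointer: */
#if 0 /* sketch only */
    uint64_t const uNoCurrentVmcs = NIL_RTGCPHYS;
    Assert(uNoCurrentVmcs == UINT64_MAX);
#endif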
     5318
     5319
     5320/**
     5321 * VMPTRLD instruction execution worker.
     5322 *
     5323 * @returns Strict VBox status code.
     5324 * @param   pVCpu           The cross context virtual CPU structure.
     5325 * @param   cbInstr         The instruction length.
           * @param   iEffSeg         The effective segment register to use with @a GCPtrVmcs.
      5326 * @param   GCPtrVmcs       The linear address of the current VMCS pointer.
     5327 * @param   pExitInfo       Pointer to the VM-exit information struct. Optional, can
     5328 *                          be NULL.
     5329 *
      5330 * @remarks Common VMX instruction checks are already expected to have been done
      5331 *          by the caller, i.e. VMX operation, CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
     5332 */
      5333IEM_STATIC VBOXSTRICTRC iemVmxVmptrld(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmcs,
     5334                                      PCVMXVEXITINFO pExitInfo)
     5335{
     5336    if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
     5337    {
     5338        if (pExitInfo)
     5339            return iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
     5340        return iemVmxVmexitInstrNeedsInfo(pVCpu, VMX_EXIT_VMPTRLD, VMXINSTRID_NONE, cbInstr);
     5341    }
     5342
     5343    Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
     5344
     5345    /* CPL. */
     5346    if (pVCpu->iem.s.uCpl > 0)
     5347    {
     5348        Log(("vmptrld: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
     5349        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_Cpl;
     5350        return iemRaiseGeneralProtectionFault0(pVCpu);
     5351    }
     5352
     5353    /* Get the VMCS pointer from the location specified by the source memory operand. */
     5354    RTGCPHYS GCPhysVmcs;
     5355    VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, iEffSeg, GCPtrVmcs);
     5356    if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
     5357    {
     5358        Log(("vmptrld: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
     5359        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrMap;
     5360        return rcStrict;
     5361    }
     5362
     5363    /* VMCS pointer alignment. */
     5364    if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
     5365    {
     5366        Log(("vmptrld: VMCS pointer not page-aligned -> VMFail()\n"));
     5367        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAlign;
     5368        iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
     5369        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5370        return VINF_SUCCESS;
     5371    }
     5372
     5373    /* VMCS physical-address width limits. */
     5374    if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
     5375    {
     5376        Log(("vmptrld: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
     5377        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrWidth;
     5378        iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
     5379        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5380        return VINF_SUCCESS;
     5381    }
     5382
     5383    /* VMCS is not the VMXON region. */
     5384    if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
     5385    {
     5386        Log(("vmptrld: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
     5387        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrVmxon;
     5388        iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_VMXON_PTR);
     5389        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5390        return VINF_SUCCESS;
     5391    }
     5392
     5393    /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
     5394       restriction imposed by our implementation. */
     5395    if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
     5396    {
     5397        Log(("vmptrld: VMCS not normal memory -> VMFail()\n"));
     5398        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrAbnormal;
     5399        iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INVALID_PHYSADDR);
     5400        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5401        return VINF_SUCCESS;
     5402    }
     5403
     5404    /* Read the VMCS revision ID from the VMCS. */
     5405    VMXVMCSREVID VmcsRevId;
     5406    int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmcs, sizeof(VmcsRevId));
     5407    if (RT_FAILURE(rc))
     5408    {
     5409        Log(("vmptrld: Failed to read VMCS at %#RGp, rc=%Rrc\n", GCPhysVmcs, rc));
     5410        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_PtrReadPhys;
     5411        return rc;
     5412    }
     5413
      5414    /* Verify that the VMCS revision specified by the guest matches what we reported to
      5415       the guest, and check the VMCS shadowing feature if a shadow VMCS is specified. */
     5416    if (   VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID
     5417        || (   VmcsRevId.n.fIsShadowVmcs
     5418            && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fVmxVmcsShadowing))
     5419    {
     5420        if (VmcsRevId.n.u31RevisionId != VMX_V_VMCS_REVISION_ID)
     5421        {
     5422            Log(("vmptrld: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFail()\n", VMX_V_VMCS_REVISION_ID,
     5423                 VmcsRevId.n.u31RevisionId));
     5424            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_VmcsRevId;
     5425            iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
     5426            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5427            return VINF_SUCCESS;
     5428        }
     5429
     5430        Log(("vmptrld: Shadow VMCS -> VMFail()\n"));
     5431        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmptrld_ShadowVmcs;
     5432        iemVmxVmFail(pVCpu, VMXINSTRERR_VMPTRLD_INCORRECT_VMCS_REV);
     5433        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5434        return VINF_SUCCESS;
     5435    }
     5436
     5437    /*
      5438     * We only maintain the current VMCS in our virtual CPU context (CPUMCTX). Therefore,
     5439     * VMPTRLD shall always flush any existing current VMCS back to guest memory before loading
     5440     * a new VMCS as current.
     5441     */
     5442    if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
     5443    {
     5444        iemVmxCommitCurrentVmcsToMemory(pVCpu);
     5445        IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
     5446    }
     5447
     5448    iemVmxVmSucceed(pVCpu);
     5449    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5450    return VINF_SUCCESS;
     5451}
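
/* Editor's illustration (not part of the changeset): the first 32 bits of a
 * VMCS region hold the revision identifier (bits 30:0) and the shadow-VMCS
 * indicator (bit 31), which is the layout VMXVMCSREVID models: */
#if 0 /* sketch only */
    uint32_t u32RevId = 0;
    PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &u32RevId, GCPhysVmcs, sizeof(u32RevId));
    bool     const fIsShadowVmcs = RT_BOOL(u32RevId & RT_BIT_32(31));
    uint32_t const uRevisionId   = u32RevId & ~RT_BIT_32(31);
#endif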
     5452
     5453
     5454/**
     5455 * VMXON instruction execution worker.
     5456 *
     5457 * @returns Strict VBox status code.
     5458 * @param   pVCpu           The cross context virtual CPU structure.
     5459 * @param   cbInstr         The instruction length.
     5460 * @param   iEffSeg         The effective segment register to use with @a
     5461 *                          GCPtrVmxon.
     5462 * @param   GCPtrVmxon      The linear address of the VMXON pointer.
     5463 * @param   pExitInfo       Pointer to the VM-exit instruction information struct.
      5464 *                          Optional, can be NULL.
     5465 *
      5466 * @remarks Common VMX instruction checks are already expected to have been done
      5467 *          by the caller, i.e. CR4.VMXE, Real/V86 mode, EFER/CS.L checks.
     5468 */
      5469IEM_STATIC VBOXSTRICTRC iemVmxVmxon(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrVmxon,
     5470                                    PCVMXVEXITINFO pExitInfo)
     5471{
     5472#if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && !defined(IN_RING3)
     5473    RT_NOREF5(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
     5474    return VINF_EM_RAW_EMULATE_INSTR;
     5475#else
     5476    if (!IEM_IS_VMX_ROOT_MODE(pVCpu))
     5477    {
     5478        /* CPL. */
     5479        if (pVCpu->iem.s.uCpl > 0)
     5480        {
     5481            Log(("vmxon: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
     5482            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cpl;
     5483            return iemRaiseGeneralProtectionFault0(pVCpu);
     5484        }
     5485
     5486        /* A20M (A20 Masked) mode. */
     5487        if (!PGMPhysIsA20Enabled(pVCpu))
     5488        {
     5489            Log(("vmxon: A20M mode -> #GP(0)\n"));
     5490            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_A20M;
     5491            return iemRaiseGeneralProtectionFault0(pVCpu);
     5492        }
     5493
     5494        /* CR0 MB1 bits. */
     5495        uint64_t const uCr0Fixed0 = CPUMGetGuestIa32VmxCr0Fixed0(pVCpu);
     5496        if (~pVCpu->cpum.GstCtx.cr0 & uCr0Fixed0)
     5497        {
     5498            Log(("vmxon: CR0 fixed0 bits cleared -> #GP(0)\n"));
     5499            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr0Fixed0;
     5500            return iemRaiseGeneralProtectionFault0(pVCpu);
     5501        }
     5502
     5503        /* CR4 MB1 bits. */
     5504        uint64_t const uCr4Fixed0 = CPUMGetGuestIa32VmxCr4Fixed0(pVCpu);
     5505        if (~pVCpu->cpum.GstCtx.cr4 & uCr4Fixed0)
     5506        {
     5507            Log(("vmxon: CR4 fixed0 bits cleared -> #GP(0)\n"));
     5508            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_Cr4Fixed0;
     5509            return iemRaiseGeneralProtectionFault0(pVCpu);
     5510        }
     5511
     5512        /* Feature control MSR's LOCK and VMXON bits. */
     5513        uint64_t const uMsrFeatCtl = CPUMGetGuestIa32FeatureControl(pVCpu);
     5514        if (!(uMsrFeatCtl & (MSR_IA32_FEATURE_CONTROL_LOCK | MSR_IA32_FEATURE_CONTROL_VMXON)))
     5515        {
     5516            Log(("vmxon: Feature control lock bit or VMXON bit cleared -> #GP(0)\n"));
     5517            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_MsrFeatCtl;
     5518            return iemRaiseGeneralProtectionFault0(pVCpu);
     5519        }
     5520
     5521        /* Get the VMXON pointer from the location specified by the source memory operand. */
     5522        RTGCPHYS GCPhysVmxon;
     5523        VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, iEffSeg, GCPtrVmxon);
     5524        if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
     5525        {
     5526            Log(("vmxon: Failed to read VMXON region physaddr from %#RGv, rc=%Rrc\n", GCPtrVmxon, VBOXSTRICTRC_VAL(rcStrict)));
     5527            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrMap;
     5528            return rcStrict;
     5529        }
     5530
     5531        /* VMXON region pointer alignment. */
     5532        if (GCPhysVmxon & X86_PAGE_4K_OFFSET_MASK)
     5533        {
     5534            Log(("vmxon: VMXON region pointer not page-aligned -> VMFailInvalid\n"));
     5535            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAlign;
     5536            iemVmxVmFailInvalid(pVCpu);
     5537            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5538            return VINF_SUCCESS;
     5539        }
     5540
     5541        /* VMXON physical-address width limits. */
     5542        if (GCPhysVmxon >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cVmxMaxPhysAddrWidth)
     5543        {
     5544            Log(("vmxon: VMXON region pointer extends beyond physical-address width -> VMFailInvalid\n"));
     5545            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrWidth;
     5546            iemVmxVmFailInvalid(pVCpu);
     5547            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5548            return VINF_SUCCESS;
     5549        }
     5550
     5551        /* Ensure VMXON region is not MMIO, ROM etc. This is not an Intel requirement but a
     5552           restriction imposed by our implementation. */
     5553        if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmxon))
     5554        {
     5555            Log(("vmxon: VMXON region not normal memory -> VMFailInvalid\n"));
     5556            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrAbnormal;
     5557            iemVmxVmFailInvalid(pVCpu);
     5558            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5559            return VINF_SUCCESS;
     5560        }
     5561
     5562        /* Read the VMCS revision ID from the VMXON region. */
     5563        VMXVMCSREVID VmcsRevId;
     5564        int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &VmcsRevId, GCPhysVmxon, sizeof(VmcsRevId));
     5565        if (RT_FAILURE(rc))
     5566        {
     5567            Log(("vmxon: Failed to read VMXON region at %#RGp, rc=%Rrc\n", GCPhysVmxon, rc));
     5568            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_PtrReadPhys;
     5569            return rc;
     5570        }
     5571
     5572        /* Verify the VMCS revision specified by the guest matches what we reported to the guest. */
     5573        if (RT_UNLIKELY(VmcsRevId.u != VMX_V_VMCS_REVISION_ID))
     5574        {
     5575            /* Revision ID mismatch. */
     5576            if (!VmcsRevId.n.fIsShadowVmcs)
     5577            {
     5578                Log(("vmxon: VMCS revision mismatch, expected %#RX32 got %#RX32 -> VMFailInvalid\n", VMX_V_VMCS_REVISION_ID,
     5579                     VmcsRevId.n.u31RevisionId));
     5580                pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmcsRevId;
     5581                iemVmxVmFailInvalid(pVCpu);
     5582                iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5583                return VINF_SUCCESS;
     5584            }
     5585
     5586            /* Shadow VMCS disallowed. */
     5587            Log(("vmxon: Shadow VMCS -> VMFailInvalid\n"));
     5588            pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_ShadowVmcs;
     5589            iemVmxVmFailInvalid(pVCpu);
     5590            iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5591            return VINF_SUCCESS;
     5592        }
     5593
     5594        /*
      5595     * Record that we're in VMX operation, block INIT signals, and block and disable A20M.
     5596         */
     5597        pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon    = GCPhysVmxon;
     5598        IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
     5599        pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
     5600
     5601        /* Clear address-range monitoring. */
     5602        EMMonitorWaitClear(pVCpu);
     5603        /** @todo NSTVMX: Intel PT. */
     5604
     5605        iemVmxVmSucceed(pVCpu);
     5606        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5607# if defined(VBOX_WITH_NESTED_HWVIRT_ONLY_IN_IEM) && defined(IN_RING3)
     5608        return EMR3SetExecutionPolicy(pVCpu->CTX_SUFF(pVM)->pUVM, EMEXECPOLICY_IEM_ALL, true);
     5609# else
     5610        return VINF_SUCCESS;
     5611# endif
     5612    }
     5613    else if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
     5614    {
     5615        RT_NOREF(pExitInfo);
     5616        /** @todo NSTVMX: intercept. */
     5617    }
     5618
     5619    Assert(IEM_IS_VMX_ROOT_MODE(pVCpu));
     5620
     5621    /* CPL. */
     5622    if (pVCpu->iem.s.uCpl > 0)
     5623    {
     5624        Log(("vmxon: In VMX root mode: CPL %u -> #GP(0)\n", pVCpu->iem.s.uCpl));
     5625        pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxRootCpl;
     5626        return iemRaiseGeneralProtectionFault0(pVCpu);
     5627    }
     5628
     5629    /* VMXON when already in VMX root mode. */
      5630    pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = kVmxVDiag_Vmxon_VmxAlreadyRoot;
      5631    iemVmxVmFail(pVCpu, VMXINSTRERR_VMXON_IN_VMXROOTMODE);
     5632    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     5633    return VINF_SUCCESS;
     5634#endif
    55465635}
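
/* Editor's illustration (not part of the changeset): a VMX "fixed0" MSR in
 * fact lists the bits that must be 1, so the "~cr0 & fixed0" tests above
 * fire whenever a required bit is clear.  The MSR value below is an assumed,
 * typical one (PE, NE and PG required): */
#if 0 /* sketch only */
    uint64_t const uCr0Fixed0 = UINT64_C(0x80000021);   /* PG | NE | PE */
    uint64_t const uGuestCr0  = UINT64_C(0x80000001);   /* NE missing */
    Assert(~uGuestCr0 & uCr0Fixed0);                    /* -> VMXON raises #GP(0) */
#endif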
    55475636
     
    55605649# else
    55615650    if (IEM_IS_VMX_NON_ROOT_MODE(pVCpu))
    5562     {
    5563         /** @todo NSTVMX: intercept. */
    5564     }
     5651        return iemVmxVmexitInstr(pVCpu, VMX_EXIT_VMXOFF, cbInstr);
    55655652
    55665653    /* CPL. */
     
    56035690
    56045691/**
     5692 * Implements 'VMXON'.
     5693 */
     5694IEM_CIMPL_DEF_2(iemCImpl_vmxon, uint8_t, iEffSeg, RTGCPTR, GCPtrVmxon)
     5695{
     5696    return iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, NULL /* pExitInfo */);
     5697}
     5698
     5699
     5700/**
    56055701 * Implements 'VMLAUNCH'.
    56065702 */