VirtualBox

Changeset 73885 in vbox for trunk/src/VBox/VMM/VMMAll


Ignore:
Timestamp:
Aug 25, 2018 4:00:00 AM (6 years ago)
Author:
vboxsync
Message:

VMM/HM, IEM: Nested VMX: bugref:9180 Implement VMCLEAR instruction, fix typos and missing enum defines for instruction diagnostics.

Location:
trunk/src/VBox/VMM/VMMAll
Files:
2 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/HMVMXAll.cpp

    r73756 r73885  
    8585    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_Cpl         , "Cpl"          ),
    8686    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_PtrMap      , "PtrMap"       ),
     87    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmptrst_Success     , "Success"      ),
    8788    /* VMCLEAR. */
    88     VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_Cpl         , "Cpl"          )
     89    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_Cpl         , "Cpl"          ),
     90    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrAbnormal , "PtrAbnormal"  ),
     91    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrAlign    , "PtrAlign"     ),
     92    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrMap      , "PtrMap"       ),
     93    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrReadPhys , "PtrReadPhys"  ),
     94    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrVmxon    , "PtrVmxon"     ),
     95    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_PtrWidth    , "PtrWidth"     ),
     96    VMX_INSTR_DIAG_DESC(kVmxVInstrDiag_Vmclear_Success     , "Success"      )
    8997    /* kVmxVInstrDiag_Last */
    9098};
  • trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h

    r73870 r73885  
    5555    /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
    5656    {
    57         /*     0 */ RT_OFFSET(VMXVVMCS, GuestEs),
    58         /*     1 */ RT_OFFSET(VMXVVMCS, GuestCs),
    59         /*     2 */ RT_OFFSET(VMXVVMCS, GuestSs),
    60         /*     3 */ RT_OFFSET(VMXVVMCS, GuestDs),
    61         /*     4 */ RT_OFFSET(VMXVVMCS, GuestFs),
    62         /*     5 */ RT_OFFSET(VMXVVMCS, GuestGs),
    63         /*     6 */ RT_OFFSET(VMXVVMCS, GuestLdtr),
    64         /*     7 */ RT_OFFSET(VMXVVMCS, GuestTr),
    65         /*     8 */ RT_OFFSET(VMXVVMCS, u16GuestIntStatus),
    66         /*     9 */ RT_OFFSET(VMXVVMCS, u16PmlIndex),
     57        /*     0 */ RT_OFFSETOF(VMXVVMCS, GuestEs),
     58        /*     1 */ RT_OFFSETOF(VMXVVMCS, GuestCs),
     59        /*     2 */ RT_OFFSETOF(VMXVVMCS, GuestSs),
     60        /*     3 */ RT_OFFSETOF(VMXVVMCS, GuestDs),
     61        /*     4 */ RT_OFFSETOF(VMXVVMCS, GuestFs),
     62        /*     5 */ RT_OFFSETOF(VMXVVMCS, GuestGs),
     63        /*     6 */ RT_OFFSETOF(VMXVVMCS, GuestLdtr),
     64        /*     7 */ RT_OFFSETOF(VMXVVMCS, GuestTr),
     65        /*     8 */ RT_OFFSETOF(VMXVVMCS, u16GuestIntStatus),
     66        /*     9 */ RT_OFFSETOF(VMXVVMCS, u16PmlIndex),
    6767        /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
    6868        /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
     
    7070    /* VMX_VMCS_ENC_WIDTH_16BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
    7171    {
    72         /*     0 */ RT_OFFSET(VMXVVMCS, HostEs),
    73         /*     1 */ RT_OFFSET(VMXVVMCS, HostCs),
    74         /*     2 */ RT_OFFSET(VMXVVMCS, HostSs),
    75         /*     3 */ RT_OFFSET(VMXVVMCS, HostDs),
    76         /*     4 */ RT_OFFSET(VMXVVMCS, HostFs),
    77         /*     5 */ RT_OFFSET(VMXVVMCS, HostGs),
    78         /*     6 */ RT_OFFSET(VMXVVMCS, HostTr),
     72        /*     0 */ RT_OFFSETOF(VMXVVMCS, HostEs),
     73        /*     1 */ RT_OFFSETOF(VMXVVMCS, HostCs),
     74        /*     2 */ RT_OFFSETOF(VMXVVMCS, HostSs),
     75        /*     3 */ RT_OFFSETOF(VMXVVMCS, HostDs),
     76        /*     4 */ RT_OFFSETOF(VMXVVMCS, HostFs),
     77        /*     5 */ RT_OFFSETOF(VMXVVMCS, HostGs),
     78        /*     6 */ RT_OFFSETOF(VMXVVMCS, HostTr),
    7979        /*  7-14 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
    8080        /* 15-22 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
     
    112112    /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
    113113    {
    114         /*     0 */ RT_OFFSET(VMXVVMCS, u64GuestPhysAddr),
    115         /*   1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
     114        /*     0 */ RT_OFFSETOF(VMXVVMCS, u64GuestPhysAddr),
     115        /*   1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
    116116        /*  9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
    117117        /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
     
    120120    /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
    121121    {
    122         /*     0 */ RT_OFFSET(VMXVVMCS, u64VmcsLinkPtr),
    123         /*     1 */ RT_OFFSET(VMXVVMCS, u64GuestDebugCtlMsr),
    124         /*     2 */ RT_OFFSET(VMXVVMCS, u64GuestPatMsr),
    125         /*     3 */ RT_OFFSET(VMXVVMCS, u64GuestEferMsr),
    126         /*     4 */ RT_OFFSET(VMXVVMCS, u64GuestPerfGlobalCtlMsr),
    127         /*     5 */ RT_OFFSET(VMXVVMCS, u64GuestPdpte0),
    128         /*     6 */ RT_OFFSET(VMXVVMCS, u64GuestPdpte1),
    129         /*     7 */ RT_OFFSET(VMXVVMCS, u64GuestPdpte2),
    130         /*     8 */ RT_OFFSET(VMXVVMCS, u64GuestPdpte3),
    131         /*     9 */ RT_OFFSET(VMXVVMCS, u64GuestBndcfgsMsr),
    132         /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
     122        /*     0 */ RT_OFFSETOF(VMXVVMCS, u64VmcsLinkPtr),
     123        /*     1 */ RT_OFFSETOF(VMXVVMCS, u64GuestDebugCtlMsr),
     124        /*     2 */ RT_OFFSETOF(VMXVVMCS, u64GuestPatMsr),
     125        /*     3 */ RT_OFFSETOF(VMXVVMCS, u64GuestEferMsr),
     126        /*     4 */ RT_OFFSETOF(VMXVVMCS, u64GuestPerfGlobalCtlMsr),
     127        /*     5 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte0),
     128        /*     6 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte1),
     129        /*     7 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte2),
     130        /*     8 */ RT_OFFSETOF(VMXVVMCS, u64GuestPdpte3),
     131        /*     9 */ RT_OFFSETOF(VMXVVMCS, u64GuestBndcfgsMsr),
     132        /* 10-17 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
    133133        /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
    134134    },
    135135    /* VMX_VMCS_ENC_WIDTH_64BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
    136136    {
    137         /*     0 */ RT_OFFSET(VMXVVMCS, u64HostPatMsr),
    138         /*     1 */ RT_OFFSET(VMXVVMCS, u64HostEferMsr),
    139         /*     2 */ RT_OFFSET(VMXVVMCS, u64HostPerfGlobalCtlMsr),
     137        /*     0 */ RT_OFFSETOF(VMXVVMCS, u64HostPatMsr),
     138        /*     1 */ RT_OFFSETOF(VMXVVMCS, u64HostEferMsr),
     139        /*     2 */ RT_OFFSETOF(VMXVVMCS, u64HostPerfGlobalCtlMsr),
    140140        /*  3-10 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
    141141        /* 11-18 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
     
    144144    /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_CONTROL: */
    145145    {
    146         /*     0 */ RT_OFFSET(VMXVVMCS, u32PinCtls),
    147         /*     1 */ RT_OFFSET(VMXVVMCS, u32ProcCtls),
    148         /*     2 */ RT_OFFSET(VMXVVMCS, u32XcptBitmap),
    149         /*     3 */ RT_OFFSET(VMXVVMCS, u32XcptPFMask),
    150         /*     4 */ RT_OFFSET(VMXVVMCS, u32XcptPFMatch),
    151         /*     5 */ RT_OFFSET(VMXVVMCS, u32Cr3TargetCount),
    152         /*     6 */ RT_OFFSET(VMXVVMCS, u32ExitCtls),
    153         /*     7 */ RT_OFFSET(VMXVVMCS, u32ExitMsrStoreCount),
    154         /*     8 */ RT_OFFSET(VMXVVMCS, u32ExitMsrLoadCount),
    155         /*     9 */ RT_OFFSET(VMXVVMCS, u32EntryCtls),
    156         /*    10 */ RT_OFFSET(VMXVVMCS, u32EntryMsrLoadCount),
    157         /*    11 */ RT_OFFSET(VMXVVMCS, u32EntryIntInfo),
    158         /*    12 */ RT_OFFSET(VMXVVMCS, u32EntryXcptErrCode),
    159         /*    13 */ RT_OFFSET(VMXVVMCS, u32EntryInstrLen),
    160         /*    14 */ RT_OFFSET(VMXVVMCS, u32TprTreshold),
    161         /*    15 */ RT_OFFSET(VMXVVMCS, u32ProcCtls2),
    162         /*    16 */ RT_OFFSET(VMXVVMCS, u32PleGap),
    163         /*    17 */ RT_OFFSET(VMXVVMCS, u32PleWindow),
     146        /*     0 */ RT_OFFSETOF(VMXVVMCS, u32PinCtls),
     147        /*     1 */ RT_OFFSETOF(VMXVVMCS, u32ProcCtls),
     148        /*     2 */ RT_OFFSETOF(VMXVVMCS, u32XcptBitmap),
     149        /*     3 */ RT_OFFSETOF(VMXVVMCS, u32XcptPFMask),
     150        /*     4 */ RT_OFFSETOF(VMXVVMCS, u32XcptPFMatch),
     151        /*     5 */ RT_OFFSETOF(VMXVVMCS, u32Cr3TargetCount),
     152        /*     6 */ RT_OFFSETOF(VMXVVMCS, u32ExitCtls),
     153        /*     7 */ RT_OFFSETOF(VMXVVMCS, u32ExitMsrStoreCount),
     154        /*     8 */ RT_OFFSETOF(VMXVVMCS, u32ExitMsrLoadCount),
     155        /*     9 */ RT_OFFSETOF(VMXVVMCS, u32EntryCtls),
     156        /*    10 */ RT_OFFSETOF(VMXVVMCS, u32EntryMsrLoadCount),
     157        /*    11 */ RT_OFFSETOF(VMXVVMCS, u32EntryIntInfo),
     158        /*    12 */ RT_OFFSETOF(VMXVVMCS, u32EntryXcptErrCode),
     159        /*    13 */ RT_OFFSETOF(VMXVVMCS, u32EntryInstrLen),
     160        /*    14 */ RT_OFFSETOF(VMXVVMCS, u32TprTreshold),
     161        /*    15 */ RT_OFFSETOF(VMXVVMCS, u32ProcCtls2),
     162        /*    16 */ RT_OFFSETOF(VMXVVMCS, u32PleGap),
     163        /*    17 */ RT_OFFSETOF(VMXVVMCS, u32PleWindow),
    164164        /* 18-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
    165165    },
    166166    /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
    167167    {
    168         /*     0 */ RT_OFFSET(VMXVVMCS, u32RoVmInstrError),
    169         /*     1 */ RT_OFFSET(VMXVVMCS, u32RoVmExitReason),
    170         /*     2 */ RT_OFFSET(VMXVVMCS, u32RoVmExitIntInfo),
    171         /*     3 */ RT_OFFSET(VMXVVMCS, u32RoVmExitErrCode),
    172         /*     4 */ RT_OFFSET(VMXVVMCS, u32RoIdtVectoringInfo),
    173         /*     5 */ RT_OFFSET(VMXVVMCS, u32RoIdtVectoringErrCode),
    174         /*     6 */ RT_OFFSET(VMXVVMCS, u32RoVmExitInstrLen),
    175         /*     7 */ RT_OFFSET(VMXVVMCS, u32RoVmExitInstrInfo),
     168        /*     0 */ RT_OFFSETOF(VMXVVMCS, u32RoVmInstrError),
     169        /*     1 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitReason),
     170        /*     2 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitIntInfo),
     171        /*     3 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitErrCode),
     172        /*     4 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringInfo),
     173        /*     5 */ RT_OFFSETOF(VMXVVMCS, u32RoIdtVectoringErrCode),
     174        /*     6 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitInstrLen),
     175        /*     7 */ RT_OFFSETOF(VMXVVMCS, u32RoVmExitInstrInfo),
    176176        /*  8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
    177177        /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
     
    180180    /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
    181181    {
    182         /*     0 */ RT_OFFSET(VMXVVMCS, u32GuestEsLimit),
    183         /*     1 */ RT_OFFSET(VMXVVMCS, u32GuestCsLimit),
    184         /*     2 */ RT_OFFSET(VMXVVMCS, u32GuestSsLimit),
    185         /*     3 */ RT_OFFSET(VMXVVMCS, u32GuestDsLimit),
    186         /*     4 */ RT_OFFSET(VMXVVMCS, u32GuestEsLimit),
    187         /*     5 */ RT_OFFSET(VMXVVMCS, u32GuestFsLimit),
    188         /*     6 */ RT_OFFSET(VMXVVMCS, u32GuestGsLimit),
    189         /*     7 */ RT_OFFSET(VMXVVMCS, u32GuestLdtrLimit),
    190         /*     8 */ RT_OFFSET(VMXVVMCS, u32GuestTrLimit),
    191         /*     9 */ RT_OFFSET(VMXVVMCS, u32GuestGdtrLimit),
    192         /*    10 */ RT_OFFSET(VMXVVMCS, u32GuestIdtrLimit),
    193         /*    11 */ RT_OFFSET(VMXVVMCS, u32GuestEsAttr),
    194         /*    12 */ RT_OFFSET(VMXVVMCS, u32GuestCsAttr),
    195         /*    13 */ RT_OFFSET(VMXVVMCS, u32GuestSsAttr),
    196         /*    14 */ RT_OFFSET(VMXVVMCS, u32GuestDsAttr),
    197         /*    15 */ RT_OFFSET(VMXVVMCS, u32GuestFsAttr),
    198         /*    16 */ RT_OFFSET(VMXVVMCS, u32GuestGsAttr),
    199         /*    17 */ RT_OFFSET(VMXVVMCS, u32GuestLdtrAttr),
    200         /*    18 */ RT_OFFSET(VMXVVMCS, u32GuestTrAttr),
    201         /*    19 */ RT_OFFSET(VMXVVMCS, u32GuestIntrState),
    202         /*    20 */ RT_OFFSET(VMXVVMCS, u32GuestActivityState),
    203         /*    21 */ RT_OFFSET(VMXVVMCS, u32GuestSmBase),
    204         /*    22 */ RT_OFFSET(VMXVVMCS, u32GuestSysenterCS),
    205         /*    23 */ RT_OFFSET(VMXVVMCS, u32PreemptTimer),
      182        /*     0 */ RT_OFFSETOF(VMXVVMCS, u32GuestEsLimit),
      183        /*     1 */ RT_OFFSETOF(VMXVVMCS, u32GuestCsLimit),
      184        /*     2 */ RT_OFFSETOF(VMXVVMCS, u32GuestSsLimit),
      185        /*     3 */ RT_OFFSETOF(VMXVVMCS, u32GuestDsLimit),
      186        /*     4 */ RT_OFFSETOF(VMXVVMCS, u32GuestFsLimit),
      187        /*     5 */ RT_OFFSETOF(VMXVVMCS, u32GuestGsLimit),
      188        /*     6 */ RT_OFFSETOF(VMXVVMCS, u32GuestLdtrLimit),
      189        /*     7 */ RT_OFFSETOF(VMXVVMCS, u32GuestTrLimit),
      190        /*     8 */ RT_OFFSETOF(VMXVVMCS, u32GuestGdtrLimit),
      191        /*     9 */ RT_OFFSETOF(VMXVVMCS, u32GuestIdtrLimit),
      192        /*    10 */ RT_OFFSETOF(VMXVVMCS, u32GuestEsAttr),
      193        /*    11 */ RT_OFFSETOF(VMXVVMCS, u32GuestCsAttr),
      194        /*    12 */ RT_OFFSETOF(VMXVVMCS, u32GuestSsAttr),
      195        /*    13 */ RT_OFFSETOF(VMXVVMCS, u32GuestDsAttr),
      196        /*    14 */ RT_OFFSETOF(VMXVVMCS, u32GuestFsAttr),
      197        /*    15 */ RT_OFFSETOF(VMXVVMCS, u32GuestGsAttr),
      198        /*    16 */ RT_OFFSETOF(VMXVVMCS, u32GuestLdtrAttr),
      199        /*    17 */ RT_OFFSETOF(VMXVVMCS, u32GuestTrAttr),
      200        /*    18 */ RT_OFFSETOF(VMXVVMCS, u32GuestIntrState),
      201        /*    19 */ RT_OFFSETOF(VMXVVMCS, u32GuestActivityState),
      202        /*    20 */ RT_OFFSETOF(VMXVVMCS, u32GuestSmBase),
      203        /*    21 */ RT_OFFSETOF(VMXVVMCS, u32GuestSysenterCS),
      204        /*    22 */ UINT16_MAX,
      205        /*    23 */ RT_OFFSETOF(VMXVVMCS, u32PreemptTimer),
    206206        /* 24-25 */ UINT16_MAX, UINT16_MAX
    207207    },
    208208    /* VMX_VMCS_ENC_WIDTH_32BIT | VMX_VMCS_ENC_TYPE_HOST_STATE: */
    209209    {
    210         /*     0 */ RT_OFFSET(VMXVVMCS, u32HostSysenterCs),
    211         /*   1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
     210        /*     0 */ RT_OFFSETOF(VMXVVMCS, u32HostSysenterCs),
     211        /*   1-8 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
    212212        /*  9-16 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
    213213        /* 17-24 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
     
    216216    /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_CONTROL: */
    217217    {
    218         /*     0 */ RT_OFFSET(VMXVVMCS, u64Cr0Mask),
    219         /*     1 */ RT_OFFSET(VMXVVMCS, u64Cr4Mask),
    220         /*     2 */ RT_OFFSET(VMXVVMCS, u64Cr0ReadShadow),
    221         /*     3 */ RT_OFFSET(VMXVVMCS, u64Cr4ReadShadow),
    222         /*     4 */ RT_OFFSET(VMXVVMCS, u64Cr3Target0),
    223         /*     5 */ RT_OFFSET(VMXVVMCS, u64Cr3Target1),
    224         /*     6 */ RT_OFFSET(VMXVVMCS, u64Cr3Target2),
    225         /*     7 */ RT_OFFSET(VMXVVMCS, u64Cr3Target3),
     218        /*     0 */ RT_OFFSETOF(VMXVVMCS, u64Cr0Mask),
     219        /*     1 */ RT_OFFSETOF(VMXVVMCS, u64Cr4Mask),
     220        /*     2 */ RT_OFFSETOF(VMXVVMCS, u64Cr0ReadShadow),
     221        /*     3 */ RT_OFFSETOF(VMXVVMCS, u64Cr4ReadShadow),
     222        /*     4 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target0),
     223        /*     5 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target1),
     224        /*     6 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target2),
     225        /*     7 */ RT_OFFSETOF(VMXVVMCS, u64Cr3Target3),
    226226        /*  8-15 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
    227227        /* 16-23 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
     
    230230    /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_VMEXIT_INFO: */
    231231    {
    232         /*     0 */ RT_OFFSET(VMXVVMCS, u64ExitQual),
    233         /*     1 */ RT_OFFSET(VMXVVMCS, u64IoRcx),
    234         /*     2 */ RT_OFFSET(VMXVVMCS, u64IoRsi),
    235         /*     3 */ RT_OFFSET(VMXVVMCS, u64IoRdi),
    236         /*     4 */ RT_OFFSET(VMXVVMCS, u64IoRip),
    237         /*     5 */ RT_OFFSET(VMXVVMCS, u64GuestLinearAddr),
     232        /*     0 */ RT_OFFSETOF(VMXVVMCS, u64ExitQual),
     233        /*     1 */ RT_OFFSETOF(VMXVVMCS, u64IoRcx),
     234        /*     2 */ RT_OFFSETOF(VMXVVMCS, u64IoRsi),
     235        /*     3 */ RT_OFFSETOF(VMXVVMCS, u64IoRdi),
     236        /*     4 */ RT_OFFSETOF(VMXVVMCS, u64IoRip),
     237        /*     5 */ RT_OFFSETOF(VMXVVMCS, u64GuestLinearAddr),
    238238        /*  6-13 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
    239239        /* 14-21 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
     
    242242    /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_GUEST_STATE: */
    243243    {
    244         /*     0 */ RT_OFFSET(VMXVVMCS, u64GuestCr0),
    245         /*     1 */ RT_OFFSET(VMXVVMCS, u64GuestCr3),
    246         /*     2 */ RT_OFFSET(VMXVVMCS, u64GuestCr4),
    247         /*     3 */ RT_OFFSET(VMXVVMCS, u64GuestEsBase),
    248         /*     4 */ RT_OFFSET(VMXVVMCS, u64GuestCsBase),
    249         /*     5 */ RT_OFFSET(VMXVVMCS, u64GuestSsBase),
    250         /*     6 */ RT_OFFSET(VMXVVMCS, u64GuestDsBase),
    251         /*     7 */ RT_OFFSET(VMXVVMCS, u64GuestFsBase),
    252         /*     8 */ RT_OFFSET(VMXVVMCS, u64GuestGsBase),
    253         /*     9 */ RT_OFFSET(VMXVVMCS, u64GuestLdtrBase),
    254         /*    10 */ RT_OFFSET(VMXVVMCS, u64GuestTrBase),
    255         /*    11 */ RT_OFFSET(VMXVVMCS, u64GuestGdtrBase),
    256         /*    12 */ RT_OFFSET(VMXVVMCS, u64GuestIdtrBase),
    257         /*    13 */ RT_OFFSET(VMXVVMCS, u64GuestDr7),
    258         /*    14 */ RT_OFFSET(VMXVVMCS, u64GuestRsp),
    259         /*    15 */ RT_OFFSET(VMXVVMCS, u64GuestRip),
    260         /*    16 */ RT_OFFSET(VMXVVMCS, u64GuestRFlags),
    261         /*    17 */ RT_OFFSET(VMXVVMCS, u64GuestPendingDbgXcpt),
    262         /*    18 */ RT_OFFSET(VMXVVMCS, u64GuestSysenterEsp),
    263         /*    19 */ RT_OFFSET(VMXVVMCS, u64GuestSysenterEip),
     244        /*     0 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr0),
     245        /*     1 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr3),
     246        /*     2 */ RT_OFFSETOF(VMXVVMCS, u64GuestCr4),
     247        /*     3 */ RT_OFFSETOF(VMXVVMCS, u64GuestEsBase),
     248        /*     4 */ RT_OFFSETOF(VMXVVMCS, u64GuestCsBase),
     249        /*     5 */ RT_OFFSETOF(VMXVVMCS, u64GuestSsBase),
     250        /*     6 */ RT_OFFSETOF(VMXVVMCS, u64GuestDsBase),
     251        /*     7 */ RT_OFFSETOF(VMXVVMCS, u64GuestFsBase),
     252        /*     8 */ RT_OFFSETOF(VMXVVMCS, u64GuestGsBase),
     253        /*     9 */ RT_OFFSETOF(VMXVVMCS, u64GuestLdtrBase),
     254        /*    10 */ RT_OFFSETOF(VMXVVMCS, u64GuestTrBase),
     255        /*    11 */ RT_OFFSETOF(VMXVVMCS, u64GuestGdtrBase),
     256        /*    12 */ RT_OFFSETOF(VMXVVMCS, u64GuestIdtrBase),
     257        /*    13 */ RT_OFFSETOF(VMXVVMCS, u64GuestDr7),
     258        /*    14 */ RT_OFFSETOF(VMXVVMCS, u64GuestRsp),
     259        /*    15 */ RT_OFFSETOF(VMXVVMCS, u64GuestRip),
     260        /*    16 */ RT_OFFSETOF(VMXVVMCS, u64GuestRFlags),
     261        /*    17 */ RT_OFFSETOF(VMXVVMCS, u64GuestPendingDbgXcpt),
     262        /*    18 */ RT_OFFSETOF(VMXVVMCS, u64GuestSysenterEsp),
     263        /*    19 */ RT_OFFSETOF(VMXVVMCS, u64GuestSysenterEip),
    264264        /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
    265265    },
    266266    /* VMX_VMCS_ENC_WIDTH_NATURAL | VMX_VMCS_ENC_TYPE_HOST_STATE: */
    267267    {
    268         /*     0 */ RT_OFFSET(VMXVVMCS, u64HostCr0),
    269         /*     1 */ RT_OFFSET(VMXVVMCS, u64HostCr3),
    270         /*     2 */ RT_OFFSET(VMXVVMCS, u64HostCr4),
    271         /*     3 */ RT_OFFSET(VMXVVMCS, u64HostFsBase),
    272         /*     4 */ RT_OFFSET(VMXVVMCS, u64HostGsBase),
    273         /*     5 */ RT_OFFSET(VMXVVMCS, u64HostTrBase),
    274         /*     6 */ RT_OFFSET(VMXVVMCS, u64HostGdtrBase),
    275         /*     7 */ RT_OFFSET(VMXVVMCS, u64HostIdtrBase),
    276         /*     8 */ RT_OFFSET(VMXVVMCS, u64HostSysenterEsp),
    277         /*     9 */ RT_OFFSET(VMXVVMCS, u64HostSysenterEip),
    278         /*    10 */ RT_OFFSET(VMXVVMCS, u64HostRsp),
    279         /*    11 */ RT_OFFSET(VMXVVMCS, u64HostRip),
     268        /*     0 */ RT_OFFSETOF(VMXVVMCS, u64HostCr0),
     269        /*     1 */ RT_OFFSETOF(VMXVVMCS, u64HostCr3),
     270        /*     2 */ RT_OFFSETOF(VMXVVMCS, u64HostCr4),
     271        /*     3 */ RT_OFFSETOF(VMXVVMCS, u64HostFsBase),
     272        /*     4 */ RT_OFFSETOF(VMXVVMCS, u64HostGsBase),
     273        /*     5 */ RT_OFFSETOF(VMXVVMCS, u64HostTrBase),
     274        /*     6 */ RT_OFFSETOF(VMXVVMCS, u64HostGdtrBase),
     275        /*     7 */ RT_OFFSETOF(VMXVVMCS, u64HostIdtrBase),
     276        /*     8 */ RT_OFFSETOF(VMXVVMCS, u64HostSysenterEsp),
     277        /*     9 */ RT_OFFSETOF(VMXVVMCS, u64HostSysenterEip),
     278        /*    10 */ RT_OFFSETOF(VMXVVMCS, u64HostRsp),
     279        /*    11 */ RT_OFFSETOF(VMXVVMCS, u64HostRip),
    280280        /* 12-19 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX,
    281281        /* 20-25 */ UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX, UINT16_MAX
     
    360360    } while (0)
    361361# endif /* !IEM_WITH_CODE_TLB */
     362
     363/** Whether a current VMCS is present for the given VCPU. */
     364#define IEM_VMX_HAS_CURRENT_VMCS(a_pVCpu)       RT_BOOL((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs != NIL_RTGCPHYS)
     365
     366/** Gets the current VMCS for the given VCPU. */
     367#define IEM_VMX_GET_CURRENT_VMCS(a_pVCpu)       ((a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs)
     368
     369/** Sets a new VMCS as the current VMCS for the given VCPU. */
     370#define IEM_VMX_SET_CURRENT_VMCS(a_pVCpu, a_GCPhysVmcs) \
     371    do \
     372    { \
     373        Assert((a_GCPhysVmcs) != NIL_RTGCPHYS); \
     374        (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = (a_GCPhysVmcs); \
     375    } while (0)
     376
     377/** Clears any current VMCS for the given VCPU. */
     378#define IEM_VMX_CLEAR_CURRENT_VMCS(a_pVCpu) \
     379    do \
     380    { \
     381        (a_pVCpu)->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = NIL_RTGCPHYS; \
     382    } while (0)
    362383
    363384
     
    966987DECLINLINE(void) iemVmxVmFailValid(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
    967988{
    968     if (pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs != NIL_RTGCPHYS)
     989    if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
    969990    {
    970991        pVCpu->cpum.GstCtx.eflags.u32 &= ~(X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF);
     
    9841005DECLINLINE(void) iemVmxVmFail(PVMCPU pVCpu, VMXINSTRERR enmInsErr)
    9851006{
    986     if (pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs != NIL_RTGCPHYS)
     1007    if (IEM_VMX_HAS_CURRENT_VMCS(pVCpu))
    9871008    {
    9881009        iemVmxVmFailValid(pVCpu, enmInsErr);
     
    9911012    else
    9921013        iemVmxVmFailInvalid(pVCpu);
     1014}
     1015
     1016
     1017/**
     1018 * Flushes the current VMCS contents back to guest memory.
     1019 *
     1020 * @returns VBox status code.
     1021 * @param   pVCpu           The cross context virtual CPU structure.
     1022 */
     1023DECLINLINE(int) iemVmxCommitCurrentVmcsToMemory(PVMCPU pVCpu)
     1024{
     1025    Assert(IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
     1026    int rc = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), IEM_VMX_GET_CURRENT_VMCS(pVCpu),
     1027                                      pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs), sizeof(VMXVVMCS));
     1028    IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
     1029    return rc;
    9931030}
    9941031
     
    10241061    }
    10251062
    1026     /** @todo NSTVMX: VMCLEAR impl.  */
    1027     RT_NOREF(GCPtrVmcs); RT_NOREF(pExitInstrInfo); RT_NOREF(cbInstr);
    1028     return VINF_SUCCESS;
     1063    /* Get the VMCS pointer from the location specified by the source memory operand. */
     1064    RTGCPHYS GCPhysVmcs;
     1065    VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmcs, pExitInstrInfo->VmxXsave.iSegReg, GCPtrVmcs);
     1066    if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
     1067    {
     1068        Log(("vmclear: Failed to read VMCS physaddr from %#RGv, rc=%Rrc\n", GCPtrVmcs, VBOXSTRICTRC_VAL(rcStrict)));
     1069        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_PtrMap;
     1070        return rcStrict;
     1071    }
     1072
     1073    /* VMCS pointer alignment. */
     1074    if (GCPhysVmcs & X86_PAGE_4K_OFFSET_MASK)
     1075    {
     1076        Log(("vmclear: VMCS pointer not page-aligned -> VMFail()\n"));
     1077        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_PtrAlign;
     1078        iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
     1079        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     1080        return VINF_SUCCESS;
     1081    }
     1082
     1083    /* VMCS physical-address width limits. */
     1084    Assert(!VMX_V_VMCS_PHYSADDR_4G_LIMIT);
     1085    if (GCPhysVmcs >> IEM_GET_GUEST_CPU_FEATURES(pVCpu)->cMaxPhysAddrWidth)
     1086    {
     1087        Log(("vmclear: VMCS pointer extends beyond physical-address width -> VMFail()\n"));
     1088        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_PtrWidth;
     1089        iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
     1090        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     1091        return VINF_SUCCESS;
     1092    }
     1093
     1094    /* VMCS is not the VMXON region. */
     1095    if (GCPhysVmcs == pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon)
     1096    {
     1097        Log(("vmclear: VMCS pointer cannot be identical to VMXON region pointer -> VMFail()\n"));
     1098        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_PtrVmxon;
     1099        iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_VMXON_PTR);
     1100        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     1101        return VINF_SUCCESS;
     1102    }
     1103
     1104    /* Ensure VMCS is not MMIO, ROM etc. This is not an Intel requirement but a
     1105       restriction imposed by our implementation. */
     1106    if (!PGMPhysIsGCPhysNormal(pVCpu->CTX_SUFF(pVM), GCPhysVmcs))
     1107    {
     1108        Log(("vmclear: VMCS not normal memory -> VMFail()\n"));
     1109        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_PtrAbnormal;
     1110        iemVmxVmFail(pVCpu, VMXINSTRERR_VMCLEAR_INVALID_PHYSADDR);
     1111        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     1112        return VINF_SUCCESS;
     1113    }
     1114
     1115    /*
     1116     * VMCLEAR allows committing and clearing any valid VMCS pointer.
     1117     *
     1118     * If the current VMCS is the one being cleared, set its state to 'clear' and commit
     1119     * to guest memory. Otherwise, set the state of the VMCS referenced in guest memory
     1120     * to 'clear'.
     1121     */
     1122    uint8_t const fVmcsStateClear = VMX_V_VMCS_STATE_CLEAR;
     1123    if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) == GCPhysVmcs)
     1124    {
     1125        Assert(GCPhysVmcs != NIL_RTGCPHYS); /* Paranoia. */
     1126        pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs)->fVmcsState = fVmcsStateClear;
     1127        iemVmxCommitCurrentVmcsToMemory(pVCpu);
     1128        Assert(!IEM_VMX_HAS_CURRENT_VMCS(pVCpu));
     1129    }
     1130    else
     1131    {
     1132        rcStrict = PGMPhysSimpleWriteGCPhys(pVCpu->CTX_SUFF(pVM), GCPhysVmcs + RT_OFFSETOF(VMXVVMCS, fVmcsState),
     1133                                            (const void *)&fVmcsStateClear, sizeof(fVmcsStateClear));
     1134    }
     1135
     1136    pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmclear_Success;
     1137    iemVmxVmSucceed(pVCpu);
     1138    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     1139    return rcStrict;
    10291140}
    10301141
     
    10621173
    10631174    /* Set the VMCS pointer to the location specified by the destination memory operand. */
    1064     Assert(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
     1175    AssertCompile(NIL_RTGCPHYS == ~(RTGCPHYS)0U);
    10651176    VBOXSTRICTRC rcStrict = iemMemStoreDataU64(pVCpu, pExitInstrInfo->VmxXsave.iSegReg, GCPtrVmcs,
    1066                                                pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs);
     1177                                               IEM_VMX_GET_CURRENT_VMCS(pVCpu));
    10671178    if (RT_LIKELY(rcStrict == VINF_SUCCESS))
    10681179    {
     1180        pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrst_Success;
    10691181        iemVmxVmSucceed(pVCpu);
    10701182        iemRegAddToRipAndClearRF(pVCpu, cbInstr);
     
    10721184    }
    10731185
    1074     Log(("vmptrld: Failed to store VMCS pointer to memory at destination operand %#Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
     1186    Log(("vmptrst: Failed to store VMCS pointer to memory at destination operand %#Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    10751187    pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrst_PtrMap;
    10761188    return rcStrict;
     
    11931305    }
    11941306
    1195     pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs = GCPhysVmcs;
     1307    /*
     1308     * We only maintain only the current VMCS in our virtual CPU context (CPUMCTX). Therefore,
     1309     * VMPTRLD shall always flush any existing current VMCS back to guest memory before loading
     1310     * a new VMCS as current.
     1311     */
     1312    if (IEM_VMX_GET_CURRENT_VMCS(pVCpu) != GCPhysVmcs)
     1313    {
     1314        iemVmxCommitCurrentVmcsToMemory(pVCpu);
     1315        IEM_VMX_SET_CURRENT_VMCS(pVCpu, GCPhysVmcs);
     1316    }
    11961317    pVCpu->cpum.GstCtx.hwvirt.vmx.enmInstrDiag = kVmxVInstrDiag_Vmptrld_Success;
    11971318    iemVmxVmSucceed(pVCpu);
     
    13431464         */
    13441465        pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmxon    = GCPhysVmxon;
    1345         pVCpu->cpum.GstCtx.hwvirt.vmx.GCPhysVmcs     = NIL_RTGCPHYS;
     1466        IEM_VMX_CLEAR_CURRENT_VMCS(pVCpu);
    13461467        pVCpu->cpum.GstCtx.hwvirt.vmx.fInVmxRootMode = true;
    13471468        /** @todo NSTVMX: clear address-range monitoring. */
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette