Changeset 73710 in vbox for trunk/src/VBox/VMM
Timestamp: Aug 16, 2018 10:23:05 AM
File: 1 edited
Legend: unmodified context lines are shown with a leading space, added lines with a leading +, removed lines with a leading -.
trunk/src/VBox/VMM/VMMAll/IEMAllCImplVmxInstr.cpp.h
--- VMMAll/IEMAllCImplVmxInstr.cpp.h (r73628)
+++ VMMAll/IEMAllCImplVmxInstr.cpp.h (r73710)

@@ -31 +31 @@
 
 /**
+ * Gets the ModR/M and displacement byte(s) from decoded opcodes given their
+ * relative offsets.
+ */
+# ifdef IEM_WITH_CODE_TLB
+#  define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm)          do { } while (0)
+#  define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib)                do { } while (0)
+#  define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp)          do { } while (0)
+#  define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp)    do { } while (0)
+#  define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp)          do { } while (0)
+#  define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp)    do { } while (0)
+#  define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp)   do { } while (0)
+#  define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp)    do { } while (0)
+#  error "Implement me: Getting ModR/M, disp. has to work even when the instruction crosses a page boundary."
+# else  /* !IEM_WITH_CODE_TLB */
+#  define IEM_MODRM_GET_U8(a_pVCpu, a_bModRm, a_offModRm) \
+    do \
+    { \
+        Assert((a_offModRm) < (a_pVCpu)->iem.s.cbOpcode); \
+        (a_bModRm) = (a_pVCpu)->iem.s.abOpcode[(a_offModRm)]; \
+    } while (0)
+
+#  define IEM_SIB_GET_U8(a_pVCpu, a_bSib, a_offSib)    IEM_MODRM_GET_U8(a_pVCpu, a_bSib, a_offSib)
+
+#  define IEM_DISP_GET_U16(a_pVCpu, a_u16Disp, a_offDisp) \
+    do \
+    { \
+        Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
+        uint8_t const bTmpLo = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
+        uint8_t const bTmpHi = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
+        (a_u16Disp) = RT_MAKE_U16(bTmpLo, bTmpHi); \
+    } while (0)
+
+#  define IEM_DISP_GET_S8_SX_U16(a_pVCpu, a_u16Disp, a_offDisp) \
+    do \
+    { \
+        Assert((a_offDisp) < (a_pVCpu)->iem.s.cbOpcode); \
+        (a_u16Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
+    } while (0)
+
+#  define IEM_DISP_GET_U32(a_pVCpu, a_u32Disp, a_offDisp) \
+    do \
+    { \
+        Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
+        uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
+        uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
+        uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
+        uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
+        (a_u32Disp) = RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
+    } while (0)
+
+#  define IEM_DISP_GET_S8_SX_U32(a_pVCpu, a_u32Disp, a_offDisp) \
+    do \
+    { \
+        Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
+        (a_u32Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
+    } while (0)
+
+#  define IEM_DISP_GET_S8_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
+    do \
+    { \
+        Assert((a_offDisp) + 1 < (a_pVCpu)->iem.s.cbOpcode); \
+        (a_u64Disp) = (int8_t)((a_pVCpu)->iem.s.abOpcode[(a_offDisp)]); \
+    } while (0)
+
+#  define IEM_DISP_GET_S32_SX_U64(a_pVCpu, a_u64Disp, a_offDisp) \
+    do \
+    { \
+        Assert((a_offDisp) + 3 < (a_pVCpu)->iem.s.cbOpcode); \
+        uint8_t const bTmp0 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp)]; \
+        uint8_t const bTmp1 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 1]; \
+        uint8_t const bTmp2 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 2]; \
+        uint8_t const bTmp3 = (a_pVCpu)->iem.s.abOpcode[(a_offDisp) + 3]; \
+        (a_u64Disp) = (int32_t)RT_MAKE_U32_FROM_U8(bTmp0, bTmp1, bTmp2, bTmp3); \
+    } while (0)
+# endif /* !IEM_WITH_CODE_TLB */
+
+/**
+ * Gets VM-exit instruction information along with any displacement for an
+ * instruction VM-exit.
+ *
+ * @returns The VM-exit instruction information.
+ * @param   pVCpu           The cross context virtual CPU structure.
+ * @param   uExitReason     The VM-exit reason.
+ * @param   InstrId         The VM-exit instruction identity (VMX_INSTR_ID_XXX) if
+ *                          any. Pass VMX_INSTR_ID_NONE otherwise.
+ * @param   pGCPtrDisp      Where to store the displacement field. Optional, can be
+ *                          NULL.
+ */
+IEM_STATIC uint32_t iemVmxGetExitInstrInfo(PVMCPU pVCpu, uint32_t uExitReason, VMXINSTRID InstrId, PRTGCPTR pGCPtrDisp)
+{
+    RTGCPTR          GCPtrDisp;
+    VMXEXITINSTRINFO ExitInstrInfo;
+    ExitInstrInfo.u = 0;
+
+    /*
+     * Get and parse the ModR/M byte from our decoded opcodes.
+     */
+    uint8_t bRm;
+    uint8_t const offModRm = pVCpu->iem.s.offModRm;
+    IEM_MODRM_GET_U8(pVCpu, bRm, offModRm);
+    if ((bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT))
+    {
+        /*
+         * ModR/M indicates register addressing.
+         */
+        ExitInstrInfo.All.u2Scaling       = 0;
+        ExitInstrInfo.All.iReg1           = (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB;
+        ExitInstrInfo.All.u3AddrSize      = pVCpu->iem.s.enmEffAddrMode;
+        ExitInstrInfo.All.fIsRegOperand   = 1;
+        ExitInstrInfo.All.uOperandSize    = pVCpu->iem.s.enmEffOpSize;
+        ExitInstrInfo.All.iSegReg         = 0;
+        ExitInstrInfo.All.iIdxReg         = 0;
+        ExitInstrInfo.All.fIdxRegInvalid  = 1;
+        ExitInstrInfo.All.iBaseReg        = 0;
+        ExitInstrInfo.All.fBaseRegInvalid = 1;
+        ExitInstrInfo.All.iReg2           = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
+
+        /* Displacement not applicable for register addressing. */
+        GCPtrDisp = 0;
+    }
+    else
+    {
+        /*
+         * ModR/M indicates memory addressing.
+         */
+        uint8_t uScale        = 0;
+        bool    fBaseRegValid = false;
+        bool    fIdxRegValid  = false;
+        uint8_t iBaseReg      = 0;
+        uint8_t iIdxReg       = 0;
+        uint8_t iReg2         = 0;
+        if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
+        {
+            /*
+             * Parse the ModR/M, displacement for 16-bit addressing mode.
+             * See Intel instruction spec. Table 2-1. "16-Bit Addressing Forms with the ModR/M Byte".
+             */
+            uint16_t u16Disp = 0;
+            uint8_t const offDisp = offModRm + sizeof(bRm);
+            if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
+            {
+                /* Displacement without any registers. */
+                IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);
+            }
+            else
+            {
+                /* Register (index and base). */
+                switch (bRm & X86_MODRM_RM_MASK)
+                {
+                    case 0: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
+                    case 1: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
+                    case 2: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xSI; break;
+                    case 3: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; fIdxRegValid = true; iIdxReg = X86_GREG_xDI; break;
+                    case 4: fIdxRegValid  = true; iIdxReg  = X86_GREG_xSI; break;
+                    case 5: fIdxRegValid  = true; iIdxReg  = X86_GREG_xDI; break;
+                    case 6: fBaseRegValid = true; iBaseReg = X86_GREG_xBP; break;
+                    case 7: fBaseRegValid = true; iBaseReg = X86_GREG_xBX; break;
+                }
+
+                /* Register + displacement. */
+                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
+                {
+                    case 0:                                                  break;
+                    case 1: IEM_DISP_GET_S8_SX_U16(pVCpu, u16Disp, offDisp); break;
+                    case 2: IEM_DISP_GET_U16(pVCpu, u16Disp, offDisp);       break;
+                    default:
+                    {
+                        /* Register addressing, handled at the beginning. */
+                        AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
+                        break;
+                    }
+                }
+            }
+
+            Assert(!uScale);                    /* There's no scaling/SIB byte for 16-bit addressing. */
+            GCPtrDisp = (int16_t)u16Disp;       /* Sign-extend the displacement. */
+            iReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+        }
+        else if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
+        {
+            /*
+             * Parse the ModR/M, SIB, displacement for 32-bit addressing mode.
+             * See Intel instruction spec. Table 2-2. "32-Bit Addressing Forms with the ModR/M Byte".
+             */
+            uint32_t u32Disp = 0;
+            if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
+            {
+                /* Displacement without any registers. */
+                uint8_t const offDisp = offModRm + sizeof(bRm);
+                IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
+            }
+            else
+            {
+                /* Register (and perhaps scale, index and base). */
+                uint8_t offDisp = offModRm + sizeof(bRm);
+                iBaseReg = (bRm & X86_MODRM_RM_MASK);
+                if (iBaseReg == 4)
+                {
+                    /* An SIB byte follows the ModR/M byte, parse it. */
+                    uint8_t bSib;
+                    uint8_t const offSib = offModRm + sizeof(bRm);
+                    IEM_SIB_GET_U8(pVCpu, bSib, offSib);
+
+                    /* A displacement may follow SIB, update its offset. */
+                    offDisp += sizeof(bSib);
+
+                    /* Get the scale. */
+                    uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
+
+                    /* Get the index register. */
+                    iIdxReg = (bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK;
+                    fIdxRegValid = RT_BOOL(iIdxReg != 4);
+
+                    /* Get the base register. */
+                    iBaseReg = bSib & X86_SIB_BASE_MASK;
+                    fBaseRegValid = true;
+                    if (iBaseReg == 5)
+                    {
+                        if ((bRm & X86_MODRM_MOD_MASK) == 0)
+                        {
+                            /* Mod is 0 implies a 32-bit displacement with no base. */
+                            fBaseRegValid = false;
+                            IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);
+                        }
+                        else
+                        {
+                            /* Mod is not 0 implies an 8-bit/32-bit displacement (handled below) with an EBP base. */
+                            iBaseReg = X86_GREG_xBP;
+                        }
+                    }
+                }
+
+                /* Register + displacement. */
+                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
+                {
+                    case 0: /* Handled above */                              break;
+                    case 1: IEM_DISP_GET_S8_SX_U32(pVCpu, u32Disp, offDisp); break;
+                    case 2: IEM_DISP_GET_U32(pVCpu, u32Disp, offDisp);       break;
+                    default:
+                    {
+                        /* Register addressing, handled at the beginning. */
+                        AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
+                        break;
+                    }
+                }
+            }
+
+            GCPtrDisp = (int32_t)u32Disp;       /* Sign-extend the displacement. */
+            iReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK);
+        }
+        else
+        {
+            Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT);
+
+            /*
+             * Parse the ModR/M, SIB, displacement for 64-bit addressing mode.
+             * See Intel instruction spec. 2.2 "IA-32e Mode".
+             */
+            uint64_t u64Disp = 0;
+            bool const fRipRelativeAddr = RT_BOOL((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5);
+            if (fRipRelativeAddr)
+            {
+                /*
+                 * RIP-relative addressing mode.
+                 *
+                 * The displacement is 32-bit signed implying an offset range of +/-2G.
+                 * See Intel instruction spec. 2.2.1.6 "RIP-Relative Addressing".
+                 */
+                uint8_t const offDisp = offModRm + sizeof(bRm);
+                IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
+            }
+            else
+            {
+                uint8_t offDisp = offModRm + sizeof(bRm);
+
+                /*
+                 * Register (and perhaps scale, index and base).
+                 *
+                 * REX.B extends the most-significant bit of the base register. However, REX.B
+                 * is ignored while determining whether an SIB follows the opcode. Hence, we
+                 * shall OR any REX.B bit -after- inspecting for an SIB byte below.
+                 *
+                 * See Intel instruction spec. Table 2-5. "Special Cases of REX Encodings".
+                 */
+                iBaseReg = (bRm & X86_MODRM_RM_MASK);
+                if (iBaseReg == 4)
+                {
+                    /* An SIB byte follows the ModR/M byte, parse it. Displacement (if any) follows SIB. */
+                    uint8_t bSib;
+                    uint8_t const offSib = offModRm + sizeof(bRm);
+                    IEM_SIB_GET_U8(pVCpu, bSib, offSib);
+
+                    /* Displacement may follow SIB, update its offset. */
+                    offDisp += sizeof(bSib);
+
+                    /* Get the scale. */
+                    uScale = (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
+
+                    /* Get the index. */
+                    iIdxReg = ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex;
+                    fIdxRegValid = RT_BOOL(iIdxReg != 4);   /* R12 -can- be used as an index register. */
+
+                    /* Get the base. */
+                    iBaseReg = (bSib & X86_SIB_BASE_MASK);
+                    fBaseRegValid = true;
+                    if (iBaseReg == 5)
+                    {
+                        if ((bRm & X86_MODRM_MOD_MASK) == 0)
+                        {
+                            /* Mod is 0 implies a signed 32-bit displacement with no base. */
+                            IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp);
+                        }
+                        else
+                        {
+                            /* Mod is non-zero implies an 8-bit/32-bit displacement (handled below) with RBP or R13 as base. */
+                            iBaseReg = pVCpu->iem.s.uRexB ? X86_GREG_x13 : X86_GREG_xBP;
+                        }
+                    }
+                }
+                iBaseReg |= pVCpu->iem.s.uRexB;
+
+                /* Register + displacement. */
+                switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
+                {
+                    case 0: /* Handled above */                               break;
+                    case 1: IEM_DISP_GET_S8_SX_U64(pVCpu, u64Disp, offDisp);  break;
+                    case 2: IEM_DISP_GET_S32_SX_U64(pVCpu, u64Disp, offDisp); break;
+                    default:
+                    {
+                        /* Register addressing, handled at the beginning. */
+                        AssertMsgFailed(("ModR/M %#x implies register addressing, memory addressing expected!", bRm));
+                        break;
+                    }
+                }
+            }
+
+            GCPtrDisp = fRipRelativeAddr ? pVCpu->cpum.GstCtx.rip + u64Disp : u64Disp;
+            iReg2 = ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg;
+        }
+
+        ExitInstrInfo.All.u2Scaling       = uScale;
+        ExitInstrInfo.All.iReg1           = 0;   /* Not applicable for memory instructions. */
+        ExitInstrInfo.All.u3AddrSize      = pVCpu->iem.s.enmEffAddrMode;
+        ExitInstrInfo.All.fIsRegOperand   = 0;
+        ExitInstrInfo.All.uOperandSize    = pVCpu->iem.s.enmEffOpSize;
+        ExitInstrInfo.All.iSegReg         = pVCpu->iem.s.iEffSeg;
+        ExitInstrInfo.All.iIdxReg         = iIdxReg;
+        ExitInstrInfo.All.fIdxRegInvalid  = !fIdxRegValid;
+        ExitInstrInfo.All.iBaseReg        = iBaseReg;
+        ExitInstrInfo.All.fBaseRegInvalid = !fBaseRegValid;
+        ExitInstrInfo.All.iReg2           = iReg2;
+    }
+
+    /*
+     * Handle exceptions for certain instructions.
+     * (e.g. some instructions convey an instruction identity).
+     */
+    switch (uExitReason)
+    {
+        case VMX_EXIT_XDTR_ACCESS:
+        {
+            Assert(VMX_INSTR_ID_IS_VALID(InstrId));
+            ExitInstrInfo.GdtIdt.u2InstrId = VMX_INSTR_ID_GET_ID(InstrId);
+            ExitInstrInfo.GdtIdt.u2Undef0  = 0;
+            break;
+        }
+
+        case VMX_EXIT_TR_ACCESS:
+        {
+            Assert(VMX_INSTR_ID_IS_VALID(InstrId));
+            ExitInstrInfo.LdtTr.u2InstrId = VMX_INSTR_ID_GET_ID(InstrId);
+            ExitInstrInfo.LdtTr.u2Undef0  = 0;
+            break;
+        }
+
+        case VMX_EXIT_RDRAND:
+        case VMX_EXIT_RDSEED:
+        {
+            Assert(ExitInstrInfo.RdrandRdseed.u2OperandSize != 3);
+            break;
+        }
+    }
+
+    /* Update displacement and return the constructed VM-exit instruction information field. */
+    if (pGCPtrDisp)
+        *pGCPtrDisp = GCPtrDisp;
+    return ExitInstrInfo.u;
+}
+
+
+/**
  * Implements VMSucceed for VMX instruction success.
  *
@@ -95 +486 @@
  * @param   cbInstr         The instruction length.
  * @param   GCPtrVmxon      The linear address of the VMXON pointer.
- * @param   ExitInstrInfo   The VM-exit instruction information field.
+ * @param   pExitInstrInfo  Pointer to the VM-exit instruction information field.
  * @param   GCPtrDisp       The displacement field for @a GCPtrVmxon if any.
  *
@@ -155 +546 @@
     /* Get the VMXON pointer from the location specified by the source memory operand. */
     RTGCPHYS GCPhysVmxon;
-    VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, pExitInstrInfo->InvVmxXsaves.iSegReg, GCPtrVmxon);
+    VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &GCPhysVmxon, pExitInstrInfo->VmxXsave.iSegReg, GCPtrVmxon);
     if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
     {
@@ -275 +666 @@
 IEM_CIMPL_DEF_1(iemCImpl_vmxon, RTGCPTR, GCPtrVmxon)
 {
-    /** @todo NSTVMX: Parse ModR/M, SIB, disp. */
-    RTGCPTR GCPtrDisp = 0;
+    RTGCPTR GCPtrDisp;
     VMXEXITINSTRINFO ExitInstrInfo;
-    ExitInstrInfo.u = 0;
-    ExitInstrInfo.InvVmxXsaves.u2Scaling       = 0;
-    ExitInstrInfo.InvVmxXsaves.u3AddrSize      = pVCpu->iem.s.enmEffAddrMode;
-    ExitInstrInfo.InvVmxXsaves.fIsRegOperand   = 0;
-    ExitInstrInfo.InvVmxXsaves.iSegReg         = pVCpu->iem.s.iEffSeg;
-    ExitInstrInfo.InvVmxXsaves.iIdxReg         = 0;
-    ExitInstrInfo.InvVmxXsaves.fIdxRegInvalid  = 0;
-    ExitInstrInfo.InvVmxXsaves.iBaseReg        = 0;
-    ExitInstrInfo.InvVmxXsaves.fBaseRegInvalid = 0;
-    ExitInstrInfo.InvVmxXsaves.iReg2           = 0;
+    ExitInstrInfo.u = iemVmxGetExitInstrInfo(pVCpu, VMX_EXIT_VMXON, VMX_INSTR_ID_NONE, &GCPtrDisp);
     return iemVmxVmxon(pVCpu, cbInstr, GCPtrVmxon, &ExitInstrInfo, GCPtrDisp);
 }
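A few standalone sketches of what the added code computes follow. First, the IEM_DISP_GET_* macros: the non-TLB variants simply pick bytes out of the decoded-opcode buffer, assembling multi-byte displacements little endian and sign-extending the 8-bit forms. A minimal compilable sketch of that arithmetic; MY_MAKE_U16 stands in for IPRT's RT_MAKE_U16, and the VMXON encoding in the comment is an illustrative assumption, not taken from this changeset:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the IPRT byte-assembly macro used by IEM_DISP_GET_U16. */
#define MY_MAKE_U16(lo, hi) ((uint16_t)((uint16_t)(hi) << 8 | (uint8_t)(lo)))

int main(void)
{
    /* Hypothetical encoding of "vmxon qword ptr [eax-0x10]": f3 0f c7 70 f0
       (ModR/M 0x70 -> mod=1, reg=6 (/6), rm=0 (EAX); disp8 0xf0 = -16). */
    uint8_t const abOpcode[] = { 0xf3, 0x0f, 0xc7, 0x70, 0xf0 };
    uint8_t const offModRm   = 3;
    uint8_t const offDisp    = offModRm + 1;

    /* IEM_DISP_GET_S8_SX_U32 in essence: fetch one byte, sign-extend it. */
    uint32_t const u32Disp = (uint32_t)(int32_t)(int8_t)abOpcode[offDisp];
    assert(u32Disp == UINT32_C(0xfffffff0));

    /* IEM_DISP_GET_U16 in essence: assemble two little-endian bytes. */
    uint8_t const abDisp16[] = { 0x34, 0x12 };  /* disp16 0x1234 on the wire */
    uint16_t const u16Disp = MY_MAKE_U16(abDisp16[0], abDisp16[1]);
    assert(u16Disp == 0x1234);

    printf("disp8 sign-extended: %#x, disp16: %#x\n", u32Disp, (unsigned)u16Disp);
    return 0;
}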
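Next, the 16-bit branch of iemVmxGetExitInstrInfo transcribes SDM Table 2-1, including the mod=0, rm=6 bare-disp16 special case. The same mapping, table driven, for cross-checking; the register numbering is assumed to match the X86_GREG_xXX constants (AX=0, CX=1, DX=2, BX=3, SP=4, BP=5, SI=6, DI=7):

#include <stdint.h>
#include <stdio.h>

/* 16-bit addressing forms: rm -> base/index pair; -1 means no register. */
static void Decode16BitRm(uint8_t bRm, int *piBaseReg, int *piIdxReg)
{
    static int8_t const s_aiBase[8] = { 3, 3, 5, 5, -1, -1, 5, 3 };   /* BX BX BP BP  -  - BP BX */
    static int8_t const s_aiIdx[8]  = { 6, 7, 6, 7,  6,  7, -1, -1 }; /* SI DI SI DI SI DI  -  - */
    uint8_t const iMod = bRm >> 6;
    uint8_t const iRm  = bRm & 7;
    if (iMod == 0 && iRm == 6)
        *piBaseReg = *piIdxReg = -1;    /* Special case: bare disp16, no registers. */
    else
    {
        *piBaseReg = s_aiBase[iRm];
        *piIdxReg  = s_aiIdx[iRm];
    }
}

int main(void)
{
    int iBase, iIdx;
    Decode16BitRm(0x02, &iBase, &iIdx);     /* mod=0, rm=2 -> [BP+SI] */
    printf("ModR/M 02h: base=%d index=%d\n", iBase, iIdx);
    Decode16BitRm(0x06, &iBase, &iIdx);     /* mod=0, rm=6 -> [disp16] */
    printf("ModR/M 06h: base=%d index=%d\n", iBase, iIdx);
    return 0;
}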
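The SIB parsing in the 32-bit and 64-bit branches slices three fields out of one byte. A tiny sketch with plain shifts standing in for the X86_SIB_* accessors:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* SIB byte for [eax + ecx*4]: scale=2 (2^2=4), index=1 (ECX), base=0 (EAX). */
    uint8_t const bSib      = (2 << 6) | (1 << 3) | 0;
    uint8_t const uScale    = bSib >> 6;        /* X86_SIB_SCALE_SHIFT/SMASK */
    uint8_t const iIdxReg   = (bSib >> 3) & 7;  /* X86_SIB_INDEX_SHIFT/SMASK */
    uint8_t const iBaseReg  = bSib & 7;         /* X86_SIB_BASE_MASK         */
    bool    const fIdxValid = iIdxReg != 4;     /* index 4 (ESP) = no index  */
    /* Base 5 with mod=0 would mean "disp32, no base", as handled above. */

    printf("scale=*%u index=%u (valid=%u) base=%u\n",
           1u << uScale, (unsigned)iIdxReg, (unsigned)fIdxValid, (unsigned)iBaseReg);
    return 0;
}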
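For the RIP-relative path, the SDM (section 2.2.1.6) defines the effective address as the RIP of the next instruction plus the sign-extended 32-bit displacement; note that the function above stores pVCpu->cpum.GstCtx.rip + u64Disp in GCPtrDisp rather than a final effective address. A sketch of the architectural computation, with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Conceptually "lea rax, [rip-0x20]": EA = next-instruction RIP + disp32. */
    uint64_t const uRip     = UINT64_C(0xfffff80000102000); /* made-up RIP    */
    uint8_t  const cbInstr  = 7;                            /* made-up length */
    int32_t  const i32Disp  = -0x20;                        /* disp32 from the code stream */
    uint64_t const uEffAddr = uRip + cbInstr + (uint64_t)(int64_t)i32Disp;
    printf("effective address: %#llx\n", (unsigned long long)uEffAddr);
    return 0;
}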
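Finally, VMXEXITINSTRINFO is the usual union-of-bit-fields pattern: assign the named fields, then read the packed 32-bit word back through .u. A self-contained mock of the idea; the widths and positions follow the SDM's VM-exit instruction-information layout, but the type and field names here are placeholders rather than VirtualBox's, and C bit-field layout is compiler dependent (low-to-high allocation is assumed, as on the compilers VirtualBox targets):

#include <stdint.h>
#include <stdio.h>

typedef union
{
    struct
    {
        uint32_t u2Scaling       : 2;  /* 1:0    scale factor (SIB)           */
        uint32_t u1Undef0        : 1;  /* 2      undefined                    */
        uint32_t iReg1           : 4;  /* 6:3    register 1                   */
        uint32_t u3AddrSize      : 3;  /* 9:7    address size                 */
        uint32_t fIsRegOperand   : 1;  /* 10     0=memory, 1=register operand */
        uint32_t uOperandSize    : 2;  /* 12:11  operand size                 */
        uint32_t u2Undef1        : 2;  /* 14:13  undefined                    */
        uint32_t iSegReg         : 3;  /* 17:15  segment register             */
        uint32_t iIdxReg         : 4;  /* 21:18  index register               */
        uint32_t fIdxRegInvalid  : 1;  /* 22     index register invalid       */
        uint32_t iBaseReg        : 4;  /* 26:23  base register                */
        uint32_t fBaseRegInvalid : 1;  /* 27     base register invalid        */
        uint32_t iReg2           : 4;  /* 31:28  register 2                   */
    } All;
    uint32_t u;
} MOCKEXITINSTRINFO;

int main(void)
{
    MOCKEXITINSTRINFO Info;
    Info.u = 0;                     /* zero all bits, including undefined ones */
    Info.All.u3AddrSize     = 2;    /* 64-bit address size                     */
    Info.All.iSegReg        = 3;    /* DS                                      */
    Info.All.iBaseReg       = 0;    /* RAX                                     */
    Info.All.fIdxRegInvalid = 1;    /* no index register                       */
    printf("packed VM-exit instruction info: %#010x\n", Info.u);
    return 0;
}

Zeroing the whole word through .u first, as iemVmxGetExitInstrInfo does, is what guarantees the undefined bits read back as zero.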