Changeset 66457 in vbox for trunk/src/VBox/VMM
- Timestamp: Apr 6, 2017 10:44:30 AM
- Location: trunk/src/VBox/VMM
- Files: 6 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
Changes from r66392 to r66457:

@@ 8448 @@
      * Check the input and figure out which mapping entry to use.
      */
-    Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
+    Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
     Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
     Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));

@@ 11752 @@
     do \
     { \
-        if (   IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
+        if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
+        else return IEMOP_RAISE_INVALID_OPCODE(); \
     } while (0)

@@ 11839 @@
         return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
     } while (0)

+
 #define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
     do \

@@ 11875 @@
     } while (0)

+
+/**
+ * Done decoding VEX.
+ * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, or if
+ * we're in real or v8086 mode.
+ */
+#define IEMOP_HLP_DONE_VEX_DECODING() \
+    do \
+    { \
+        if (RT_LIKELY(   !(  pVCpu->iem.s.fPrefixes \
+                           & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
+                      && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
+        { /* likely */ } \
+        else \
+            return IEMOP_RAISE_INVALID_OPCODE(); \
+    } while (0)
+
+/**
+ * Done decoding VEX, no V, no L.
+ * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
+ * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
+ */
+#define IEMOP_HLP_DONE_VEX_DECODING_L_ZERO_NO_VVV() \
+    do \
+    { \
+        if (RT_LIKELY(   !(  pVCpu->iem.s.fPrefixes \
+                           & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
+                      && pVCpu->iem.s.uVexLength == 0 \
+                      && pVCpu->iem.s.uVex3rdReg == 0 \
+                      && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
+        { /* likely */ } \
+        else \
+            return IEMOP_RAISE_INVALID_OPCODE(); \
+    } while (0)

 /**
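For orientation (not part of the commit): a VEX-encoded, memory-only instruction decoder is expected to call the new helper after the effective address has been computed, so the real/v8086-mode and prefix checks happen once decoding is complete. The sketch below mirrors the vstmxcsr decoder added later in this changeset; the function name is a placeholder.

/* Illustrative sketch only -- not part of r66457. */
FNIEMOP_DEF_1(iemOp_Example_VexMemOnly, uint8_t, bRm)
{
    IEM_MC_BEGIN(2, 0);
    IEM_MC_ARG(uint8_t,  iEffSeg,  0);
    IEM_MC_ARG(RTGCPTR,  GCPtrEff, 1);
    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
    IEMOP_HLP_DONE_VEX_DECODING_L_ZERO_NO_VVV(); /* #UD on LOCK/66/F2/F3/REX prefixes, VEX.L != 0, VEX.vvvv in use, or real/v86 mode */
    IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ();
    IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
    IEM_MC_CALL_CIMPL_2(iemCImpl_vstmxcsr, iEffSeg, GCPtrEff);
    IEM_MC_END();
    return VINF_SUCCESS;
}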
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
Changes from r66405 to r66457:

Added ahead of the STMXCSR implementation (new lines 7058-7414):

/**
 * Implements 'XSAVE'.
 *
 * @param   iEffSeg         The effective segment.
 * @param   GCPtrEff        The address of the image.
 * @param   enmEffOpSize    The operand size (only REX.W really matters).
 */
IEM_CIMPL_DEF_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
{
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);

    /*
     * Raise exceptions.
     */
    if (!(pCtx->cr4 & X86_CR4_OSXSAVE))
        return iemRaiseUndefinedOpcode(pVCpu);
    if (pCtx->cr0 & X86_CR0_TS)
        return iemRaiseDeviceNotAvailable(pVCpu);
    if (GCPtrEff & 63)
    {
        /** @todo CPU/VM detection possible! \#AC might not be signal for
         *        all/any misalignment sizes, intel says its an implementation detail. */
        if (   (pCtx->cr0 & X86_CR0_AM)
            && pCtx->eflags.Bits.u1AC
            && pVCpu->iem.s.uCpl == 3)
            return iemRaiseAlignmentCheckException(pVCpu);
        return iemRaiseGeneralProtectionFault0(pVCpu);
    }

    /*
     * Calc the requested mask
     */
    uint64_t const fReqComponents = RT_MAKE_U64(pCtx->eax, pCtx->edx) & pCtx->aXcr[0];
    AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
    uint64_t const fXInUse = pCtx->aXcr[0];

    /** @todo figure out the exact protocol for the memory access.  Currently we
     *        just need this crap to work halfways to make it possible to test
     *        AVX instructions. */
    /** @todo figure out the XINUSE and XMODIFIED */

    /*
     * Access the x87 memory state.
     */
    /* The x87+SSE state. */
    void *pvMem512;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    PX86FXSTATE  pDst = (PX86FXSTATE)pvMem512;
    PCX86FXSTATE pSrc = &pCtx->CTX_SUFF(pXState)->x87;

    /* The header. */
    PX86XSAVEHDR pHdr;
    rcStrict = iemMemMap(pVCpu, (void **)&pHdr, sizeof(&pHdr), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /*
     * Store the X87 state.
     */
    if (fReqComponents & XSAVE_C_X87)
    {
        /* common for all formats */
        pDst->FCW    = pSrc->FCW;
        pDst->FSW    = pSrc->FSW;
        pDst->FTW    = pSrc->FTW & UINT16_C(0xff);
        pDst->FOP    = pSrc->FOP;
        pDst->FPUIP  = pSrc->FPUIP;
        pDst->CS     = pSrc->CS;
        pDst->FPUDP  = pSrc->FPUDP;
        pDst->DS     = pSrc->DS;
        if (enmEffOpSize == IEMMODE_64BIT)
        {
            /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
            pDst->Rsrvd1 = pSrc->Rsrvd1;
            pDst->Rsrvd2 = pSrc->Rsrvd2;
            pDst->au32RsrvdForSoftware[0] = 0;
        }
        else
        {
            pDst->Rsrvd1 = 0;
            pDst->Rsrvd2 = 0;
            pDst->au32RsrvdForSoftware[0] = X86_FXSTATE_RSVD_32BIT_MAGIC;
        }
        for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
        {
            /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
             *        them for now... */
            pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
            pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
            pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
            pDst->aRegs[i].au32[3] = 0;
        }
    }

    if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
    {
        pDst->MXCSR      = pSrc->MXCSR;
        pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
    }

    if (fReqComponents & XSAVE_C_SSE)
    {
        /* XMM registers. */
        uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
        for (uint32_t i = 0; i < cXmmRegs; i++)
            pDst->aXMM[i] = pSrc->aXMM[i];
        /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
         *        right? */
    }

    /* Commit the x87 state bits. (probably wrong) */
    rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /*
     * Store AVX state.
     */
    if (fReqComponents & XSAVE_C_YMM)
    {
        /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
        AssertLogRelReturn(pCtx->aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
        PCX86XSAVEYMMHI pCompSrc = CPUMCTX_XSAVE_C_PTR(pCtx, XSAVE_C_YMM_BIT, PCX86XSAVEYMMHI);
        PX86XSAVEYMMHI  pCompDst;
        rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, sizeof(*pCompDst), iEffSeg, GCPtrEff + pCtx->aoffXState[XSAVE_C_YMM_BIT],
                             IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;

        uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
        for (uint32_t i = 0; i < cXmmRegs; i++)
            pCompDst->aYmmHi[i] = pCompSrc->aYmmHi[i];

        rcStrict = iemMemCommitAndUnmap(pVCpu, pCompDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }

    /*
     * Update the header.
     */
    pHdr->bmXState = (pHdr->bmXState & ~fReqComponents)
                   | (fReqComponents & fXInUse);

    rcStrict = iemMemCommitAndUnmap(pVCpu, pHdr, IEM_ACCESS_DATA_RW);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    return VINF_SUCCESS;
}


/**
 * Implements 'XRSTOR'.
 *
 * @param   iEffSeg         The effective segment.
 * @param   GCPtrEff        The address of the image.
 * @param   enmEffOpSize    The operand size (only REX.W really matters).
 */
IEM_CIMPL_DEF_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
{
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);

    /*
     * Raise exceptions.
     */
    if (!(pCtx->cr4 & X86_CR4_OSXSAVE))
        return iemRaiseUndefinedOpcode(pVCpu);
    if (pCtx->cr0 & X86_CR0_TS)
        return iemRaiseDeviceNotAvailable(pVCpu);
    if (GCPtrEff & 63)
    {
        /** @todo CPU/VM detection possible! \#AC might not be signal for
         *        all/any misalignment sizes, intel says its an implementation detail. */
        if (   (pCtx->cr0 & X86_CR0_AM)
            && pCtx->eflags.Bits.u1AC
            && pVCpu->iem.s.uCpl == 3)
            return iemRaiseAlignmentCheckException(pVCpu);
        return iemRaiseGeneralProtectionFault0(pVCpu);
    }

    /** @todo figure out the exact protocol for the memory access.  Currently we
     *        just need this crap to work halfways to make it possible to test
     *        AVX instructions. */
    /** @todo figure out the XINUSE and XMODIFIED */

    /*
     * Access the x87 memory state.
     */
    /* The x87+SSE state. */
    void *pvMem512;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
    PX86FXSTATE  pDst = &pCtx->CTX_SUFF(pXState)->x87;

    /*
     * Calc the requested mask
     */
    PX86XSAVEHDR  pHdrDst = &pCtx->CTX_SUFF(pXState)->Hdr;
    PCX86XSAVEHDR pHdrSrc;
    rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, sizeof(&pHdrSrc), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_R);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    uint64_t const fReqComponents = RT_MAKE_U64(pCtx->eax, pCtx->edx) & pCtx->aXcr[0];
    AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
    //uint64_t const fXInUse = pCtx->aXcr[0];
    uint64_t const fRstorMask = pHdrSrc->bmXState;
    uint64_t const fCompMask  = pHdrSrc->bmXComp;

    AssertLogRelReturn(!(fCompMask & XSAVE_C_X), VERR_IEM_ASPECT_NOT_IMPLEMENTED);

    uint32_t const cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;

    /* We won't need this any longer. */
    rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pHdrSrc, IEM_ACCESS_DATA_R);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /*
     * Store the X87 state.
     */
    if (fReqComponents & XSAVE_C_X87)
    {
        if (fRstorMask & XSAVE_C_X87)
        {
            pDst->FCW    = pSrc->FCW;
            pDst->FSW    = pSrc->FSW;
            pDst->FTW    = pSrc->FTW & UINT16_C(0xff);
            pDst->FOP    = pSrc->FOP;
            pDst->FPUIP  = pSrc->FPUIP;
            pDst->CS     = pSrc->CS;
            pDst->FPUDP  = pSrc->FPUDP;
            pDst->DS     = pSrc->DS;
            if (enmEffOpSize == IEMMODE_64BIT)
            {
                /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
                pDst->Rsrvd1 = pSrc->Rsrvd1;
                pDst->Rsrvd2 = pSrc->Rsrvd2;
            }
            else
            {
                pDst->Rsrvd1 = 0;
                pDst->Rsrvd2 = 0;
            }
            for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
            {
                pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
                pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
                pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
                pDst->aRegs[i].au32[3] = 0;
            }
        }
        else
        {
            pDst->FCW   = 0x37f;
            pDst->FSW   = 0;
            pDst->FTW   = 0x00;       /* 0 - empty. */
            pDst->FPUDP = 0;
            pDst->DS    = 0; //??
            pDst->Rsrvd2= 0;
            pDst->FPUIP = 0;
            pDst->CS    = 0; //??
            pDst->Rsrvd1= 0;
            pDst->FOP   = 0;
            for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
            {
                pDst->aRegs[i].au32[0] = 0;
                pDst->aRegs[i].au32[1] = 0;
                pDst->aRegs[i].au32[2] = 0;
                pDst->aRegs[i].au32[3] = 0;
            }
        }
        pHdrDst->bmXState |= XSAVE_C_X87; /* playing safe for now */
    }

    /* MXCSR */
    if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
    {
        if (fRstorMask & (XSAVE_C_SSE | XSAVE_C_YMM))
            pDst->MXCSR = pSrc->MXCSR;
        else
            pDst->MXCSR = 0x1f80;
    }

    /* XMM registers. */
    if (fReqComponents & XSAVE_C_SSE)
    {
        if (fRstorMask & XSAVE_C_SSE)
        {
            for (uint32_t i = 0; i < cXmmRegs; i++)
                pDst->aXMM[i] = pSrc->aXMM[i];
            /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
             *        right? */
        }
        else
        {
            for (uint32_t i = 0; i < cXmmRegs; i++)
            {
                pDst->aXMM[i].au64[0] = 0;
                pDst->aXMM[i].au64[1] = 0;
            }
        }
        pHdrDst->bmXState |= XSAVE_C_SSE; /* playing safe for now */
    }

    /* Unmap the x87 state bits (so we've don't run out of mapping). */
    rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /*
     * Restore AVX state.
     */
    if (fReqComponents & XSAVE_C_YMM)
    {
        AssertLogRelReturn(pCtx->aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
        PX86XSAVEYMMHI pCompDst = CPUMCTX_XSAVE_C_PTR(pCtx, XSAVE_C_YMM_BIT, PX86XSAVEYMMHI);

        if (fRstorMask & XSAVE_C_YMM)
        {
            /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
            PCX86XSAVEYMMHI pCompSrc;
            rcStrict = iemMemMap(pVCpu, (void **)&pCompSrc, sizeof(*pCompDst),
                                 iEffSeg, GCPtrEff + pCtx->aoffXState[XSAVE_C_YMM_BIT], IEM_ACCESS_DATA_R);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            for (uint32_t i = 0; i < cXmmRegs; i++)
            {
                pCompDst->aYmmHi[i].au64[0] = pCompSrc->aYmmHi[i].au64[0];
                pCompDst->aYmmHi[i].au64[1] = pCompSrc->aYmmHi[i].au64[1];
            }

            rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pCompSrc, IEM_ACCESS_DATA_R);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
        }
        else
        {
            for (uint32_t i = 0; i < cXmmRegs; i++)
            {
                pCompDst->aYmmHi[i].au64[0] = 0;
                pCompDst->aYmmHi[i].au64[1] = 0;
            }
        }
        pHdrDst->bmXState |= XSAVE_C_YMM; /* playing safe for now */
    }

    iemRegAddToRipAndClearRF(pVCpu, cbInstr);
    return VINF_SUCCESS;
}

Changed in iemCImpl_stmxcsr, and iemCImpl_vstmxcsr added (around new lines 7431-7470; unchanged lines elided by the viewer):

     if (   !(pCtx->cr0 & X86_CR0_EM)
         && (pCtx->cr4 & X86_CR4_OSFXSR))
+    {
+        if (!(pCtx->cr0 & X86_CR0_TS))
+        {
+            /*
+             * Do the job.
+             */
+            VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pCtx->CTX_SUFF(pXState)->x87.MXCSR);
+            if (rcStrict == VINF_SUCCESS)
+            {
+                iemRegAddToRipAndClearRF(pVCpu, cbInstr);
+                return VINF_SUCCESS;
+            }
+            return rcStrict;
+        }
+        return iemRaiseDeviceNotAvailable(pVCpu);
+    }
+    return iemRaiseUndefinedOpcode(pVCpu);
+}
+
+
+/**
+ * Implements 'VSTMXCSR'.
+ *
+ * @param   GCPtrEff        The address of the image.
+ */
+IEM_CIMPL_DEF_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
+{
+    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
+
+    /*
+     * Raise exceptions.
+     */
+    if (   (   !IEM_IS_GUEST_CPU_AMD(pVCpu)
+            ? (pCtx->aXcr[0] & (XSAVE_C_SSE | XSAVE_C_YMM)) == (XSAVE_C_SSE | XSAVE_C_YMM)
+            : !(pCtx->cr0 & X86_CR0_EM)) /* AMD Jaguar CPU (f0x16,m0,s1) behaviour */
+        && (pCtx->cr4 & X86_CR4_OSXSAVE))
     {
         if (!(pCtx->cr0 & X86_CR0_TS))
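As a reading aid (not part of the commit), this is the XSAVE-area layout the two helpers above map piecewise, assuming the standard, non-compacted format: only the 0 and 512 offsets are architectural constants; the extended-component offset comes from the guest's CPUID-derived aoffXState table.

/* Reading aid only -- not part of r66457. */
typedef struct XSAVEAREASKETCH
{
    X86FXSTATE  x87Sse;     /* +0x000: 512-byte legacy FXSAVE image (x87, MXCSR, XMM regs); mapped first. */
    X86XSAVEHDR Hdr;        /* +0x200: XSAVE header; bmXState = XSTATE_BV, bmXComp = XCOMP_BV. */
    /* Extended components follow at CPUID(0xd)-reported offsets; the YMM high
     * halves are mapped at GCPtrEff + pCtx->aoffXState[XSAVE_C_YMM_BIT],
     * typically 0x240 on current CPUs. */
} XSAVEAREASKETCH;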
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsOneByte.cpp.h
Changes from r66420 to r66457:

c4h escape (LES / VEX3 prefix), around line 6181 -- before (r66420), with unchanged lines elided by the viewer:

        outside of 64-bit mode.  VEX is not available in real or v86 mode. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
    {
        if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
        {
            IEMOP_MNEMONIC(les_Gv_Mp, "les Gv,Mp");
            return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_ES, bRm);
        }
        IEMOP_HLP_NO_REAL_OR_V86_MODE();
    }

    IEMOP_MNEMONIC(vex3_prefix, "vex3");
    if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx)
    {
        /** @todo Test when exctly the VEX conformance checks kick in during
         *        instruction decoding and fetching (using \#PF). */
        uint8_t bVex2;   IEM_OPCODE_GET_NEXT_U8(&bVex2);
        uint8_t bOpcode; IEM_OPCODE_GET_NEXT_U8(&bOpcode);
        if (   (  pVCpu->iem.s.fPrefixes
                & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_LOCK | IEM_OP_PRF_REX))
            == 0)
        {
            pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_VEX;
            if (bVex2 & 0x80 /* VEX.W */)
            ...
        }
        else
            Log(("VEX3: Invalid prefix mix!\n"));
    }
    else
        Log(("VEX3: AVX support disabled!\n"));
    return IEMOP_RAISE_INVALID_OPCODE();
}

After (r66457):

        outside of 64-bit mode.  VEX is not available in real or v86 mode. */
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    if (   pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
        || (bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT) )
    {
        IEMOP_MNEMONIC(vex3_prefix, "vex3");
        if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx)
        {
            /* Note! The real mode, v8086 mode and invalid prefix checks are
               done once the instruction is fully decoded. */
            uint8_t bVex2;   IEM_OPCODE_GET_NEXT_U8(&bVex2);
            uint8_t bOpcode; IEM_OPCODE_GET_NEXT_U8(&bOpcode);
            pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_VEX;
            if (bVex2 & 0x80 /* VEX.W */)
            ...
        }
        Log(("VEX3: AVX support disabled!\n"));
        return IEMOP_RAISE_INVALID_OPCODE();
    }

    IEMOP_MNEMONIC(les_Gv_Mp, "les Gv,Mp");
    return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_ES, bRm);
}

c5h escape (VEX2 prefix), around line 6246:

     if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx)
     {
+        /* Note! The real mode, v8086 mode and invalid prefix checks are
+           done once the instruction is fully decoded. */
         uint8_t bOpcode; IEM_OPCODE_GET_NEXT_U8(&bOpcode);
-        if (   (  pVCpu->iem.s.fPrefixes
-                & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_LOCK | IEM_OP_PRF_REX))
-            == 0)
-        {
-            pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_VEX;
-            pVCpu->iem.s.uRexReg    = ~bRm >> (7 - 3);
-            pVCpu->iem.s.uVex3rdReg = (~bRm >> 3) & 0xf;
-            pVCpu->iem.s.uVexLength = (bRm >> 2) & 1;
-            pVCpu->iem.s.idxPrefix  = bRm & 0x3;
-
-            return FNIEMOP_CALL(g_apfnVexMap1[(uintptr_t)bOpcode * 4 + pVCpu->iem.s.idxPrefix]);
-        }
-
-        Log(("VEX2: Invalid prefix mix!\n"));
+        pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_VEX;
+        pVCpu->iem.s.uRexReg    = ~bRm >> (7 - 3);
+        pVCpu->iem.s.uVex3rdReg = (~bRm >> 3) & 0xf;
+        pVCpu->iem.s.uVexLength = (bRm >> 2) & 1;
+        pVCpu->iem.s.idxPrefix  = bRm & 0x3;
+
+        return FNIEMOP_CALL(g_apfnVexMap1[(uintptr_t)bOpcode * 4 + pVCpu->iem.s.idxPrefix]);
     }
-    else
-        Log(("VEX2: AVX support disabled!\n"));
-
-    /* @todo does intel completely decode the sequence with SIB/disp before \#UD? */
+
+    /** @todo does intel completely decode the sequence with SIB/disp before \#UD? */
+    Log(("VEX2: AVX support disabled!\n"));
     return IEMOP_RAISE_INVALID_OPCODE();
 }
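For reference (not part of the commit), the field extraction done by the 2-byte VEX path above can be written out as a standalone helper. The type and function names below are placeholders; the complements reflect the fact that VEX stores R and vvvv inverted, which is why the decoder uses ~bRm.

/* Illustrative sketch only -- not part of r66457. */
typedef struct VEX2FIELDS
{
    uint8_t fRexR;      /* 1 if VEX.R is set (encoded bit 7 is 0). */
    uint8_t uVvvv;      /* Register selected by VEX.vvvv, already un-inverted. */
    uint8_t uLength;    /* VEX.L: 0 = 128-bit, 1 = 256-bit. */
    uint8_t idxPrefix;  /* VEX.pp: 0 = none, 1 = 0x66, 2 = 0xF3, 3 = 0xF2. */
} VEX2FIELDS;

static VEX2FIELDS DecodeVex2Payload(uint8_t bPayload)
{
    VEX2FIELDS Fields;
    Fields.fRexR     = (uint8_t)((~bPayload >> 7) & 1);
    Fields.uVvvv     = (uint8_t)((~bPayload >> 3) & 0xf);
    Fields.uLength   = (uint8_t)((bPayload >> 2) & 1);
    Fields.idxPrefix = (uint8_t)(bPayload & 0x3);
    return Fields;
}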
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsPython.py
Changes from r66450 to r66457:

@@ 233 @@
     'MdWO':       ( 'IDX_UseModRM', 'rm',    '%Md',  'Md',      ),
     'Mq':         ( 'IDX_UseModRM', 'rm',    '%Mq',  'Mq',      ),
+    'MRO':        ( 'IDX_UseModRM', 'rm',    '%M',   'M',       ),
+    'MRW':        ( 'IDX_UseModRM', 'rm',    '%M',   'M',       ),

     # ModR/M.reg

@@ 1128 @@
     # paging on/off
     'paging': {
-        'on':       'paging_on',
-        'off':      'paging_off',
+        'on':           'paging_on',
+        'off':          'paging_off',
+    },
+    # CPU vendor
+    'vendor': {
+        'amd':          'vendor_amd',
+        'intel':        'vendor_intel',
+        'via':          'vendor_via',
     },
 };

@@ 1154 @@
     'paging':      'paging==on',
     '!paging':     'paging==off',
+    'amd':         'vendor==amd',
+    '!amd':        'vendor!=amd',
+    'intel':       'vendor==intel',
+    '!intel':      'vendor!=intel',
+    'via':         'vendor==via',
+    '!via':        'vendor!=via',
 };
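The new 'vendor' constraint table is what makes the amd/!amd, intel/!intel and via/!via selectors legal in @optest annotations. For example, the vstmxcsr test annotations in the next file use them to express vendor-specific behaviour:

/* Taken from the vstmxcsr annotations below: */
/**
 * @optest      !amd / mxcsr=0x2085 cr0|=em -> op1=0x2085
 * @optest      amd / mxcsr=0x2085 cr0|=em -> value.xcpt=0x6
 */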
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h
Changes from r66450 to r66457:

@@ 5832 @@
  * @opcpuid     sse
  * @opgroup     og_sse_mxcsrsm
+ * @opxcpttype  5
  * @optest      op1=0 -> mxcsr=0
  * @optest      op1=0x2083 -> mxcsr=0x2083

@@ 5869 @@
  * @opcpuid     sse
  * @opgroup     og_sse_mxcsrsm
+ * @opxcpttype  5
  * @optest      mxcsr=0 -> op1=0
  * @optest      mxcsr=0x2083 -> op1=0x2083

@@ 5905 @@
  * @opcpuid     avx
  * @opgroup     og_avx_mxcsrsm
+ * @opxcpttype  5
  * @optest      mxcsr=0 -> op1=0
  * @optest      mxcsr=0x2083 -> op1=0x2083
  * @optest      mxcsr=0x2084 cr0|=ts -> value.xcpt=0x7
- * @optest      mxcsr=0x2085 cr0|=em -> op1=0x2085
+ * @optest      !amd / mxcsr=0x2085 cr0|=em -> op1=0x2085
+ * @optest      amd / mxcsr=0x2085 cr0|=em -> value.xcpt=0x6
  * @optest      mxcsr=0x2086 cr0|=mp -> op1=0x2086
  * @optest      mxcsr=0x2087 cr4&~=osfxsr -> op1=0x2087
- * @optest      mxcsr=0x2088 cr0|=ts,em -> value.xcpt=0x7
- * @optest      mxcsr=0x2089 cr0|=em cr4&~=osfxsr -> op1=0x2089
- * @optest      mxcsr=0x208a cr0|=ts,em cr4&~=osfxsr -> value.xcpt=0x7
- * @optest      mxcsr=0x208b cr0|=ts,em,mp cr4&~=osfxsr -> value.xcpt=0x7
- * @optest      mxcsr=0x208c xcr0&~=all_avx -> value.xcpt=0x6
- * @optest      mxcsr=0x208d xcr0&~=all_avx_sse -> value.xcpt=0x6
- * @optest      mxcsr=0x208e xcr0&~=all_avx cr0|=ts -> value.xcpt=0x6
  * @optest      mxcsr=0x208f cr4&~=osxsave -> value.xcpt=0x6
+ * @optest      mxcsr=0x2087 cr4&~=osfxsr,osxsave -> value.xcpt=0x6
+ * @optest      !amd / mxcsr=0x2088 cr0|=ts,em -> value.xcpt=0x7
+ * @optest      amd / mxcsr=0x2088 cr0|=ts,em -> value.xcpt=0x6
+ * @optest      !amd / mxcsr=0x2089 cr0|=em cr4&~=osfxsr -> op1=0x2089
+ * @optest      amd / mxcsr=0x2089 cr0|=em cr4&~=osfxsr -> value.xcpt=0x6
+ * @optest      !amd / mxcsr=0x208a cr0|=ts,em cr4&~=osfxsr -> value.xcpt=0x7
+ * @optest      amd / mxcsr=0x208a cr0|=ts,em cr4&~=osfxsr -> value.xcpt=0x6
+ * @optest      !amd / mxcsr=0x208b cr0|=ts,em,mp cr4&~=osfxsr -> value.xcpt=0x7
+ * @optest      amd / mxcsr=0x208b cr0|=ts,em,mp cr4&~=osfxsr -> value.xcpt=0x6
+ * @optest      !amd / mxcsr=0x208c xcr0&~=all_avx -> value.xcpt=0x6
+ * @optest      amd / mxcsr=0x208c xcr0&~=all_avx -> op1=0x208c
+ * @optest      !amd / mxcsr=0x208d xcr0&~=all_avx_sse -> value.xcpt=0x6
+ * @optest      amd / mxcsr=0x208d xcr0&~=all_avx_sse -> op1=0x208d
+ * @optest      !amd / mxcsr=0x208e xcr0&~=all_avx cr0|=ts -> value.xcpt=0x6
+ * @optest      amd / mxcsr=0x208e xcr0&~=all_avx cr0|=ts -> value.xcpt=0x7
  * @optest      mxcsr=0x2082 cr0|=ts cr4&~=osxsave -> value.xcpt=0x6
  * @optest      mxcsr=0x2081 xcr0&~=all_avx cr0|=ts cr4&~=osxsave
  *              -> value.xcpt=0x6
+ * @remarks     AMD Jaguar CPU (f0x16,m0,s1) \#UD when CR0.EM is set.  It also
+ *              doesn't seem to check XCR0[2:1] != 11b.  This does not match the
+ *              APMv4 rev 3.17 page 509.
+ * @todo        Test this instruction on AMD Ryzen.
  * @oponlytest
  */

@@ 5950 @@
     IEM_MC_ARG(RTGCPTR,         GCPtrEff, 1);
     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
-    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+    IEMOP_HLP_DONE_VEX_DECODING_L_ZERO_NO_VVV();
     IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ();
     IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
-    IEM_MC_CALL_CIMPL_2(iemCImpl_stmxcsr, iEffSeg, GCPtrEff);
+    IEM_MC_CALL_CIMPL_2(iemCImpl_vstmxcsr, iEffSeg, GCPtrEff);
     IEM_MC_END();
     return VINF_SUCCESS;

@@ 5961 @@
-/** Opcode 0x0f 0xae mem/4. */
-FNIEMOP_UD_STUB_1(iemOp_Grp15_xsave, uint8_t, bRm);
-
-/** Opcode 0x0f 0xae mem/5. */
-FNIEMOP_UD_STUB_1(iemOp_Grp15_xrstor, uint8_t, bRm);
+/**
+ * @opmaps      vexgrp15
+ * @opcode      !11/4
+ * @oppfx       none
+ * @opcpuid     xsave
+ * @opgroup     og_system
+ * @opxcpttype  none
+ */
+FNIEMOP_DEF_1(iemOp_Grp15_xsave, uint8_t, bRm)
+{
+    IEMOP_MNEMONIC1(M_MEM, XSAVE, xsave, MRW, DISOPTYPE_HARMLESS, 0);
+    if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fXSaveRstor)
+        return IEMOP_RAISE_INVALID_OPCODE();
+
+    IEM_MC_BEGIN(3, 0);
+    IEM_MC_ARG(uint8_t,  iEffSeg,  0);
+    IEM_MC_ARG(RTGCPTR,  GCPtrEff, 1);
+    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pVCpu->iem.s.enmEffOpSize, 2);
+    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
+    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+    IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ();
+    IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
+    IEM_MC_CALL_CIMPL_3(iemCImpl_xsave, iEffSeg, GCPtrEff, enmEffOpSize);
+    IEM_MC_END();
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * @opmaps      vexgrp15
+ * @opcode      !11/5
+ * @oppfx       none
+ * @opcpuid     xsave
+ * @opgroup     og_system
+ * @opxcpttype  none
+ */
+FNIEMOP_DEF_1(iemOp_Grp15_xrstor, uint8_t, bRm)
+{
+    IEMOP_MNEMONIC1(M_MEM, XRSTOR, xrstor, MRO, DISOPTYPE_HARMLESS, 0);
+    if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fXSaveRstor)
+        return IEMOP_RAISE_INVALID_OPCODE();
+
+    IEM_MC_BEGIN(3, 0);
+    IEM_MC_ARG(uint8_t,  iEffSeg,  0);
+    IEM_MC_ARG(RTGCPTR,  GCPtrEff, 1);
+    IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pVCpu->iem.s.enmEffOpSize, 2);
+    IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
+    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
+    IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ();
+    IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
+    IEM_MC_CALL_CIMPL_3(iemCImpl_xrstor, iEffSeg, GCPtrEff, enmEffOpSize);
+    IEM_MC_END();
+    return VINF_SUCCESS;
+}

 /** Opcode 0x0f 0xae mem/6. */

@@ 8790 @@
     /* 0x8e */  IEMOP_X4(iemOp_InvalidNeedRM),
     /* 0x8f */  IEMOP_X4(iemOp_InvalidNeedRM),
-                IEMOP_X4(iemOp_InvalidNeedRM),
+
     /* 0x90 */  IEMOP_X4(iemOp_InvalidNeedRM),
     /* 0x91 */  IEMOP_X4(iemOp_InvalidNeedRM),

@@ 8910 @@
     /* 0xff */  IEMOP_X4(iemOp_ud0),
 };
-AssertCompile(RT_ELEMENTS(g_apfnTwoByteMap) == 1024);
+AssertCompile(RT_ELEMENTS(g_apfnVexMap1) == 1024);
 /** @} */
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
Changes from r66314 to r66457:

@@ 119 @@
 #define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE()                   do { } while (0)
 #define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf)      do { } while (0)
-#define IEMOP_HLP_DONE_DECODING()                           do { } while (0)
+#define IEMOP_HLP_DONE_VEX_DECODING_L_ZERO_NO_VVV()         do { } while (0)
 #define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX()            do { } while (0)
 #define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() do { } while (0)
+#define IEMOP_HLP_DONE_DECODING()                           do { } while (0)
+#define IEMOP_HLP_DONE_VEX_DECODING()                       do { } while (0)
+
 #define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) do { } while (0)
 #define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) do { } while (0)