Changeset 66457 in vbox
Timestamp: Apr 6, 2017 10:44:30 AM
svn:sync-xref-src-repo-rev: 114437
Location: trunk
Files: 9 edited
trunk/include/VBox/disopcode.h
r66412 → r66457

      OP_LDMXCSR,
      OP_STMXCSR,
  +   OP_XSAVE,
  +   OP_XRSTOR,
      OP_LFENCE,
      OP_MFENCE,
  …
      #define OP_PARM_MdRO    OP_PARM_Md  /**< Annotates read only memory byte operand. */
      #define OP_PARM_MdWO    OP_PARM_Md  /**< Annotates write only memory byte operand. */
  +   #define OP_PARM_MRO     OP_PARM_M   /**< Annotates read only memory of variable operand size (xrstor). */
  +   #define OP_PARM_MRW     OP_PARM_M   /**< Annotates read-write memory of variable operand size (xsave). */

      /** @} */
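Background (illustrative sketch, not part of this changeset): the two new opcodes correspond to XSAVE and XRSTOR, which take a 64-byte aligned memory operand and a component mask in EDX:EAX; the OS must have set CR4.OSXSAVE and enabled the components in XCR0, otherwise the instructions raise #UD. Assuming GCC-style inline assembly, guest code exercising them might look like this:

    #include <stdint.h>

    /* 64-byte aligned save area: 512-byte legacy region + 64-byte header +
       256 bytes for YMM_Hi128 at its usual offset (0x340 bytes total). */
    static uint8_t g_abXSaveArea[0x340] __attribute__((aligned(64)));

    static void SaveAndRestoreAvxState(void)
    {
        uint32_t const uMaskLo = UINT32_C(0x00000007);  /* x87 | SSE | YMM */
        uint32_t const uMaskHi = 0;
        __asm__ __volatile__("xsave %0"  : "+m" (g_abXSaveArea) : "a" (uMaskLo), "d" (uMaskHi));
        __asm__ __volatile__("xrstor %0" :                      : "m" (g_abXSaveArea), "a" (uMaskLo), "d" (uMaskHi));
    }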
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r66392 → r66457

       * Check the input and figure out which mapping entry to use.
       */
  -   Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
  +   Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
      Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
      Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
  …
      do \
      { \
  -       if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
  -           return IEMOP_RAISE_INVALID_OPCODE(); \
  +       if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
  +       else return IEMOP_RAISE_INVALID_OPCODE(); \
      } while (0)
  …
          return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
      } while (0)

  +
      #define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
      do \
      { \
  …
      } while (0)

  +
  + /**
  +  * Done decoding VEX.
  +  * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, or if
  +  * we're in real or v8086 mode.
  +  */
  + #define IEMOP_HLP_DONE_VEX_DECODING() \
  +     do \
  +     { \
  +         if (RT_LIKELY(   !(  pVCpu->iem.s.fPrefixes \
  +                            & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
  +                       && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
  +         { /* likely */ } \
  +         else \
  +             return IEMOP_RAISE_INVALID_OPCODE(); \
  +     } while (0)
  +
  + /**
  +  * Done decoding VEX, no V, no L.
  +  * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
  +  * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
  +  */
  + #define IEMOP_HLP_DONE_VEX_DECODING_L_ZERO_NO_VVV() \
  +     do \
  +     { \
  +         if (RT_LIKELY(   !(  pVCpu->iem.s.fPrefixes \
  +                            & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
  +                       && pVCpu->iem.s.uVexLength == 0 \
  +                       && pVCpu->iem.s.uVex3rdReg == 0 \
  +                       && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
  +         { /* likely */ } \
  +         else \
  +             return IEMOP_RAISE_INVALID_OPCODE(); \
  +     } while (0)

      /**
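For reference, the prefix flags the two new macros test correspond to the legacy prefixes that may not precede a VEX escape byte. A standalone sketch of that rule (illustrative only; the byte values are standard x86, the helper name is made up):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch (hypothetical helper): true for legacy prefix bytes that the new
       macros reject in front of a VEX escape: 66h, F2h, F3h, F0h and, in
       64-bit mode, the REX range 40h..4Fh. */
    static bool IsPrefixIllegalBeforeVex(uint8_t bPrefix, bool fIn64BitMode)
    {
        return bPrefix == 0x66
            || bPrefix == 0xf2
            || bPrefix == 0xf3
            || bPrefix == 0xf0
            || (fIn64BitMode && (bPrefix & 0xf0) == 0x40);
    }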
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r66405 → r66457

      /**
  + * Implements 'XSAVE'.
  + *
  + * @param   iEffSeg         The effective segment.
  + * @param   GCPtrEff        The address of the image.
  + * @param   enmEffOpSize    The operand size (only REX.W really matters).
  + */
  + IEM_CIMPL_DEF_3(iemCImpl_xsave, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
  + {
  +     PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
  +
  +     /*
  +      * Raise exceptions.
  +      */
  +     if (!(pCtx->cr4 & X86_CR4_OSXSAVE))
  +         return iemRaiseUndefinedOpcode(pVCpu);
  +     if (pCtx->cr0 & X86_CR0_TS)
  +         return iemRaiseDeviceNotAvailable(pVCpu);
  +     if (GCPtrEff & 63)
  +     {
  +         /** @todo CPU/VM detection possible! \#AC might not be signal for
  +          *        all/any misalignment sizes, intel says its an implementation detail. */
  +         if (   (pCtx->cr0 & X86_CR0_AM)
  +             && pCtx->eflags.Bits.u1AC
  +             && pVCpu->iem.s.uCpl == 3)
  +             return iemRaiseAlignmentCheckException(pVCpu);
  +         return iemRaiseGeneralProtectionFault0(pVCpu);
  +     }
  +
  +     /*
  +      * Calc the requested mask
  +      */
  +     uint64_t const fReqComponents = RT_MAKE_U64(pCtx->eax, pCtx->edx) & pCtx->aXcr[0];
  +     AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
  +     uint64_t const fXInUse = pCtx->aXcr[0];
  +
  +     /** @todo figure out the exact protocol for the memory access.  Currently we
  +      *        just need this crap to work halfways to make it possible to test
  +      *        AVX instructions. */
  +     /** @todo figure out the XINUSE and XMODIFIED */
  +
  +     /*
  +      * Access the x87 memory state.
  +      */
  +     /* The x87+SSE state. */
  +     void *pvMem512;
  +     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
  +     if (rcStrict != VINF_SUCCESS)
  +         return rcStrict;
  +     PX86FXSTATE  pDst = (PX86FXSTATE)pvMem512;
  +     PCX86FXSTATE pSrc = &pCtx->CTX_SUFF(pXState)->x87;
  +
  +     /* The header. */
  +     PX86XSAVEHDR pHdr;
  +     rcStrict = iemMemMap(pVCpu, (void **)&pHdr, sizeof(&pHdr), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_RW);
  +     if (rcStrict != VINF_SUCCESS)
  +         return rcStrict;
  +
  +     /*
  +      * Store the X87 state.
  +      */
  +     if (fReqComponents & XSAVE_C_X87)
  +     {
  +         /* common for all formats */
  +         pDst->FCW    = pSrc->FCW;
  +         pDst->FSW    = pSrc->FSW;
  +         pDst->FTW    = pSrc->FTW & UINT16_C(0xff);
  +         pDst->FOP    = pSrc->FOP;
  +         pDst->FPUIP  = pSrc->FPUIP;
  +         pDst->CS     = pSrc->CS;
  +         pDst->FPUDP  = pSrc->FPUDP;
  +         pDst->DS     = pSrc->DS;
  +         if (enmEffOpSize == IEMMODE_64BIT)
  +         {
  +             /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
  +             pDst->Rsrvd1 = pSrc->Rsrvd1;
  +             pDst->Rsrvd2 = pSrc->Rsrvd2;
  +             pDst->au32RsrvdForSoftware[0] = 0;
  +         }
  +         else
  +         {
  +             pDst->Rsrvd1 = 0;
  +             pDst->Rsrvd2 = 0;
  +             pDst->au32RsrvdForSoftware[0] = X86_FXSTATE_RSVD_32BIT_MAGIC;
  +         }
  +         for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
  +         {
  +             /** @todo Testcase: What actually happens to the 6 reserved bytes? I'm clearing
  +              *        them for now... */
  +             pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
  +             pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
  +             pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
  +             pDst->aRegs[i].au32[3] = 0;
  +         }
  +
  +     }
  +
  +     if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
  +     {
  +         pDst->MXCSR      = pSrc->MXCSR;
  +         pDst->MXCSR_MASK = CPUMGetGuestMxCsrMask(pVCpu->CTX_SUFF(pVM));
  +     }
  +
  +     if (fReqComponents & XSAVE_C_SSE)
  +     {
  +         /* XMM registers. */
  +         uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
  +         for (uint32_t i = 0; i < cXmmRegs; i++)
  +             pDst->aXMM[i] = pSrc->aXMM[i];
  +         /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
  +          *        right? */
  +     }
  +
  +     /* Commit the x87 state bits. (probably wrong) */
  +     rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
  +     if (rcStrict != VINF_SUCCESS)
  +         return rcStrict;
  +
  +     /*
  +      * Store AVX state.
  +      */
  +     if (fReqComponents & XSAVE_C_YMM)
  +     {
  +         /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
  +         AssertLogRelReturn(pCtx->aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
  +         PCX86XSAVEYMMHI pCompSrc = CPUMCTX_XSAVE_C_PTR(pCtx, XSAVE_C_YMM_BIT, PCX86XSAVEYMMHI);
  +         PX86XSAVEYMMHI  pCompDst;
  +         rcStrict = iemMemMap(pVCpu, (void **)&pCompDst, sizeof(*pCompDst), iEffSeg, GCPtrEff + pCtx->aoffXState[XSAVE_C_YMM_BIT],
  +                              IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
  +         if (rcStrict != VINF_SUCCESS)
  +             return rcStrict;
  +
  +         uint32_t cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
  +         for (uint32_t i = 0; i < cXmmRegs; i++)
  +             pCompDst->aYmmHi[i] = pCompSrc->aYmmHi[i];
  +
  +         rcStrict = iemMemCommitAndUnmap(pVCpu, pCompDst, IEM_ACCESS_DATA_W | IEM_ACCESS_PARTIAL_WRITE);
  +         if (rcStrict != VINF_SUCCESS)
  +             return rcStrict;
  +     }
  +
  +     /*
  +      * Update the header.
  +      */
  +     pHdr->bmXState = (pHdr->bmXState & ~fReqComponents)
  +                    | (fReqComponents & fXInUse);
  +
  +     rcStrict = iemMemCommitAndUnmap(pVCpu, pHdr, IEM_ACCESS_DATA_RW);
  +     if (rcStrict != VINF_SUCCESS)
  +         return rcStrict;
  +
  +     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
  +     return VINF_SUCCESS;
  + }
  +
  +
  + /**
  +  * Implements 'XRSTOR'.
  +  *
  +  * @param   iEffSeg         The effective segment.
  +  * @param   GCPtrEff        The address of the image.
  +  * @param   enmEffOpSize    The operand size (only REX.W really matters).
  +  */
  + IEM_CIMPL_DEF_3(iemCImpl_xrstor, uint8_t, iEffSeg, RTGCPTR, GCPtrEff, IEMMODE, enmEffOpSize)
  + {
  +     PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
  +
  +     /*
  +      * Raise exceptions.
  +      */
  +     if (!(pCtx->cr4 & X86_CR4_OSXSAVE))
  +         return iemRaiseUndefinedOpcode(pVCpu);
  +     if (pCtx->cr0 & X86_CR0_TS)
  +         return iemRaiseDeviceNotAvailable(pVCpu);
  +     if (GCPtrEff & 63)
  +     {
  +         /** @todo CPU/VM detection possible! \#AC might not be signal for
  +          *        all/any misalignment sizes, intel says its an implementation detail. */
  +         if (   (pCtx->cr0 & X86_CR0_AM)
  +             && pCtx->eflags.Bits.u1AC
  +             && pVCpu->iem.s.uCpl == 3)
  +             return iemRaiseAlignmentCheckException(pVCpu);
  +         return iemRaiseGeneralProtectionFault0(pVCpu);
  +     }
  +
  +     /** @todo figure out the exact protocol for the memory access.  Currently we
  +      *        just need this crap to work halfways to make it possible to test
  +      *        AVX instructions. */
  +     /** @todo figure out the XINUSE and XMODIFIED */
  +
  +     /*
  +      * Access the x87 memory state.
  +      */
  +     /* The x87+SSE state. */
  +     void *pvMem512;
  +     VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvMem512, 512, iEffSeg, GCPtrEff, IEM_ACCESS_DATA_R);
  +     if (rcStrict != VINF_SUCCESS)
  +         return rcStrict;
  +     PCX86FXSTATE pSrc = (PCX86FXSTATE)pvMem512;
  +     PX86FXSTATE  pDst = &pCtx->CTX_SUFF(pXState)->x87;
  +
  +     /*
  +      * Calc the requested mask
  +      */
  +     PX86XSAVEHDR  pHdrDst = &pCtx->CTX_SUFF(pXState)->Hdr;
  +     PCX86XSAVEHDR pHdrSrc;
  +     rcStrict = iemMemMap(pVCpu, (void **)&pHdrSrc, sizeof(&pHdrSrc), iEffSeg, GCPtrEff + 512, IEM_ACCESS_DATA_R);
  +     if (rcStrict != VINF_SUCCESS)
  +         return rcStrict;
  +
  +     uint64_t const fReqComponents = RT_MAKE_U64(pCtx->eax, pCtx->edx) & pCtx->aXcr[0];
  +     AssertLogRelReturn(!(fReqComponents & ~(XSAVE_C_X87 | XSAVE_C_SSE | XSAVE_C_YMM)), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
  +     //uint64_t const fXInUse = pCtx->aXcr[0];
  +     uint64_t const fRstorMask = pHdrSrc->bmXState;
  +     uint64_t const fCompMask  = pHdrSrc->bmXComp;
  +
  +     AssertLogRelReturn(!(fCompMask & XSAVE_C_X), VERR_IEM_ASPECT_NOT_IMPLEMENTED);
  +
  +     uint32_t const cXmmRegs = enmEffOpSize == IEMMODE_64BIT ? 16 : 8;
  +
  +     /* We won't need this any longer. */
  +     rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pHdrSrc, IEM_ACCESS_DATA_R);
  +     if (rcStrict != VINF_SUCCESS)
  +         return rcStrict;
  +
  +     /*
  +      * Store the X87 state.
  +      */
  +     if (fReqComponents & XSAVE_C_X87)
  +     {
  +         if (fRstorMask & XSAVE_C_X87)
  +         {
  +             pDst->FCW    = pSrc->FCW;
  +             pDst->FSW    = pSrc->FSW;
  +             pDst->FTW    = pSrc->FTW & UINT16_C(0xff);
  +             pDst->FOP    = pSrc->FOP;
  +             pDst->FPUIP  = pSrc->FPUIP;
  +             pDst->CS     = pSrc->CS;
  +             pDst->FPUDP  = pSrc->FPUDP;
  +             pDst->DS     = pSrc->DS;
  +             if (enmEffOpSize == IEMMODE_64BIT)
  +             {
  +                 /* Save upper 16-bits of FPUIP (IP:CS:Rsvd1) and FPUDP (DP:DS:Rsvd2). */
  +                 pDst->Rsrvd1 = pSrc->Rsrvd1;
  +                 pDst->Rsrvd2 = pSrc->Rsrvd2;
  +             }
  +             else
  +             {
  +                 pDst->Rsrvd1 = 0;
  +                 pDst->Rsrvd2 = 0;
  +             }
  +             for (uint32_t i = 0; i < RT_ELEMENTS(pDst->aRegs); i++)
  +             {
  +                 pDst->aRegs[i].au32[0] = pSrc->aRegs[i].au32[0];
  +                 pDst->aRegs[i].au32[1] = pSrc->aRegs[i].au32[1];
  +                 pDst->aRegs[i].au32[2] = pSrc->aRegs[i].au32[2] & UINT32_C(0xffff);
  +                 pDst->aRegs[i].au32[3] = 0;
  +             }
  +         }
  +         else
  +         {
  +             pDst->FCW   = 0x37f;
  +             pDst->FSW   = 0;
  +             pDst->FTW   = 0x00;       /* 0 - empty. */
  +             pDst->FPUDP = 0;
  +             pDst->DS    = 0; //??
  +             pDst->Rsrvd2= 0;
  +             pDst->FPUIP = 0;
  +             pDst->CS    = 0; //??
  +             pDst->Rsrvd1= 0;
  +             pDst->FOP   = 0;
  +             for (uint32_t i = 0; i < RT_ELEMENTS(pSrc->aRegs); i++)
  +             {
  +                 pDst->aRegs[i].au32[0] = 0;
  +                 pDst->aRegs[i].au32[1] = 0;
  +                 pDst->aRegs[i].au32[2] = 0;
  +                 pDst->aRegs[i].au32[3] = 0;
  +             }
  +         }
  +         pHdrDst->bmXState |= XSAVE_C_X87; /* playing safe for now */
  +     }
  +
  +     /* MXCSR */
  +     if (fReqComponents & (XSAVE_C_SSE | XSAVE_C_YMM))
  +     {
  +         if (fRstorMask & (XSAVE_C_SSE | XSAVE_C_YMM))
  +             pDst->MXCSR = pSrc->MXCSR;
  +         else
  +             pDst->MXCSR = 0x1f80;
  +     }
  +
  +     /* XMM registers. */
  +     if (fReqComponents & XSAVE_C_SSE)
  +     {
  +         if (fRstorMask & XSAVE_C_SSE)
  +         {
  +             for (uint32_t i = 0; i < cXmmRegs; i++)
  +                 pDst->aXMM[i] = pSrc->aXMM[i];
  +             /** @todo Testcase: What happens to the reserved XMM registers? Untouched,
  +              *        right? */
  +         }
  +         else
  +         {
  +             for (uint32_t i = 0; i < cXmmRegs; i++)
  +             {
  +                 pDst->aXMM[i].au64[0] = 0;
  +                 pDst->aXMM[i].au64[1] = 0;
  +             }
  +         }
  +         pHdrDst->bmXState |= XSAVE_C_SSE; /* playing safe for now */
  +     }
  +
  +     /* Unmap the x87 state bits (so we've don't run out of mapping). */
  +     rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem512, IEM_ACCESS_DATA_R);
  +     if (rcStrict != VINF_SUCCESS)
  +         return rcStrict;
  +
  +     /*
  +      * Restore AVX state.
  +      */
  +     if (fReqComponents & XSAVE_C_YMM)
  +     {
  +         AssertLogRelReturn(pCtx->aoffXState[XSAVE_C_YMM_BIT] != UINT16_MAX, VERR_IEM_IPE_9);
  +         PX86XSAVEYMMHI pCompDst = CPUMCTX_XSAVE_C_PTR(pCtx, XSAVE_C_YMM_BIT, PX86XSAVEYMMHI);
  +
  +         if (fRstorMask & XSAVE_C_YMM)
  +         {
  +             /** @todo testcase: xsave64 vs xsave32 wrt XSAVE_C_YMM. */
  +             PCX86XSAVEYMMHI pCompSrc;
  +             rcStrict = iemMemMap(pVCpu, (void **)&pCompSrc, sizeof(*pCompDst),
  +                                  iEffSeg, GCPtrEff + pCtx->aoffXState[XSAVE_C_YMM_BIT], IEM_ACCESS_DATA_R);
  +             if (rcStrict != VINF_SUCCESS)
  +                 return rcStrict;
  +
  +             for (uint32_t i = 0; i < cXmmRegs; i++)
  +             {
  +                 pCompDst->aYmmHi[i].au64[0] = pCompSrc->aYmmHi[i].au64[0];
  +                 pCompDst->aYmmHi[i].au64[1] = pCompSrc->aYmmHi[i].au64[1];
  +             }
  +
  +             rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pCompSrc, IEM_ACCESS_DATA_R);
  +             if (rcStrict != VINF_SUCCESS)
  +                 return rcStrict;
  +         }
  +         else
  +         {
  +             for (uint32_t i = 0; i < cXmmRegs; i++)
  +             {
  +                 pCompDst->aYmmHi[i].au64[0] = 0;
  +                 pCompDst->aYmmHi[i].au64[1] = 0;
  +             }
  +         }
  +         pHdrDst->bmXState |= XSAVE_C_YMM; /* playing safe for now */
  +     }
  +
  +     iemRegAddToRipAndClearRF(pVCpu, cbInstr);
  +     return VINF_SUCCESS;
  + }
  +
  +
  +
  +
  + /**
      * Implements 'STMXCSR'.
      *
  …
      if (   !(pCtx->cr0 & X86_CR0_EM)
          && (pCtx->cr4 & X86_CR4_OSFXSR))
  +   {
  +       if (!(pCtx->cr0 & X86_CR0_TS))
  +       {
  +           /*
  +            * Do the job.
  +            */
  +           VBOXSTRICTRC rcStrict = iemMemStoreDataU32(pVCpu, iEffSeg, GCPtrEff, pCtx->CTX_SUFF(pXState)->x87.MXCSR);
  +           if (rcStrict == VINF_SUCCESS)
  +           {
  +               iemRegAddToRipAndClearRF(pVCpu, cbInstr);
  +               return VINF_SUCCESS;
  +           }
  +           return rcStrict;
  +       }
  +       return iemRaiseDeviceNotAvailable(pVCpu);
  +   }
  +   return iemRaiseUndefinedOpcode(pVCpu);
  + }
  +
  +
  + /**
  +  * Implements 'VSTMXCSR'.
  +  *
  +  * @param   GCPtrEff        The address of the image.
  +  */
  + IEM_CIMPL_DEF_2(iemCImpl_vstmxcsr, uint8_t, iEffSeg, RTGCPTR, GCPtrEff)
  + {
  +     PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
  +
  +     /*
  +      * Raise exceptions.
  +      */
  +     if (   (   !IEM_IS_GUEST_CPU_AMD(pVCpu)
  +             ? (pCtx->aXcr[0] & (XSAVE_C_SSE | XSAVE_C_YMM)) == (XSAVE_C_SSE | XSAVE_C_YMM)
  +             : !(pCtx->cr0 & X86_CR0_EM)) /* AMD Jaguar CPU (f0x16,m0,s1) behaviour */
  +         && (pCtx->cr4 & X86_CR4_OSXSAVE))
      {
          if (!(pCtx->cr0 & X86_CR0_TS))
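For orientation (sketch only, not part of the changeset): the implementations above assume the standard-format XSAVE area from the Intel SDM: a 512-byte legacy FXSAVE image, a 64-byte header at offset 512 whose XSTATE_BV bitmap is what bmXState models, and the YMM_Hi128 component at the offset reported by CPUID leaf 0Dh (mirrored here by pCtx->aoffXState[XSAVE_C_YMM_BIT]). Roughly:

    #include <stdint.h>

    /* Sketch of the standard-format XSAVE area as the code above uses it.
       Offsets of components beyond the header come from CPUID.(EAX=0Dh,ECX=n). */
    typedef struct XSAVEAREASKETCH
    {
        uint8_t  abLegacyFxSave[512];   /* x87/MMX, MXCSR and the XMM registers (FXSAVE layout). */
        uint64_t bmXStateBv;            /* XSTATE_BV: which components hold live state.          */
        uint64_t bmXCompBv;             /* XCOMP_BV: compacted-format flag plus component map.   */
        uint8_t  abRsvdHdr[48];         /* rest of the 64-byte header, must be zero.             */
        uint8_t  abYmmHi[16][16];       /* YMM_Hi128 component, typically at offset 576.         */
    } XSAVEAREASKETCH;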
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsOneByte.cpp.h
r66420 → r66457

       outside of 64-bit mode. VEX is not available in real or v86 mode. */
      uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
  -   if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
  -   {
  -       if ((bRm & X86_MODRM_MOD_MASK) != (3 << X86_MODRM_MOD_SHIFT))
  +   if (   pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
  +       || (bRm & X86_MODRM_MOD_MASK) == (3 << X86_MODRM_MOD_SHIFT) )
  +   {
  +       IEMOP_MNEMONIC(vex3_prefix, "vex3");
  +       if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx)
          {
  -           IEMOP_MNEMONIC(les_Gv_Mp, "les Gv,Mp");
  -           return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_ES, bRm);
  -       }
  -       IEMOP_HLP_NO_REAL_OR_V86_MODE();
  -   }
  -
  -   IEMOP_MNEMONIC(vex3_prefix, "vex3");
  -   if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx)
  -   {
  -       /** @todo Test when exctly the VEX conformance checks kick in during
  -        *        instruction decoding and fetching (using \#PF). */
  -       uint8_t bVex2;   IEM_OPCODE_GET_NEXT_U8(&bVex2);
  -       uint8_t bOpcode; IEM_OPCODE_GET_NEXT_U8(&bOpcode);
  -       if (   (  pVCpu->iem.s.fPrefixes
  -               & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_LOCK | IEM_OP_PRF_REX))
  -           == 0)
  -       {
  +           /* Note! The real mode, v8086 mode and invalid prefix checks are
  +                    done once the instruction is fully decoded. */
  +           uint8_t bVex2;   IEM_OPCODE_GET_NEXT_U8(&bVex2);
  +           uint8_t bOpcode; IEM_OPCODE_GET_NEXT_U8(&bOpcode);
              pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_VEX;
              if (bVex2 & 0x80 /* VEX.W */)
  …
              }
          }
  -       else
  -           Log(("VEX3: Invalid prefix mix!\n"));
  -   }
  -   else
          Log(("VEX3: AVX support disabled!\n"));
  -   return IEMOP_RAISE_INVALID_OPCODE();
  +       return IEMOP_RAISE_INVALID_OPCODE();
  +   }
  +
  +   IEMOP_MNEMONIC(les_Gv_Mp, "les Gv,Mp");
  +   return FNIEMOP_CALL_2(iemOpCommonLoadSRegAndGreg, X86_SREG_ES, bRm);
  }
  …
      if (IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx)
      {
  +       /* Note! The real mode, v8086 mode and invalid prefix checks are
  +                done once the instruction is fully decoded. */
          uint8_t bOpcode; IEM_OPCODE_GET_NEXT_U8(&bOpcode);
  -       if (   (  pVCpu->iem.s.fPrefixes
  -               & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_LOCK | IEM_OP_PRF_REX))
  -           == 0)
  -       {
  -           pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_VEX;
  -           pVCpu->iem.s.uRexReg    = ~bRm >> (7 - 3);
  -           pVCpu->iem.s.uVex3rdReg = (~bRm >> 3) & 0xf;
  -           pVCpu->iem.s.uVexLength = (bRm >> 2) & 1;
  -           pVCpu->iem.s.idxPrefix  = bRm & 0x3;
  -
  -           return FNIEMOP_CALL(g_apfnVexMap1[(uintptr_t)bOpcode * 4 + pVCpu->iem.s.idxPrefix]);
  -       }
  -
  -       Log(("VEX2: Invalid prefix mix!\n"));
  +       pVCpu->iem.s.fPrefixes |= IEM_OP_PRF_VEX;
  +       pVCpu->iem.s.uRexReg    = ~bRm >> (7 - 3);
  +       pVCpu->iem.s.uVex3rdReg = (~bRm >> 3) & 0xf;
  +       pVCpu->iem.s.uVexLength = (bRm >> 2) & 1;
  +       pVCpu->iem.s.idxPrefix  = bRm & 0x3;
  +
  +       return FNIEMOP_CALL(g_apfnVexMap1[(uintptr_t)bOpcode * 4 + pVCpu->iem.s.idxPrefix]);
      }
  -   else
  -       Log(("VEX2: AVX support disabled!\n"));
  -
  -   /* @todo does intel completely decode the sequence with SIB/disp before \#UD? */
  +
  +   /** @todo does intel completely decode the sequence with SIB/disp before \#UD? */
  +   Log(("VEX2: AVX support disabled!\n"));
      return IEMOP_RAISE_INVALID_OPCODE();
  }
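Background for the field assignments above (illustrative only; the bit layout is the standard VEX encoding, and the helper below is not part of the changeset): in the two-byte C5h form the payload byte carries inverted R and vvvv fields plus L and pp, which is why the decoder negates the byte before shifting. A standalone version of the same extraction:

    #include <stdint.h>

    /* Sketch: decode the payload byte of a 2-byte VEX prefix (C5h xx).
       Bit 7 = ~R, bits 6:3 = ~vvvv, bit 2 = L, bits 1:0 = pp. */
    typedef struct VEX2FIELDS
    {
        uint8_t fRexR;          /* extra ModRM.reg bit (un-inverted)           */
        uint8_t iVvvv;          /* second source register, 0..15 (un-inverted) */
        uint8_t cVexLength;     /* 0 = 128-bit, 1 = 256-bit                    */
        uint8_t idxPrefix;      /* 0 = none, 1 = 66h, 2 = F3h, 3 = F2h         */
    } VEX2FIELDS;

    static VEX2FIELDS DecodeVex2Payload(uint8_t bPayload)
    {
        VEX2FIELDS Fields;
        Fields.fRexR      = (uint8_t)((~bPayload >> 7) & 1);
        Fields.iVvvv      = (uint8_t)((~bPayload >> 3) & 0xf);
        Fields.cVexLength = (uint8_t)((bPayload >> 2) & 1);
        Fields.idxPrefix  = (uint8_t)(bPayload & 0x3);
        return Fields;
    }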
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsPython.py
r66450 → r66457

      'MdWO':       ( 'IDX_UseModRM', 'rm',    '%Md',  'Md',      ),
      'Mq':         ( 'IDX_UseModRM', 'rm',    '%Mq',  'Mq',      ),
  +   'MRO':        ( 'IDX_UseModRM', 'rm',    '%M',   'M',       ),
  +   'MRW':        ( 'IDX_UseModRM', 'rm',    '%M',   'M',       ),

      # ModR/M.reg
  …
      # paging on/off
      'paging': {
  -       'on':  'paging_on',
  -       'off': 'paging_off',
  +       'on':       'paging_on',
  +       'off':      'paging_off',
  +   },
  +   # CPU vendor
  +   'vendor': {
  +       'amd':      'vendor_amd',
  +       'intel':    'vendor_intel',
  +       'via':      'vendor_via',
      },
  };
  …
      'paging':   'paging==on',
      '!paging':  'paging==off',
  +   'amd':      'vendor==amd',
  +   '!amd':     'vendor!=amd',
  +   'intel':    'vendor==intel',
  +   '!intel':   'vendor!=intel',
  +   'via':      'vendor==via',
  +   '!via':     'vendor!=via',
  };
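These predicates become usable in the @optest statements of the instruction-decoder sources; a made-up example (hypothetical instruction, same syntax as the vstmxcsr tests elsewhere in this changeset):

    /**
     * Example only (not from the changeset): vendor-conditional test statements.
     *
     * @optest      intel / mxcsr=0x2085 cr0|=em -> op1=0x2085
     * @optest      amd   / mxcsr=0x2085 cr0|=em -> value.xcpt=0x6
     * @optest      !via  / op1=1 -> op2=1
     */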
trunk/src/VBox/VMM/VMMAll/IEMAllInstructionsTwoByte0f.cpp.h
r66450 → r66457

       * @opcpuid     sse
       * @opgroup     og_sse_mxcsrsm
  +    * @opxcpttype  5
       * @optest      op1=0      -> mxcsr=0
       * @optest      op1=0x2083 -> mxcsr=0x2083
  …
       * @opcpuid     sse
       * @opgroup     og_sse_mxcsrsm
  +    * @opxcpttype  5
       * @optest      mxcsr=0      -> op1=0
       * @optest      mxcsr=0x2083 -> op1=0x2083
  …
       * @opcpuid     avx
       * @opgroup     og_avx_mxcsrsm
  +    * @opxcpttype  5
       * @optest      mxcsr=0      -> op1=0
       * @optest      mxcsr=0x2083 -> op1=0x2083
       * @optest      mxcsr=0x2084 cr0|=ts -> value.xcpt=0x7
  -    * @optest      mxcsr=0x2085 cr0|=em -> op1=0x2085
  +    * @optest      !amd / mxcsr=0x2085 cr0|=em -> op1=0x2085
  +    * @optest      amd  / mxcsr=0x2085 cr0|=em -> value.xcpt=0x6
       * @optest      mxcsr=0x2086 cr0|=mp -> op1=0x2086
       * @optest      mxcsr=0x2087 cr4&~=osfxsr -> op1=0x2087
  -    * @optest      mxcsr=0x2088 cr0|=ts,em -> value.xcpt=0x7
  -    * @optest      mxcsr=0x2089 cr0|=em cr4&~=osfxsr -> op1=0x2089
  -    * @optest      mxcsr=0x208a cr0|=ts,em cr4&~=osfxsr -> value.xcpt=0x7
  -    * @optest      mxcsr=0x208b cr0|=ts,em,mp cr4&~=osfxsr -> value.xcpt=0x7
  -    * @optest      mxcsr=0x208c xcr0&~=all_avx -> value.xcpt=0x6
  -    * @optest      mxcsr=0x208d xcr0&~=all_avx_sse -> value.xcpt=0x6
  -    * @optest      mxcsr=0x208e xcr0&~=all_avx cr0|=ts -> value.xcpt=0x6
       * @optest      mxcsr=0x208f cr4&~=osxsave -> value.xcpt=0x6
  +    * @optest      mxcsr=0x2087 cr4&~=osfxsr,osxsave -> value.xcpt=0x6
  +    * @optest      !amd / mxcsr=0x2088 cr0|=ts,em -> value.xcpt=0x7
  +    * @optest      amd  / mxcsr=0x2088 cr0|=ts,em -> value.xcpt=0x6
  +    * @optest      !amd / mxcsr=0x2089 cr0|=em cr4&~=osfxsr -> op1=0x2089
  +    * @optest      amd  / mxcsr=0x2089 cr0|=em cr4&~=osfxsr -> value.xcpt=0x6
  +    * @optest      !amd / mxcsr=0x208a cr0|=ts,em cr4&~=osfxsr -> value.xcpt=0x7
  +    * @optest      amd  / mxcsr=0x208a cr0|=ts,em cr4&~=osfxsr -> value.xcpt=0x6
  +    * @optest      !amd / mxcsr=0x208b cr0|=ts,em,mp cr4&~=osfxsr -> value.xcpt=0x7
  +    * @optest      amd  / mxcsr=0x208b cr0|=ts,em,mp cr4&~=osfxsr -> value.xcpt=0x6
  +    * @optest      !amd / mxcsr=0x208c xcr0&~=all_avx -> value.xcpt=0x6
  +    * @optest      amd  / mxcsr=0x208c xcr0&~=all_avx -> op1=0x208c
  +    * @optest      !amd / mxcsr=0x208d xcr0&~=all_avx_sse -> value.xcpt=0x6
  +    * @optest      amd  / mxcsr=0x208d xcr0&~=all_avx_sse -> op1=0x208d
  +    * @optest      !amd / mxcsr=0x208e xcr0&~=all_avx cr0|=ts -> value.xcpt=0x6
  +    * @optest      amd  / mxcsr=0x208e xcr0&~=all_avx cr0|=ts -> value.xcpt=0x7
       * @optest      mxcsr=0x2082 cr0|=ts cr4&~=osxsave -> value.xcpt=0x6
       * @optest      mxcsr=0x2081 xcr0&~=all_avx cr0|=ts cr4&~=osxsave
       *              -> value.xcpt=0x6
  +    * @remarks     AMD Jaguar CPU (f0x16,m0,s1) \#UD when CR0.EM is set.  It also
  +    *              doesn't seem to check XCR0[2:1] != 11b.  This does not match the
  +    *              APMv4 rev 3.17 page 509.
  +    * @todo        Test this instruction on AMD Ryzen.
       * @oponlytest
       */
  …
          IEM_MC_ARG(RTGCPTR, GCPtrEff, 1);
          IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
  -       IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
  +       IEMOP_HLP_DONE_VEX_DECODING_L_ZERO_NO_VVV();
          IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ();
          IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
  -       IEM_MC_CALL_CIMPL_2(iemCImpl_stmxcsr, iEffSeg, GCPtrEff);
  +       IEM_MC_CALL_CIMPL_2(iemCImpl_vstmxcsr, iEffSeg, GCPtrEff);
          IEM_MC_END();
          return VINF_SUCCESS;
  …

  -   /** Opcode 0x0f 0xae mem/4. */
  -   FNIEMOP_UD_STUB_1(iemOp_Grp15_xsave, uint8_t, bRm);
  -
  -   /** Opcode 0x0f 0xae mem/5. */
  -   FNIEMOP_UD_STUB_1(iemOp_Grp15_xrstor, uint8_t, bRm);
  + /**
  +  * @opmaps      vexgrp15
  +  * @opcode      !11/4
  +  * @oppfx       none
  +  * @opcpuid     xsave
  +  * @opgroup     og_system
  +  * @opxcpttype  none
  +  */
  + FNIEMOP_DEF_1(iemOp_Grp15_xsave, uint8_t, bRm)
  + {
  +     IEMOP_MNEMONIC1(M_MEM, XSAVE, xsave, MRW, DISOPTYPE_HARMLESS, 0);
  +     if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fXSaveRstor)
  +         return IEMOP_RAISE_INVALID_OPCODE();
  +
  +     IEM_MC_BEGIN(3, 0);
  +     IEM_MC_ARG(uint8_t, iEffSeg, 0);
  +     IEM_MC_ARG(RTGCPTR, GCPtrEff, 1);
  +     IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pVCpu->iem.s.enmEffOpSize, 2);
  +     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
  +     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
  +     IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ();
  +     IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
  +     IEM_MC_CALL_CIMPL_3(iemCImpl_xsave, iEffSeg, GCPtrEff, enmEffOpSize);
  +     IEM_MC_END();
  +     return VINF_SUCCESS;
  + }
  +
  +
  + /**
  +  * @opmaps      vexgrp15
  +  * @opcode      !11/5
  +  * @oppfx       none
  +  * @opcpuid     xsave
  +  * @opgroup     og_system
  +  * @opxcpttype  none
  +  */
  + FNIEMOP_DEF_1(iemOp_Grp15_xrstor, uint8_t, bRm)
  + {
  +     IEMOP_MNEMONIC1(M_MEM, XRSTOR, xrstor, MRO, DISOPTYPE_HARMLESS, 0);
  +     if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fXSaveRstor)
  +         return IEMOP_RAISE_INVALID_OPCODE();
  +
  +     IEM_MC_BEGIN(3, 0);
  +     IEM_MC_ARG(uint8_t, iEffSeg, 0);
  +     IEM_MC_ARG(RTGCPTR, GCPtrEff, 1);
  +     IEM_MC_ARG_CONST(IEMMODE, enmEffOpSize,/*=*/pVCpu->iem.s.enmEffOpSize, 2);
  +     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
  +     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
  +     IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ();
  +     IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
  +     IEM_MC_CALL_CIMPL_3(iemCImpl_xrstor, iEffSeg, GCPtrEff, enmEffOpSize);
  +     IEM_MC_END();
  +     return VINF_SUCCESS;
  + }

      /** Opcode 0x0f 0xae mem/6. */
  …
      /* 0x8e */ IEMOP_X4(iemOp_InvalidNeedRM),
      /* 0x8f */ IEMOP_X4(iemOp_InvalidNeedRM),
  -              IEMOP_X4(iemOp_InvalidNeedRM),
  +
      /* 0x90 */ IEMOP_X4(iemOp_InvalidNeedRM),
      /* 0x91 */ IEMOP_X4(iemOp_InvalidNeedRM),
  …
      /* 0xff */ IEMOP_X4(iemOp_ud0),
  };
  - AssertCompile(RT_ELEMENTS(g_apfnTwoByteMap) == 1024);
  + AssertCompile(RT_ELEMENTS(g_apfnVexMap1) == 1024);
    /** @} */
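A small aside (illustrative, not from the changeset): the @opcode !11/4 and !11/5 annotations denote the memory forms of group 15 with ModRM.reg = 4 and 5, which is how XSAVE and XRSTOR are selected within opcode 0F AE. A group dispatcher derives that selection from the ModRM byte roughly as follows, where the helper name is hypothetical:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: split a ModRM byte the way a group-15 dispatcher would.
       reg = 4 selects xsave, reg = 5 selects xrstor; the memory forms
       require mod != 3. */
    static unsigned Grp15RegField(uint8_t bRm, bool *pfMemForm)
    {
        *pfMemForm = ((bRm >> 6) & 3) != 3;     /* mod: 0..2 = memory operand, 3 = register */
        return (bRm >> 3) & 7;                  /* reg: the /digit in "0F AE /4" and "/5"   */
    }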
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
r66314 → r66457

      #define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE()                   do { } while (0)
      #define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf)      do { } while (0)
  -   #define IEMOP_HLP_DONE_DECODING()                           do { } while (0)
  +   #define IEMOP_HLP_DONE_VEX_DECODING_L_ZERO_NO_VVV()         do { } while (0)
      #define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX()            do { } while (0)
      #define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() do { } while (0)
  +   #define IEMOP_HLP_DONE_DECODING()                           do { } while (0)
  +   #define IEMOP_HLP_DONE_VEX_DECODING()                       do { } while (0)
  +
      #define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) do { } while (0)
      #define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) do { } while (0)
trunk/src/VBox/ValidationKit/bootsectors/bs3-cpu-generated-1-template.c
r66450 → r66457

      } BS3CG1OPLOC;

  +
      /**
       * The state.
  …
          /** Target mode (g_bBs3CurrentMode). */
          uint8_t                 bMode;
  +       /** The CPU vendor (BS3CPUVENDOR). */
  +       uint8_t                 bCpuVendor;
          /** First ring being tested. */
          uint8_t                 iFirstRing;
  …
          uint8_t                 offCurImm;
          /** Buffer for assembling the current instruction. */
  -       uint8_t                 abCurInstr[24];
  +       uint8_t                 abCurInstr[23];

          /** Set if the encoding can't be tested in the same ring as this test code.
  …
          CASE_PRED(BS3CG1PRED_PAGING_ON,     BS3_MODE_IS_PAGED(pThis->bMode));
          CASE_PRED(BS3CG1PRED_PAGING_OFF,    !BS3_MODE_IS_PAGED(pThis->bMode));
  +       CASE_PRED(BS3CG1PRED_VENDOR_AMD,    pThis->bCpuVendor == BS3CPUVENDOR_AMD);
  +       CASE_PRED(BS3CG1PRED_VENDOR_INTEL,  pThis->bCpuVendor == BS3CPUVENDOR_INTEL);
  +       CASE_PRED(BS3CG1PRED_VENDOR_VIA,    pThis->bCpuVendor == BS3CPUVENDOR_VIA);

      #undef CASE_PRED
  …
       *
       * @returns true if successful, false if not.
  -    * @param   pThis               The state.
  -    * @param   bTestXcptExpected   The exception causing the test code to stop
  -    *                              executing.
  -    * @param   iEncoding           For error reporting.
  +    * @param   pThis                   The state.
  +    * @param   bTestXcptExpected       The exception causing the test code to stop
  +    *                                  executing.
  +    * @param   fInvalidEncodingPgFault Set if we've cut the instruction a byte
  +    *                                  short and is expecting a \#PF on the page
  +    *                                  boundrary rather than a \#UD.  Only set if
  +    *                                  fInvalidEncoding is also set.
  +    * @param   iEncoding               For error reporting.
       */
  -   static bool BS3_NEAR_CODE Bs3Cg1CheckResult(PBS3CG1STATE pThis, uint8_t bTestXcptExpected, unsigned iEncoding)
  +   static bool BS3_NEAR_CODE Bs3Cg1CheckResult(PBS3CG1STATE pThis, uint8_t bTestXcptExpected,
  +                                               bool fInvalidEncodingPgFault, unsigned iEncoding)
      {
          unsigned iOperand;
  …
          {
              cbAdjustPc = 0;
  -           bExpectedXcpt = X86_XCPT_UD;
  +           if (!fInvalidEncodingPgFault)
  +               bExpectedXcpt = X86_XCPT_UD;
  +           else
  +           {
  +               bExpectedXcpt = X86_XCPT_PF;
  +               pThis->Ctx.cr2.u = pThis->uCodePgFlat + X86_PAGE_SIZE;
  +           }
          }
          if (RT_LIKELY(   pThis->TrapFrame.bXcpt == bExpectedXcpt
  …
          if (pThis->aOperands[iOperand].enmLocation == BS3CG1OPLOC_MEM_RW)
          {
  -           BS3PTRUNION PtrUnion;
  -           PtrUnion.pb = &pThis->pbDataPg[X86_PAGE_SIZE - pThis->aOperands[iOperand].off];
  -           switch (pThis->aOperands[iOperand].cbOp)
  +           if (pThis->aOperands[iOperand].off)
              {
  +               BS3PTRUNION PtrUnion;
  +               PtrUnion.pb = &pThis->pbDataPg[X86_PAGE_SIZE - pThis->aOperands[iOperand].off];
  +               switch (pThis->aOperands[iOperand].cbOp)
  +               {
                      case 1:
                          if (*PtrUnion.pu8 == pThis->MemOp.ab[0])
                              continue;
                          Bs3TestFailedF("op%u: Wrote %#04RX8, expected %#04RX8",
                                         iOperand, *PtrUnion.pu8, pThis->MemOp.ab[0]);
                          break;
                      case 2:
                          if (*PtrUnion.pu16 == pThis->MemOp.au16[0])
                              continue;
                          Bs3TestFailedF("op%u: Wrote %#06RX16, expected %#06RX16",
                                         iOperand, *PtrUnion.pu16, pThis->MemOp.au16[0]);
                          break;
                      case 4:
                          if (*PtrUnion.pu32 == pThis->MemOp.au32[0])
                              continue;
                          Bs3TestFailedF("op%u: Wrote %#010RX32, expected %#010RX32",
                                         iOperand, *PtrUnion.pu32, pThis->MemOp.au32[0]);
                          break;
                      case 8:
                          if (*PtrUnion.pu64 == pThis->MemOp.au64[0])
                              continue;
                          Bs3TestFailedF("op%u: Wrote %#018RX64, expected %#018RX64",
                                         iOperand, *PtrUnion.pu64, pThis->MemOp.au64[0]);
                          break;
                      default:
                          if (Bs3MemCmp(PtrUnion.pb, pThis->MemOp.ab, pThis->aOperands[iOperand].cbOp) == 0)
                              continue;
                          Bs3TestFailedF("op%u: Wrote %.*Rhxs, expected %.*Rhxs",
                                         iOperand,
                                         pThis->aOperands[iOperand].cbOp, PtrUnion.pb,
                                         pThis->aOperands[iOperand].cbOp, pThis->MemOp.ab);
                          break;
  +               }
              }
  +           else
  +               Bs3TestFailedF("op%u: off is zero\n", iOperand);
              fOkay = false;
          }
  …
           * Report failure.
           */
  -       Bs3TestFailedF("%RU32[%u]: encoding#%u: %.*Rhxs",
  -                      pThis->iInstr, pThis->iTest, iEncoding, pThis->cbCurInstr, pThis->abCurInstr);
  +       Bs3TestFailedF("ins#%RU32/test#%u: encoding #%u: %.*Rhxs%s",
  +                      pThis->iInstr, pThis->iTest, iEncoding, pThis->cbCurInstr, pThis->abCurInstr,
  +                      fInvalidEncodingPgFault ? " (cut short)" : "");
      }
      else
  -       Bs3TestFailedF("%RU32[%u]: bXcpt=%#x expected %#x; rip=%RX64 expected %RX64; encoding#%u: %.*Rhxs",
  +       Bs3TestFailedF("ins#%RU32/test#%u: bXcpt=%#x expected %#x; rip=%RX64 expected %RX64; encoding#%u: %.*Rhxs%s",
                         pThis->iInstr, pThis->iTest,
                         pThis->TrapFrame.bXcpt, bExpectedXcpt,
                         pThis->TrapFrame.Ctx.rip.u, pThis->Ctx.rip.u + cbAdjustPc,
  -                      iEncoding, pThis->cbCurInstr, pThis->abCurInstr);
  +                      iEncoding, pThis->cbCurInstr, pThis->abCurInstr, fInvalidEncodingPgFault ? " (cut short)" : "");
      Bs3TestPrintf("cpl=%u cbOperands=%u\n", pThis->uCpl, pThis->cbOperand);
  …
              case BS3CG1OPLOC_MEM:
              case BS3CG1OPLOC_MEM_RW:
  -               PtrUnion.pb = &pThis->pbDataPg[X86_PAGE_SIZE - pThis->aOperands[iOperand].off];
  -               switch (pThis->aOperands[iOperand].cbOp)
  +               if (pThis->aOperands[iOperand].off)
                  {
  +                   PtrUnion.pb = &pThis->pbDataPg[X86_PAGE_SIZE - pThis->aOperands[iOperand].off];
  +                   switch (pThis->aOperands[iOperand].cbOp)
  +                   {
                          case 1: Bs3TestPrintf("op%u: result mem08: %#04RX8\n", iOperand, *PtrUnion.pu8); break;
                          case 2: Bs3TestPrintf("op%u: result mem16: %#06RX16\n", iOperand, *PtrUnion.pu16); break;
                          case 4: Bs3TestPrintf("op%u: result mem32: %#010RX32\n", iOperand, *PtrUnion.pu32); break;
                          case 8: Bs3TestPrintf("op%u: result mem64: %#018RX64\n", iOperand, *PtrUnion.pu64); break;
                          default:
                              Bs3TestPrintf("op%u: result mem%u: %.*Rhxs\n", iOperand, pThis->aOperands[iOperand].cbOp * 8,
                                            pThis->aOperands[iOperand].cbOp, PtrUnion.pb);
                              break;
  +                   }
                      if (pThis->aOperands[iOperand].enmLocation == BS3CG1OPLOC_MEM_RW)
                      {
                          PtrUnion.pb = pThis->MemOp.ab;
                          switch (pThis->aOperands[iOperand].cbOp)
                          {
                              case 1: Bs3TestPrintf("op%u: expect mem08: %#04RX8\n", iOperand, *PtrUnion.pu8); break;
                              case 2: Bs3TestPrintf("op%u: expect mem16: %#06RX16\n", iOperand, *PtrUnion.pu16); break;
                              case 4: Bs3TestPrintf("op%u: expect mem32: %#010RX32\n", iOperand, *PtrUnion.pu32); break;
                              case 8: Bs3TestPrintf("op%u: expect mem64: %#018RX64\n", iOperand, *PtrUnion.pu64); break;
                              default:
                                  Bs3TestPrintf("op%u: expect mem%u: %.*Rhxs\n", iOperand, pThis->aOperands[iOperand].cbOp * 8,
                                                pThis->aOperands[iOperand].cbOp, PtrUnion.pb);
                                  break;
                          }
                      }
                  }
  +               else
  +                   Bs3TestPrintf("op%u: mem%u: zero off value!!\n", iOperand, pThis->aOperands[iOperand].cbOp * 8);
                  break;
          }
  …
          Bs3TestPrintf("-- Expected context:\n");
          Bs3RegCtxPrint(&pThis->Ctx);
  +       if (pThis->fWorkExtCtx)
  +           Bs3TestPrintf("xcr0=%RX64\n", pThis->pExtCtx->fXcr0Saved);
          Bs3TestPrintf("-- Actual context:\n");
          Bs3TrapPrintFrame(&pThis->TrapFrame);
  +       if (pThis->fWorkExtCtx)
  +           Bs3TestPrintf("xcr0=%RX64\n", pThis->pResultExtCtx->fXcr0Saved);
          Bs3TestPrintf("\n");
          return false;
  …
      pThis->pszMode          = Bs3GetModeName(bMode);
      pThis->pszModeShort     = Bs3GetModeNameShortLower(bMode);
  +   pThis->bCpuVendor       = Bs3GetCpuVendor();
      pThis->pchMnemonic      = g_achBs3Cg1Mnemonics;
      pThis->pabOperands      = g_abBs3Cg1Operands;
  …
      pThis->Ctx.rflags.u32 &= ~X86_EFL_RF;
      pThis->Ctx.rflags.u32 |= pThis->TrapFrame.Ctx.rflags.u32 & X86_EFL_RF;
  -   pThis->bValueXcpt      = UINT8_MAX;
  +   pThis->bValueXcpt      = UINT8_MAX; //???
      if (   pThis->fInvalidEncoding
          || pThis->bAlignmentXcpt != UINT8_MAX
  …
                           &pThis->TrapFrame.Ctx, NULL /*pbCode*/))
      {
  -       Bs3Cg1CheckResult(pThis, bTestXcptExpected, iEncoding);
  +       Bs3Cg1CheckResult(pThis, bTestXcptExpected, false /*fInvalidEncodingPgFault*/, iEncoding);
  +   }
  +
  +   /*
  +    * If this is an invalid encoding or instruction, check that we
  +    * get a page fault when shortening it by one byte.
  +    * (Since we didn't execute the output context modifier, we don't
  +    * need to re-initialize the start context.)
  +    */
  +   if (   pThis->fInvalidEncoding
  +       && BS3_MODE_IS_PAGED(pThis->bMode)
  +       && pThis->cbCurInstr)
  +   {
  +       pbCode  += 1;
  +       offCode += 1;
  +       pThis->Ctx.rip.u = pThis->CodePgRip + offCode;
  +       Bs3MemCpy(pbCode, pThis->abCurInstr, pThis->cbCurInstr - 1);
  +
  +       /* Run the instruction. */
  +       BS3CG1_DPRINTF(("dbg: Running test #%u (cut short #PF)\n", pThis->iTest));
  +       //Bs3RegCtxPrint(&pThis->Ctx);
  +       if (pThis->fWorkExtCtx)
  +           Bs3ExtCtxRestore(pThis->pExtCtx);
  +       Bs3TrapSetJmpAndRestore(&pThis->Ctx, &pThis->TrapFrame);
  +       if (pThis->fWorkExtCtx)
  +           Bs3ExtCtxSave(pThis->pResultExtCtx);
  +       BS3CG1_DPRINTF(("dbg: bXcpt=%#x rip=%RX64 -> %RX64 (cut short #PF)\n",
  +                       pThis->TrapFrame.bXcpt, pThis->Ctx.rip.u, pThis->TrapFrame.Ctx.rip.u));
  +
  +       /* Check it */
  +       pThis->Ctx.rflags.u32 &= ~X86_EFL_RF;
  +       pThis->Ctx.rflags.u32 |= pThis->TrapFrame.Ctx.rflags.u32 & X86_EFL_RF;
  +       Bs3Cg1CheckResult(pThis, X86_XCPT_PF, true /*fInvalidEncodingPgFault*/, iEncoding);
      }
  }
  …
  #if 0
      /* (for debugging) */
  -   if (bMode >= BS3_MODE_PE16)
  +   if (bMode != BS3_MODE_PPV86)
          return BS3TESTDOMODE_SKIPPED;
  #endif
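For context (an illustrative sketch with made-up names, not part of the changeset): the new cut-short pass depends on the test harness placing the instruction bytes flush against the end of the executable page, so that dropping the final byte forces the fetch across the page boundary and produces #PF, with CR2 pointing at the start of the following page, instead of the usual #UD. The placement amounts to something like this:

    #include <stdint.h>
    #include <string.h>

    /* Sketch: copy all but the last instruction byte so that the missing byte
       would have to be fetched from the next (not-present) page, yielding #PF
       with CR2 = code-page address + page size rather than #UD. */
    #define MY_PAGE_SIZE 4096
    static uint8_t *PlaceCutShortInstr(uint8_t *pbCodePg, const uint8_t *pabInstr, unsigned cbInstr)
    {
        uint8_t *pbDst = pbCodePg + MY_PAGE_SIZE - (cbInstr - 1);
        memcpy(pbDst, pabInstr, cbInstr - 1);
        return pbDst;   /* point RIP here; expect #PF on the byte at pbCodePg + MY_PAGE_SIZE */
    }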
trunk/src/VBox/ValidationKit/bootsectors/bs3-cpu-generated-1.h
r66450 → r66457

  typedef enum BS3CG1PRED
  {
      BS3CG1PRED_INVALID = 0,

      /* Operand size. */
      BS3CG1PRED_SIZE_O16,
      BS3CG1PRED_SIZE_O32,
      BS3CG1PRED_SIZE_O64,
      /* Execution ring. */
      BS3CG1PRED_RING_0,
      BS3CG1PRED_RING_1,
      BS3CG1PRED_RING_2,
      BS3CG1PRED_RING_3,
      BS3CG1PRED_RING_0_THRU_2,
      BS3CG1PRED_RING_1_THRU_3,
      /* Basic code mode. */
      BS3CG1PRED_CODE_64BIT,
      BS3CG1PRED_CODE_32BIT,
      BS3CG1PRED_CODE_16BIT,
      /* CPU modes. */
      BS3CG1PRED_MODE_REAL,
      BS3CG1PRED_MODE_PROT,
      BS3CG1PRED_MODE_LONG,
      BS3CG1PRED_MODE_V86,
      BS3CG1PRED_MODE_SMM,
      BS3CG1PRED_MODE_VMX,
      BS3CG1PRED_MODE_SVM,
      /* Paging on/off */
      BS3CG1PRED_PAGING_ON,
      BS3CG1PRED_PAGING_OFF,
  +   /* CPU Vendors. */
  +   BS3CG1PRED_VENDOR_AMD,
  +   BS3CG1PRED_VENDOR_INTEL,
  +   BS3CG1PRED_VENDOR_VIA,

      BS3CG1PRED_END
  } BS3CG1PRED;

  (The rest of the enum block was only re-indented; the three vendor values are the substantive addition.)