Changeset 103908 in vbox for trunk/src/VBox/VMM
- Timestamp: Mar 19, 2024 9:01:35 AM
- svn:sync-xref-src-repo-rev: 162282
- Location: trunk/src/VBox/VMM
- Files: 4 edited
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r103886 → r103908

 #include "IEMAllMemRWTmpl.cpp.h"
 
+#define TMPL_MEM_TYPE       RTUINT128U
+#define TMPL_MEM_TYPE_ALIGN (sizeof(RTUINT128U) - 1)
+#define TMPL_MEM_MAP_FLAGS_ADD (IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE)
+#define TMPL_MEM_FN_SUFF    U128AlignedSse
+#define TMPL_MEM_FMT_TYPE   "%.16Rhxs"
+#define TMPL_MEM_FMT_DESC   "dqword"
+#include "IEMAllMemRWTmpl.cpp.h"
+
 #define TMPL_MEM_TYPE       RTUINT128U
 #define TMPL_MEM_TYPE_ALIGN 0
…
 #endif
     return rc;
 }
 #endif
-
-
-/**
- * Fetches a data dqword (double qword) at an aligned address, generally SSE
- * related.
- *
- * Raises \#GP(0) if not aligned.
- *
- * @returns Strict VBox status code.
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   pu128Dst    Where to return the qword.
- * @param   iSegReg     The index of the segment register to use for
- *                      this access. The base and limits are checked.
- * @param   GCPtrMem    The address of the guest memory.
- */
-VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
-{
-    /* The lazy approach for now... */
-    uint8_t      bUnmapInfo;
-    PCRTUINT128U pu128Src;
-    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem,
-                                IEM_ACCESS_DATA_R, (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
-    if (rc == VINF_SUCCESS)
-    {
-        pu128Dst->au64[0] = pu128Src->au64[0];
-        pu128Dst->au64[1] = pu128Src->au64[1];
-        rc = iemMemCommitAndUnmap(pVCpu, bUnmapInfo);
-        Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
-    }
-    return rc;
-}
-
-
-#ifdef IEM_WITH_SETJMP
-/**
- * Fetches a data dqword (double qword) at an aligned address, generally SSE
- * related, longjmp on error.
- *
- * Raises \#GP(0) if not aligned.
- *
- * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
- * @param   pu128Dst    Where to return the qword.
- * @param   iSegReg     The index of the segment register to use for
- *                      this access. The base and limits are checked.
- * @param   GCPtrMem    The address of the guest memory.
- */
-void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg,
-                                      RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
-{
-    /* The lazy approach for now... */
-    uint8_t      bUnmapInfo;
-    PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R,
-                                                       (sizeof(*pu128Src) - 1) | IEM_MEMMAP_F_ALIGN_GP | IEM_MEMMAP_F_ALIGN_SSE);
-    pu128Dst->au64[0] = pu128Src->au64[0];
-    pu128Dst->au64[1] = pu128Src->au64[1];
-    iemMemCommitAndUnmapJmp(pVCpu, bUnmapInfo);
-    Log(("IEM RD dqword %d|%RGv: %.16Rhxs\n", iSegReg, GCPtrMem, pu128Dst));
-}
-#endif
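The hand-written U128AlignedSse fetch pair removed above is now generated from IEMAllMemRWTmpl.cpp.h; the new instantiation forwards the same combined argument the open-coded version used, i.e. a 15-byte alignment mask with IEM_MEMMAP_F_ALIGN_GP and IEM_MEMMAP_F_ALIGN_SSE OR'ed into the bits above it. As a minimal, self-contained sketch of how such a mask-plus-flags word behaves (flag values and helper names below are invented for the demo, not VirtualBox definitions):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: the last iemMemMap() argument carries the byte-alignment
       mask in its low bits; IEM_MEMMAP_F_* bits above that select how a violation
       is handled.  The flag values here are invented for the demo. */
    #define DEMO_ALIGN_GP  0x100u   /* hypothetical: misalignment raises #GP(0)  */
    #define DEMO_ALIGN_SSE 0x200u   /* hypothetical: SSE-style alignment rule    */

    typedef struct { uint64_t au64[2]; } DEMOU128;

    /* What the U128AlignedSse instantiation asks for: 16-byte alignment. */
    static uint32_t const g_fAlignedSseReq = (uint32_t)(sizeof(DEMOU128) - 1)  /* 15 */
                                           | DEMO_ALIGN_GP | DEMO_ALIGN_SSE;

    static int demoCheckAlignment(uint64_t GCPtrMem, uint32_t fReq)
    {
        uint32_t const fAlignMask = fReq & 0xffu;       /* low bits: byte mask     */
        if ((GCPtrMem & fAlignMask) == 0)
            return 0;                                   /* aligned: map and go on  */
        return (fReq & DEMO_ALIGN_GP) ? -1 : -2;        /* -1 ~ raise #GP(0)       */
    }

    int main(void)
    {
        printf("fetch at 0x1000 -> %d\n", demoCheckAlignment(0x1000, g_fAlignedSseReq)); /*  0: ok  */
        printf("fetch at 0x1008 -> %d\n", demoCheckAlignment(0x1008, g_fAlignedSseReq)); /* -1: #GP */
        return 0;
    }

The plain U128 instantiation directly below the new block keeps TMPL_MEM_TYPE_ALIGN at 0, so the same template code performs no alignment check for it.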
trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmpl.cpp.h
r102977 → r103908

 # error "TMPL_MEM_FMT_DESC is undefined"
 #endif
+#ifndef TMPL_MEM_MAP_FLAGS_ADD
+# define TMPL_MEM_MAP_FLAGS_ADD (0)
+#endif
…
     TMPL_MEM_TYPE const *puSrc;
     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, &bUnmapInfo, sizeof(*puSrc), iSegReg, GCPtrMem,
-                                IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
+                                IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD);
     if (rc == VINF_SUCCESS)

The same one-line change is applied at every other iemMemMap/iemMemMapJmp call site in this template: the data fetch, store and Map*Jmp helpers (IEM_ACCESS_DATA_R, _W, _RW and _ATOMIC) as well as the stack push/pop helpers (IEM_ACCESS_STACK_R/_W, including the GCPtrTop variants) now pass TMPL_MEM_TYPE_ALIGN | TMPL_MEM_MAP_FLAGS_ADD, while the fixed-size segment-register push/pop special cases pass (sizeof(uint16_t) - 1) | TMPL_MEM_MAP_FLAGS_ADD and (sizeof(uint32_t) - 1) | TMPL_MEM_MAP_FLAGS_ADD. For example, the word-sized push used for segment registers:

         /* WORD per intel specs. */
         uint16_t *puDst = (uint16_t *)iemMemMapJmp(pVCpu, &bUnmapInfo, sizeof(uint16_t), X86_SREG_SS, GCPtrMem,
-                                                   IEM_ACCESS_STACK_W, sizeof(uint16_t) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
+                                                   IEM_ACCESS_STACK_W, (sizeof(uint16_t) - 1) | TMPL_MEM_MAP_FLAGS_ADD); /** @todo 2 or 4 alignment check for PUSH SS? */
         *puDst = (uint16_t)uValue;

Finally, the new knob is cleaned up together with the template's other parameters:

 #undef TMPL_MEM_FMT_DESC
 #undef TMPL_WITH_PUSH_SREG
+#undef TMPL_MEM_MAP_FLAGS_ADD
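TMPL_MEM_MAP_FLAGS_ADD follows the usual optional-parameter idiom of these include-templates: instantiations that do not define it get the (0) default via the #ifndef block, and the trailing #undef resets it for the next inclusion. A small, self-contained sketch of that idiom (save as tmpl_selfdemo.c and compile as-is; the file re-includes itself as the "template" part, and every name here is invented for the demo, this is not the VBox template):

    /* tmpl_selfdemo.c */
    #ifndef TMPL_TYPE
    /* ---------------- driver part ---------------- */
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_F_STRICT 0x100u

    /* First instantiation: no extra flags, TMPL_FLAGS_ADD defaults to (0). */
    #define TMPL_TYPE uint32_t
    #define TMPL_SUFF U32
    #include "tmpl_selfdemo.c"

    /* Second instantiation: opts into the extra flag. */
    #define TMPL_TYPE      uint64_t
    #define TMPL_SUFF      U64Strict
    #define TMPL_FLAGS_ADD DEMO_F_STRICT
    #include "tmpl_selfdemo.c"

    int main(void)
    {
        printf("U32 flags:       %#x\n", demoFlagsU32());       /* 0x3   */
        printf("U64Strict flags: %#x\n", demoFlagsU64Strict()); /* 0x107 */
        return 0;
    }

    #else
    /* --------- "template" part, expanded once per instantiation --------- */
    #ifndef TMPL_FLAGS_ADD          /* optional parameter, default: nothing extra */
    # define TMPL_FLAGS_ADD (0)
    #endif

    #define TMPL_CAT_(a,b) a##b
    #define TMPL_CAT(a,b)  TMPL_CAT_(a,b)

    static unsigned TMPL_CAT(demoFlags, TMPL_SUFF)(void)
    {
        /* What the real template does with the knob: OR the extra bits into the
           alignment/flags word it passes down. */
        return (unsigned)((sizeof(TMPL_TYPE) - 1) | TMPL_FLAGS_ADD);
    }

    /* Clean up so the next instantiation starts fresh, like the template's
       trailing #undef block. */
    #undef TMPL_TYPE
    #undef TMPL_SUFF
    #undef TMPL_FLAGS_ADD
    #undef TMPL_CAT_
    #undef TMPL_CAT
    #endif

Because the default is a compile-time zero, instantiations that do not opt in generate exactly the code they did before this change.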
trunk/src/VBox/VMM/include/IEMInline.h
r103256 → r103908

 #undef TMPL_MEM_NO_MAPPING
 
+
+/* Every template reyling on unaligned accesses inside a page not being okay should go below. */
+#undef TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK
+#define TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK(a_pVCpu, a_GCPtrEff, a_TmplMemType) 0
+
+#define TMPL_MEM_NO_MAPPING
+#define TMPL_MEM_TYPE       RTUINT128U
+#define TMPL_MEM_TYPE_ALIGN 15
+#define TMPL_MEM_TYPE_SIZE  16
+#define TMPL_MEM_FN_SUFF    U128AlignedSse
+#define TMPL_MEM_FMT_TYPE   "%.16Rhxs"
+#define TMPL_MEM_FMT_DESC   "dqword"
+#include "../VMMAll/IEMAllMemRWTmplInline.cpp.h"
+#undef TMPL_MEM_NO_MAPPING
+
+
 #undef TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK
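The new inline instantiation sits below the point where TMPL_MEM_CHECK_UNALIGNED_WITHIN_PAGE_OK is forced to 0, so the inlined fast path never accepts an access merely because it stays inside one guest page; a misaligned SSE access presumably falls back to the out-of-line workers, which request the #GP-on-misalignment treatment shown earlier. A rough sketch of the distinction that macro encodes (hypothetical helpers, not the real inline template):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GUEST_PAGE_SIZE 4096u

    /* Ordinary data templates: the fast path only needs the access to stay
       within one guest page; misalignment inside the page is fine. */
    static bool unalignedWithinPageOk(uint64_t GCPtrEff, size_t cb)
    {
        return (GCPtrEff & (GUEST_PAGE_SIZE - 1)) + cb <= GUEST_PAGE_SIZE;
    }

    /* AlignedSse template: the within-page shortcut is compiled out (the macro
       is 0), so only naturally aligned addresses stay on the fast path. */
    static bool alignedSseFastPathOk(uint64_t GCPtrEff, size_t cb)
    {
        return (GCPtrEff & (cb - 1)) == 0 && unalignedWithinPageOk(GCPtrEff, cb);
    }

    int main(void)
    {
        uint64_t const aTests[] = { 0x1000, 0x1008, 0x1ff0, 0x1ff8 };
        for (unsigned i = 0; i < sizeof(aTests) / sizeof(aTests[0]); i++)
            printf("%#06llx: within-page-ok=%d  aligned-sse-ok=%d\n",
                   (unsigned long long)aTests[i],
                   unalignedWithinPageOk(aTests[i], 16),
                   alignedSseFastPathOk(aTests[i], 16));
        return 0;
    }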
trunk/src/VBox/VMM/include/IEMInternal.h
r103900 → r103908

 void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
 void iemMemFetchDataU128NoAcJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
 # endif
-void iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
 void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
 void iemMemFetchDataU256NoAcJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP;
…
 void iemMemStoreDataU64SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value) IEM_NOEXCEPT_MAY_LONGJMP;
 void iemMemStoreDataU128SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
-void iemMemStoreDataU128NoAcSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
-void iemMemStoreDataU128AlignedSseSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemStoreDataU128NoAcSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT128U pu128Value) IEM_NOEXCEPT_MAY_LONGJMP;
+void iemMemStoreDataU128AlignedSseSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT128U pu128Value) IEM_NOEXCEPT_MAY_LONGJMP;
 void iemMemStoreDataU256SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;
 void iemMemStoreDataU256NoAcSafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value) IEM_NOEXCEPT_MAY_LONGJMP;