Changeset 103828 in vbox
- Timestamp: Mar 13, 2024 2:01:20 PM
- Location: trunk/src/VBox/VMM
- Files: 16 edited
trunk/src/VBox/VMM/VMMAll/IEMAllInstCommonBodyMacros.h
r103721 → r103828: In the 16-, 32- and 64-bit instruction body macros (both the variants that store the result to the ModR/M-selected register and the compare/test style variants that only update the flags), the IEM_MC_NATIVE_IF paths now commit the flags with

    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \

instead of IEM_MC_COMMIT_EFLAGS(uEFlags); (12 call sites in total).
trunk/src/VBox/VMM/VMMAll/IEMAllInstOneByte.cpp.h
r103801 → r103828: The same IEM_MC_COMMIT_EFLAGS to IEM_MC_COMMIT_EFLAGS_OPT substitution is applied throughout the native-emitter paths of the one-byte opcode bodies (the 8-, 16-, 32- and 64-bit register/register, register/memory, register/immediate and TEST variants). In the accumulator,immediate bodies and the group-1 Ev,Iz register variants the flags commit is additionally moved to after the store of the result, for example:

    IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Dst); \
    IEM_MC_COMMIT_EFLAGS_OPT(uEFlags); \
trunk/src/VBox/VMM/VMMAll/IEMAllInstPython.py
r103825 → r103828: Registers the new MC statement in the parser's statement table, right after IEM_MC_COMMIT_EFLAGS:

    'IEM_MC_COMMIT_EFLAGS_OPT': (McBlock.parseMcGeneric, True, True, True, ),
trunk/src/VBox/VMM/VMMAll/IEMAllN8veLiveness.cpp
r103787 → r103828: The liveness analysis pass maps the new statements onto the existing EFLAGS handling:

    #undef  IEM_MC_COMMIT_EFLAGS_OPT /* unused here */
    #define IEM_MC_COMMIT_EFLAGS_OPT_EX(a_EFlags, a_fEflInput, a_fEflOutput) \
        IEM_MC_COMMIT_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput)
trunk/src/VBox/VMM/VMMAll/IEMAllN8vePython.py
r103768 → r103828: Adds the extended commit statement next to IEM_MC_COMMIT_EFLAGS_EX in the table:

    'IEM_MC_COMMIT_EFLAGS_OPT_EX': (None, True, True, True, ),
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompFuncs.h
r103826 → r103828: Wires the strict EFLAGS-skipping checks and the skipping bookkeeping into the native recompiler function emitters:
- IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(pReNative, off, ...) is now emitted at the start of the EFLAGS-testing conditional helpers (iemNativeEmitIfEflagAnysBitsSet, ...NoBitsSet, ...BitSet, ...BitNotSet, the two- and three-bit combinations, and the CX/ECX/RCX-and-flag loop helpers), before CIMPL calls (with X86_EFL_STATUS_BITS), and in the IEM_MC_FETCH_EFLAGS handler (with fEflInput).
- iemNativeEmitCommitEFlags() gains an fUpdateSkipping parameter: IEM_MC_COMMIT_EFLAGS_EX passes true, the new IEM_MC_COMMIT_EFLAGS_OPT_EX passes false. With IEMNATIVE_STRICT_EFLAGS_SKIPPING defined and fUpdateSkipping true, the helper clears iem.s.fSkippingEFlags when all status bits are among the outputs, otherwise it ANDs the output status bits out of the mask.
- iemNativeEmitRefEFlags() / IEM_MC_REF_EFLAGS_EX now receive the input and output flag masks, emit the strict check for the inputs and update the skipping mask for the outputs ("a little early, but we don't have any other hooks for references atm").
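The runtime effect of the fUpdateSkipping distinction is small enough to show as a stand-alone C sketch. The helper name and plain variables below are hypothetical; the actual change emits the equivalent store/AND instructions against iem.s.fSkippingEFlags instead of calling a function:

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_EFL_STATUS_BITS UINT32_C(0x08d5) /* CF | PF | AF | ZF | SF | OF */

    /* Simplified model of what the recompiled code does when committing EFLAGS. */
    static void CommitEFlagsUpdateSkipping(uint32_t *pfSkippingEFlags, uint32_t fEflOutput, bool fUpdateSkipping)
    {
        if (!fUpdateSkipping)
            return;                 /* IEM_MC_COMMIT_EFLAGS_OPT_EX: the flags emitter maintains the mask itself */
        if ((fEflOutput & X86_EFL_STATUS_BITS) == X86_EFL_STATUS_BITS)
            *pfSkippingEFlags = 0;  /* every status flag has just been recalculated */
        else
            *pfSkippingEFlags &= ~(fEflOutput & X86_EFL_STATUS_BITS); /* only these bits are valid again */
    }

    int main(void)
    {
        uint32_t fSkipping = X86_EFL_STATUS_BITS;   /* pretend an earlier instruction skipped its flags */
        CommitEFlagsUpdateSkipping(&fSkipping, X86_EFL_STATUS_BITS, true /*fUpdateSkipping*/);
        return (int)fSkipping;                      /* 0: everything is up to date again */
    }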
trunk/src/VBox/VMM/VMMAll/IEMAllN8veRecompiler.cpp
r103827 → r103828:
- Adds the iemNativeEmitEFlagsSkippingCheck() worker (built only with IEMNATIVE_STRICT_EFLAGS_SKIPPING) backing the IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK macro. On AMD64 it emits a test of the needed status bits against dword [pVCpu + iem.s.fSkippingEFlags] (imm8 or imm32 form depending on the mask); otherwise it loads the field into a temporary register, tests the needed bits and, on ARM64, emits a brk #0x7777 that fires if any of them are still marked as skipped.
- The check (for X86_EFL_STATUS_BITS) is also emitted before every threaded-function call in iemNativeEmitThreadedCall() and when the common return label is created.
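At runtime the emitted strict check boils down to the following few lines; this is only a sketch, the recompiler places the equivalent test/branch/breakpoint sequence directly into the translation block rather than calling a function:

    #include <stdint.h>
    #include <stdlib.h>

    #define X86_EFL_STATUS_BITS UINT32_C(0x08d5) /* CF | PF | AF | ZF | SF | OF */

    /* Runtime equivalent of the code produced by iemNativeEmitEFlagsSkippingCheck(). */
    static void StrictEFlagsSkippingCheck(uint32_t fSkippingEFlags, uint32_t fEflNeeded)
    {
        fEflNeeded &= X86_EFL_STATUS_BITS;
        if (fSkippingEFlags & fEflNeeded)
            abort();    /* the ARM64 emitter uses brk #0x7777 here: the guest flags would be stale */
    }

    int main(void)
    {
        StrictEFlagsSkippingCheck(0 /*nothing skipped*/, UINT32_C(0x0040) /*ZF, e.g. needed by a Jcc*/);
        return 0;
    }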
trunk/src/VBox/VMM/VMMAll/IEMAllThrdPython.py
r103807 → r103828: IEM_MC_COMMIT_EFLAGS_OPT is added to the list of statements that receive the _EX EFLAGS-usage annotations and to the EFLAGS statement collection used for the @opflmodify consistency checks; the corresponding error message now reads "Uses IEM_MC_COMMIT_EFLAGS[_OPT] but has no flags in @opflmodify!".
trunk/src/VBox/VMM/VMMAll/IEMAllThrdRecompiler.cpp
r103612 → r103828: Includes IEMN8veRecompiler.h when VBOX_WITH_IEM_NATIVE_RECOMPILER is defined and, with IEMNATIVE_STRICT_EFLAGS_SKIPPING, asserts that pVCpu->iem.s.fSkippingEFlags is zero after a translation block has been executed.
trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllN8veEmit-x86.h
r103801 → r103828:
- The logical-operation EFLAGS helper replaces its old IEMNATIVE_WITH_LIVENESS_ANALYSIS placeholder ("check if all bits are clobbered") with an IEMNATIVE_WITH_EFLAGS_SKIPPING @todo and, in strict builds, stores 0 to iem.s.fSkippingEFlags after updating the flags.
- The arithmetic EFLAGS helper now consults the liveness information for the current call: if IEMLIVENESS_STATE_ARE_STATUS_EFL_TO_BE_CLOBBERED() reports that all status flags are overwritten before being read, the whole calculation is skipped, StatNativeEflArithmeticSkipped is incremented and (strict builds) X86_EFL_STATUS_BITS is OR'ed into iem.s.fSkippingEFlags; otherwise the flags are calculated as before and (strict builds) the skipping mask is reset to zero.
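The shape of that decision can be illustrated with a small C sketch. The function below is hypothetical and works on plain variables, whereas the real helper inspects the liveness bitmaps and emits native code:

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_EFL_STATUS_BITS UINT32_C(0x08d5) /* CF | PF | AF | ZF | SF | OF */

    /* Sketch of the skip-or-calculate decision the arithmetic EFLAGS helper now makes. */
    static void UpdateArithmeticEFlags(uint32_t *pfEFlags, uint32_t *pfSkippingEFlags,
                                       bool fStatusFlagsClobberedBeforeUse, uint32_t fNewStatusFlags)
    {
        if (fStatusFlagsClobberedBeforeUse)
        {
            /* Liveness says nobody reads CF/PF/AF/ZF/SF/OF before they are written again:
               skip the calculation and (strict builds) remember that the bits are stale. */
            *pfSkippingEFlags |= X86_EFL_STATUS_BITS;
        }
        else
        {
            /* Calculate as before and mark all status bits as up to date again. */
            *pfEFlags = (*pfEFlags & ~X86_EFL_STATUS_BITS) | (fNewStatusFlags & X86_EFL_STATUS_BITS);
            *pfSkippingEFlags = 0;
        }
    }

    int main(void)
    {
        uint32_t fEFlags = 0x202, fSkipping = 0;
        UpdateArithmeticEFlags(&fEFlags, &fSkipping, true /*clobbered*/, 0); /* e.g. an ADD whose flags die */
        return (int)(fSkipping != X86_EFL_STATUS_BITS);
    }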
trunk/src/VBox/VMM/VMMR3/IEMR3.cpp
r103739 → r103828: Registers the new statistics counter for skipped arithmetic flag updates:

    STAMR3RegisterF(pVM, &pVCpu->iem.s.StatNativeEflArithmeticSkipped, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                    "Skipped all status flag updating, arithmetic instruction",
                    "/IEM/CPU%u/re/NativeEFlagsArithmeticSkipped", idCpu);
trunk/src/VBox/VMM/include/IEMInternal.h
r103813 → r103828: The unused GCPhysTbBranchSrcBufUnused and GCVirtTbBranchSrcBufUnused members are replaced by the new tracking field (plus a padding dword to preserve the layout), and a matching statistics counter is added further down:

    /** Strict: Tracking skipped EFLAGS calculations. Any bits set here are
     * currently not up to date in EFLAGS. */
    uint32_t fSkippingEFlags;
    uint32_t au32Padding[1];

    /** Native recompiler: Number of times status flags calc has been skipped. */
    STAMCOUNTER StatNativeEflArithmeticSkipped;
trunk/src/VBox/VMM/include/IEMMc.h
r103787 → r103828: The interpreter simply aliases the new statements to the existing ones:

    #define IEM_MC_COMMIT_EFLAGS_OPT(a_EFlags)                                IEM_MC_COMMIT_EFLAGS(a_EFlags)
    #define IEM_MC_COMMIT_EFLAGS_OPT_EX(a_EFlags, a_fEflInput, a_fEflOutput) IEM_MC_COMMIT_EFLAGS_EX(a_EFlags, a_fEflInput, a_fEflOutput)
trunk/src/VBox/VMM/include/IEMN8veRecompiler.h
r103812 → r103828: Adds the configuration options and infrastructure for EFLAGS skipping:
- IEMNATIVE_WITH_EFLAGS_SKIPPING ("Enables skipping EFLAGS calculations/updating based on liveness info."), defined whenever IEMNATIVE_WITH_LIVENESS_ANALYSIS is, and IEMNATIVE_STRICT_EFLAGS_SKIPPING ("Enables strict consistency checks around EFLAGS skipping."), defined in VBOX_STRICT builds when skipping is enabled.
- IEMLIVENESSBIT_STATUS_EFL_MASK (UINT64_C(0x003f000000000000)) is added next to IEMLIVENESSBIT_ALL_EFL_MASK.
- IEMLIVENESS_STATE_ARE_STATUS_EFL_TO_BE_CLOBBERED(a_pCurEntry), provided for both the compact and the extended liveness layouts, reports whether all six status flags will be clobbered without being read in the current step.
- IEMNATIVE_STRICT_EFLAGS_SKIPPING_EMIT_CHECK(a_pReNative, a_off, a_fEflNeeded) wraps iemNativeEmitEFlagsSkippingCheck() in strict builds and expands to a NOP otherwise; the worker is declared alongside the other emitter prototypes.
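For the default two-bit liveness layout the new clobber test is a single bitmap check. The stand-alone illustration below uses shortened structure and field names; the mask value is the one introduced by the changeset:

    #include <stdbool.h>
    #include <stdint.h>

    /* Simplified stand-in for one liveness entry: two state bitmaps per recompiled call. */
    typedef struct LIVENESSENTRY
    {
        uint64_t bmBit0;
        uint64_t bmBit1;
    } LIVENESSENTRY;

    #define LIVENESSBIT_STATUS_EFL_MASK UINT64_C(0x003f000000000000) /* tracking bits of CF/PF/AF/ZF/SF/OF */

    /* All six status flags sit in the 'unused' state (both bits clear), meaning they
       will be clobbered before anything reads them and need not be calculated now. */
    static bool AreStatusEflToBeClobbered(const LIVENESSENTRY *pEntry)
    {
        return ((pEntry->bmBit0 | pEntry->bmBit1) & LIVENESSBIT_STATUS_EFL_MASK) == 0;
    }

    int main(void)
    {
        LIVENESSENTRY Entry = { 0, 0 };             /* nothing recorded for the status flags */
        return AreStatusEflToBeClobbered(&Entry) ? 0 : 1;
    }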
trunk/src/VBox/VMM/include/IEMN8veRecompilerEmit.h
r103822 → r103828: Adds three emitter helpers used by the skipping bookkeeping, plus a small comment fix:
- iemNativeEmitStoreImmToVCpuU32() stores a 32-bit immediate to a VMCPU field: mov dword [pVCpu + offVCpu], imm32 on AMD64; on ARM64 via a temporary immediate register (or XZR when the value is zero).
- iemNativeEmitOrImmIntoVCpuU32() and iemNativeEmitAndImmIntoVCpuU32() OR/AND an immediate mask into a 32-bit VMCPU member. On AMD64 they emit or/and dword [pVCpu + offVCpu], imm8/imm32. On ARM64 they load the member into a temporary register, apply ORR/AND (using the immediate encoding when Armv8A64ConvertMask32ToImmRImmS() can represent the mask, otherwise via an extra mask register) and store the result back, with separate paths for small offsets relative to pVCpu, offsets reachable relative to cpum.GstCtx, and large offsets handled through an index register.
- A doxygen note on the fixed jump emitter gains its missing "if": "...pass off + 256 or higher for @a offTarget if one believe the intervening code is more than 127 bytes long."
trunk/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
r103787 → r103828: Adds the syntax-check stub for the new statement:

    #define IEM_MC_COMMIT_EFLAGS_OPT(a_EFlags) do { CHK_TYPE(uint32_t, a_EFlags); (void)fMcBegin; } while (0)