Changeset 100848 in vbox for trunk/src/VBox/VMM
- Timestamp: Aug 10, 2023 12:07:24 AM
- Location: trunk/src/VBox/VMM
- Files: 3 edited
trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmpl.cpp.h
r100826 → r100848

     pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
 # endif
-    Log8(("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
+    Log9(("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
     *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); /* zero is for the TLB hit */
     return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
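For context, these files are body templates: an instantiation header is expected to define TMPL_MEM_TYPE, TMPL_MEM_TYPE_SIZE, TMPL_MEM_TYPE_ALIGN, TMPL_MEM_FN_SUFF and the format macros before including the template, which is how RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp) in the hunks below resolves to a concrete symbol. A minimal sketch of that pattern follows; only the macro names are taken from this changeset, the dword/uint32_t values are illustrative assumptions:

/* Illustrative instantiation only -- the uint32_t/dword values are assumptions,
   not taken from the changeset; only the TMPL_MEM_* names appear in the diffs. */
#define TMPL_MEM_TYPE        uint32_t
#define TMPL_MEM_TYPE_SIZE   4
#define TMPL_MEM_TYPE_ALIGN  3                  /* low address bits that must be clear */
#define TMPL_MEM_FN_SUFF     U32
#define TMPL_MEM_FMT_TYPE    "%#010x"
#define TMPL_MEM_FMT_DESC    "dword"
#include "IEMAllMemRWTmplInline.cpp.h"          /* emits iemMemMapDataU32RwJmp() and friends */
#undef  TMPL_MEM_TYPE
#undef  TMPL_MEM_TYPE_SIZE
#undef  TMPL_MEM_TYPE_ALIGN
#undef  TMPL_MEM_FN_SUFF
#undef  TMPL_MEM_FMT_TYPE
#undef  TMPL_MEM_FMT_DESC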
trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmplInline.cpp.h
r100826 → r100848

                                  uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
 {
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
+    /*
+     * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
+     */
+    RTGCPTR GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
+#  if TMPL_MEM_TYPE_SIZE > 1
+    if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(TMPL_MEM_TYPE)))
+#  endif
+    {
+        /*
+         * TLB lookup.
+         */
+        uint64_t const uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, GCPtrEff);
+        PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
+        if (pTlbe->uTag == uTag)
+        {
+            /*
+             * Check TLB page table level access flags.
+             */
+            AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
+            uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
+            if (   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV       | IEMTLBE_F_NO_MAPPINGR3
+                                               | IEMTLBE_F_PG_UNASSIGNED  | IEMTLBE_F_PG_NO_WRITE  | IEMTLBE_F_PG_NO_READ
+                                               | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY  | IEMTLBE_F_PT_NO_WRITE
+                                               | fNoUser))
+                == pVCpu->iem.s.DataTlb.uTlbPhysRev)
+            {
+                STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
+
+#  if TMPL_MEM_TYPE_ALIGN != 0
+                /*
+                 * Alignment check:
+                 */
+                /** @todo check priority \#AC vs \#PF */
+                AssertCompile(X86_CR0_AM == X86_EFL_AC);
+                AssertCompile(((3U + 1U) << 16) == X86_CR0_AM);
+                if (   !(GCPtrEff & TMPL_MEM_TYPE_ALIGN)
+                    || !(   (uint32_t)pVCpu->cpum.GstCtx.cr0
+                          & pVCpu->cpum.GstCtx.eflags.u
+                          & ((IEM_GET_CPL(pVCpu) + 1U) << 16) /* IEM_GET_CPL(pVCpu) == 3 ? X86_CR0_AM : 0 */
+                          & X86_CR0_AM))
+#  endif
+                {
+                    /*
+                     * Return the address.
+                     */
+                    Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
+                    Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
+                    *pbUnmapInfo = 0;
+                    Log8(("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv: %p\n",
+                          iSegReg, GCPtrMem, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
+                    return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
+                }
+#  if TMPL_MEM_TYPE_ALIGN != 0
+                Log10Func(("Raising #AC for %RGv\n", GCPtrEff));
+                iemRaiseAlignmentCheckExceptionJmp(pVCpu);
+#  endif
+            }
+        }
+    }
+
+    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception
+       outdated page pointer, or other troubles.  (This will do a TLB load.) */
+    Log10Func(("%u:%RGv falling back\n", iSegReg, GCPtrMem));
+# endif
     return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
 }
…
                                  RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
 {
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
+    /*
+     * Check that the address doesn't cross a page boundrary.
+     */
+#  if TMPL_MEM_TYPE_SIZE > 1
+    if (RT_LIKELY((GCPtrMem & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(TMPL_MEM_TYPE)))
+#  endif
+    {
+        /*
+         * TLB lookup.
+         */
+        uint64_t const uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, GCPtrMem);
+        PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
+        if (pTlbe->uTag == uTag)
+        {
+            /*
+             * Check TLB page table level access flags.
+             */
+            AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
+            uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
+            if (   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV       | IEMTLBE_F_NO_MAPPINGR3
+                                               | IEMTLBE_F_PG_UNASSIGNED  | IEMTLBE_F_PG_NO_WRITE  | IEMTLBE_F_PG_NO_READ
+                                               | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY  | IEMTLBE_F_PT_NO_WRITE
+                                               | fNoUser))
+                == pVCpu->iem.s.DataTlb.uTlbPhysRev)
+            {
+                STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
+
+#  if TMPL_MEM_TYPE_ALIGN != 0
+                /*
+                 * Alignment check:
+                 */
+                /** @todo check priority \#AC vs \#PF */
+                AssertCompile(X86_CR0_AM == X86_EFL_AC);
+                AssertCompile(((3U + 1U) << 16) == X86_CR0_AM);
+                if (   !(GCPtrMem & TMPL_MEM_TYPE_ALIGN)
+                    || !(   (uint32_t)pVCpu->cpum.GstCtx.cr0
+                          & pVCpu->cpum.GstCtx.eflags.u
+                          & ((IEM_GET_CPL(pVCpu) + 1U) << 16) /* IEM_GET_CPL(pVCpu) == 3 ? X86_CR0_AM : 0 */
+                          & X86_CR0_AM))
+#  endif
+                {
+                    /*
+                     * Return the address.
+                     */
+                    Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
+                    Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
+                    *pbUnmapInfo = 0;
+                    Log8(("IEM RW/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
+                          GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
+                    return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
+                }
+#  if TMPL_MEM_TYPE_ALIGN != 0
+                Log10Func(("Raising #AC for %RGv\n", GCPtrMem));
+                iemRaiseAlignmentCheckExceptionJmp(pVCpu);
+#  endif
+            }
+        }
+    }
+
+    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception
+       outdated page pointer, or other troubles.  (This will do a TLB load.) */
+    Log10Func(("%RGv falling back\n", GCPtrMem));
+# endif
     return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
 }
…
                                  uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
 {
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
+    /*
+     * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
+     */
+    RTGCPTR GCPtrEff = iemMemApplySegmentToWriteJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
+#  if TMPL_MEM_TYPE_SIZE > 1
+    if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(TMPL_MEM_TYPE)))
+#  endif
+    {
+        /*
+         * TLB lookup.
+         */
+        uint64_t const uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, GCPtrEff);
+        PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
+        if (pTlbe->uTag == uTag)
+        {
+            /*
+             * Check TLB page table level access flags.
+             */
+            AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
+            uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
+            if (   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV       | IEMTLBE_F_NO_MAPPINGR3
+                                               | IEMTLBE_F_PG_UNASSIGNED  | IEMTLBE_F_PG_NO_WRITE
+                                               | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY  | IEMTLBE_F_PT_NO_WRITE
+                                               | fNoUser))
+                == pVCpu->iem.s.DataTlb.uTlbPhysRev)
+            {
+                STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
+
+#  if TMPL_MEM_TYPE_ALIGN != 0
+                /*
+                 * Alignment check:
+                 */
+                /** @todo check priority \#AC vs \#PF */
+                AssertCompile(X86_CR0_AM == X86_EFL_AC);
+                AssertCompile(((3U + 1U) << 16) == X86_CR0_AM);
+                if (   !(GCPtrEff & TMPL_MEM_TYPE_ALIGN)
+                    || !(   (uint32_t)pVCpu->cpum.GstCtx.cr0
+                          & pVCpu->cpum.GstCtx.eflags.u
+                          & ((IEM_GET_CPL(pVCpu) + 1U) << 16) /* IEM_GET_CPL(pVCpu) == 3 ? X86_CR0_AM : 0 */
+                          & X86_CR0_AM))
+#  endif
+                {
+                    /*
+                     * Return the address.
+                     */
+                    Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
+                    Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
+                    *pbUnmapInfo = 0;
+                    Log8(("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv: %p\n",
+                          iSegReg, GCPtrMem, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
+                    return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
+                }
+#  if TMPL_MEM_TYPE_ALIGN != 0
+                Log10Func(("Raising #AC for %RGv\n", GCPtrEff));
+                iemRaiseAlignmentCheckExceptionJmp(pVCpu);
+#  endif
+            }
+        }
+    }
+
+    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception
+       outdated page pointer, or other troubles.  (This will do a TLB load.) */
+    Log10Func(("%u:%RGv falling back\n", iSegReg, GCPtrMem));
+# endif
     return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
 }
…
                                  RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
 {
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
+    /*
+     * Check that the address doesn't cross a page boundrary.
+     */
+#  if TMPL_MEM_TYPE_SIZE > 1
+    if (RT_LIKELY((GCPtrMem & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(TMPL_MEM_TYPE)))
+#  endif
+    {
+        /*
+         * TLB lookup.
+         */
+        uint64_t const uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, GCPtrMem);
+        PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
+        if (pTlbe->uTag == uTag)
+        {
+            /*
+             * Check TLB page table level access flags.
+             */
+            AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
+            uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
+            if (   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV       | IEMTLBE_F_NO_MAPPINGR3
+                                               | IEMTLBE_F_PG_UNASSIGNED  | IEMTLBE_F_PG_NO_WRITE
+                                               | IEMTLBE_F_PT_NO_ACCESSED | IEMTLBE_F_PT_NO_DIRTY  | IEMTLBE_F_PT_NO_WRITE
+                                               | fNoUser))
+                == pVCpu->iem.s.DataTlb.uTlbPhysRev)
+            {
+                STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
+
+#  if TMPL_MEM_TYPE_ALIGN != 0
+                /*
+                 * Alignment check:
+                 */
+                /** @todo check priority \#AC vs \#PF */
+                AssertCompile(X86_CR0_AM == X86_EFL_AC);
+                AssertCompile(((3U + 1U) << 16) == X86_CR0_AM);
+                if (   !(GCPtrMem & TMPL_MEM_TYPE_ALIGN)
+                    || !(   (uint32_t)pVCpu->cpum.GstCtx.cr0
+                          & pVCpu->cpum.GstCtx.eflags.u
+                          & ((IEM_GET_CPL(pVCpu) + 1U) << 16) /* IEM_GET_CPL(pVCpu) == 3 ? X86_CR0_AM : 0 */
+                          & X86_CR0_AM))
+#  endif
+                {
+                    /*
+                     * Return the address.
+                     */
+                    Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
+                    Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
+                    *pbUnmapInfo = 0;
+                    Log8(("IEM WO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
+                          GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
+                    return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
+                }
+#  if TMPL_MEM_TYPE_ALIGN != 0
+                Log10Func(("Raising #AC for %RGv\n", GCPtrMem));
+                iemRaiseAlignmentCheckExceptionJmp(pVCpu);
+#  endif
+            }
+        }
+    }
+
+    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception
+       outdated page pointer, or other troubles.  (This will do a TLB load.) */
+    Log10Func(("%RGv falling back\n", GCPtrMem));
+# endif
     return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
 }
…
                                  uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
 {
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
+    /*
+     * Convert from segmented to flat address and check that it doesn't cross a page boundrary.
+     */
+    RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(TMPL_MEM_TYPE), GCPtrMem);
+#  if TMPL_MEM_TYPE_SIZE > 1
+    if (RT_LIKELY((GCPtrEff & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(TMPL_MEM_TYPE)))
+#  endif
+    {
+        /*
+         * TLB lookup.
+         */
+        uint64_t const uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, GCPtrEff);
+        PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
+        if (pTlbe->uTag == uTag)
+        {
+            /*
+             * Check TLB page table level access flags.
+             */
+            AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
+            uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
+            if (   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV      | IEMTLBE_F_NO_MAPPINGR3
+                                               | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
+                                               | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
+                == pVCpu->iem.s.DataTlb.uTlbPhysRev)
+            {
+                STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
+
+#  if TMPL_MEM_TYPE_ALIGN != 0
+                /*
+                 * Alignment check:
+                 */
+                /** @todo check priority \#AC vs \#PF */
+                AssertCompile(X86_CR0_AM == X86_EFL_AC);
+                AssertCompile(((3U + 1U) << 16) == X86_CR0_AM);
+                if (   !(GCPtrEff & TMPL_MEM_TYPE_ALIGN)
+                    || !(   (uint32_t)pVCpu->cpum.GstCtx.cr0
+                          & pVCpu->cpum.GstCtx.eflags.u
+                          & ((IEM_GET_CPL(pVCpu) + 1U) << 16) /* IEM_GET_CPL(pVCpu) == 3 ? X86_CR0_AM : 0 */
+                          & X86_CR0_AM))
+#  endif
+                {
+                    /*
+                     * Return the address.
+                     */
+                    Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
+                    Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
+                    *pbUnmapInfo = 0;
+                    Log9(("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv: %p\n",
+                          iSegReg, GCPtrMem, &pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK]));
+                    return (TMPL_MEM_TYPE *)&pTlbe->pbMappingR3[GCPtrEff & GUEST_PAGE_OFFSET_MASK];
+                }
+#  if TMPL_MEM_TYPE_ALIGN != 0
+                Log10Func(("Raising #AC for %RGv\n", GCPtrEff));
+                iemRaiseAlignmentCheckExceptionJmp(pVCpu);
+#  endif
+            }
+        }
+    }
+
+    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception
+       outdated page pointer, or other troubles.  (This will do a TLB load.) */
+    Log10Func(("%u:%RGv falling back\n", iSegReg, GCPtrMem));
+# endif
     return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, iSegReg, GCPtrMem);
 }
…
                                  RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
 {
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3) && !defined(TMPL_MEM_NO_INLINE)
+    /*
+     * Check that the address doesn't cross a page boundrary.
+     */
+#  if TMPL_MEM_TYPE_SIZE > 1
+    if (RT_LIKELY((GCPtrMem & GUEST_PAGE_OFFSET_MASK) <= GUEST_PAGE_SIZE - sizeof(TMPL_MEM_TYPE)))
+#  endif
+    {
+        /*
+         * TLB lookup.
+         */
+        uint64_t const uTag  = IEMTLB_CALC_TAG(    &pVCpu->iem.s.DataTlb, GCPtrMem);
+        PIEMTLBENTRY   pTlbe = IEMTLB_TAG_TO_ENTRY(&pVCpu->iem.s.DataTlb, uTag);
+        if (pTlbe->uTag == uTag)
+        {
+            /*
+             * Check TLB page table level access flags.
+             */
+            AssertCompile(IEMTLBE_F_PT_NO_USER == 4);
+            uint64_t const fNoUser = (IEM_GET_CPL(pVCpu) + 1) & IEMTLBE_F_PT_NO_USER;
+            if (   (pTlbe->fFlagsAndPhysRev & (  IEMTLBE_F_PHYS_REV      | IEMTLBE_F_NO_MAPPINGR3
+                                               | IEMTLBE_F_PG_UNASSIGNED | IEMTLBE_F_PG_NO_READ
+                                               | IEMTLBE_F_PT_NO_ACCESSED | fNoUser))
+                == pVCpu->iem.s.DataTlb.uTlbPhysRev)
+            {
+                STAM_STATS({pVCpu->iem.s.DataTlb.cTlbHits++;});
+
+#  if TMPL_MEM_TYPE_ALIGN != 0
+                /*
+                 * Alignment check:
+                 */
+                /** @todo check priority \#AC vs \#PF */
+                AssertCompile(X86_CR0_AM == X86_EFL_AC);
+                AssertCompile(((3U + 1U) << 16) == X86_CR0_AM);
+                if (   !(GCPtrMem & TMPL_MEM_TYPE_ALIGN)
+                    || !(   (uint32_t)pVCpu->cpum.GstCtx.cr0
+                          & pVCpu->cpum.GstCtx.eflags.u
+                          & ((IEM_GET_CPL(pVCpu) + 1U) << 16) /* IEM_GET_CPL(pVCpu) == 3 ? X86_CR0_AM : 0 */
+                          & X86_CR0_AM))
+#  endif
+                {
+                    /*
+                     * Return the address.
+                     */
+                    Assert(pTlbe->pbMappingR3); /* (Only ever cleared by the owning EMT.) */
+                    Assert(!((uintptr_t)pTlbe->pbMappingR3 & GUEST_PAGE_OFFSET_MASK));
+                    *pbUnmapInfo = 0;
+                    Log8(("IEM RO/map " TMPL_MEM_FMT_DESC " %RGv: %p\n",
+                          GCPtrMem, &pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK]));
+                    return (TMPL_MEM_TYPE const *)&pTlbe->pbMappingR3[GCPtrMem & GUEST_PAGE_OFFSET_MASK];
+                }
+#  if TMPL_MEM_TYPE_ALIGN != 0
+                Log10Func(("Raising #AC for %RGv\n", GCPtrMem));
+                iemRaiseAlignmentCheckExceptionJmp(pVCpu);
+#  endif
+            }
+        }
+    }
+
+    /* Fall back on the slow careful approach in case of TLB miss, MMIO, exception
+       outdated page pointer, or other troubles.  (This will do a TLB load.) */
+    Log10Func(("%RGv falling back\n", GCPtrMem));
+# endif
     return RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(pVCpu, pbUnmapInfo, UINT8_MAX, GCPtrMem);
 }
trunk/src/VBox/VMM/include/IEMInline.h
r100847 → r100848

 DECL_INLINE_THROW(void) iemMemCommitAndUnmapRwJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
 {
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
+    if (RT_LIKELY(bMapInfo == 0))
+        return;
+# endif
     iemMemCommitAndUnmapRwSafeJmp(pVCpu, pvMem, bMapInfo);
 }
…
 DECL_INLINE_THROW(void) iemMemCommitAndUnmapWoJmp(PVMCPUCC pVCpu, void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
 {
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
+    if (RT_LIKELY(bMapInfo == 0))
+        return;
+# endif
     iemMemCommitAndUnmapWoSafeJmp(pVCpu, pvMem, bMapInfo);
 }
…
 DECL_INLINE_THROW(void) iemMemCommitAndUnmapRoJmp(PVMCPUCC pVCpu, const void *pvMem, uint8_t bMapInfo) IEM_NOEXCEPT_MAY_LONGJMP
 {
+# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
+    if (RT_LIKELY(bMapInfo == 0))
+        return;
+# endif
     iemMemCommitAndUnmapRoSafeJmp(pVCpu, pvMem, bMapInfo);
 }
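These unmap changes pair with the new inline map fast paths above: a direct ring-3 TLB mapping reports *pbUnmapInfo == 0, so the matching commit-and-unmap becomes a no-op, and only a safe/bounce mapping (non-zero info, as set in the Log9 hunk at the top) still reaches the out-of-line worker. A hedged sketch of the caller pattern follows; it assumes the VBox/IEM headers and the U32 instantiation sketched earlier, and the iemMemMapDataU32RwJmp name is an assumption rather than something shown in this changeset:

/* Hedged caller sketch -- not part of the changeset. */
static void iemSketchAddU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t uAddend)
{
    uint8_t   bUnmapInfo;
    /* Fast path: a direct ring-3 pointer into the guest page (bUnmapInfo == 0).
       Slow path: a bounce/safe mapping (bUnmapInfo != 0). */
    uint32_t *pu32Dst = iemMemMapDataU32RwJmp(pVCpu, &bUnmapInfo, iSegReg, GCPtrMem);
    *pu32Dst += uAddend;
    /* With r100848 this returns immediately when bUnmapInfo == 0 and only calls
       iemMemCommitAndUnmapRwSafeJmp() for the bounce-buffered case. */
    iemMemCommitAndUnmapRwJmp(pVCpu, pu32Dst, bUnmapInfo);
}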