Changeset 89407 in vbox
- Timestamp:
- May 31, 2021 4:32:28 PM (4 years ago)
- svn:sync-xref-src-repo-rev:
- 144759
- Location:
- trunk
- Files:
- 2 edited
Legend:
- Unmodified
- Added
- Removed
trunk/include/VBox/iommu-intel.h
r89365 r89407 517 517 518 518 519 /** @name Second-Level P aging Entry.519 /** @name Second-Level PML5E. 520 520 * In accordance with the Intel spec. 521 521 * @{ */ 522 522 /** R: Read. */ 523 #define VTD_BF_SL P_ENTRY_R_SHIFT0524 #define VTD_BF_SL P_ENTRY_R_MASKUINT64_C(0x0000000000000001)523 #define VTD_BF_SL_PML5E_R_SHIFT 0 524 #define VTD_BF_SL_PML5E_R_MASK UINT64_C(0x0000000000000001) 525 525 /** W: Write. */ 526 #define VTD_BF_SL P_ENTRY_W_SHIFT1527 #define VTD_BF_SL P_ENTRY_W_MASKUINT64_C(0x0000000000000002)526 #define VTD_BF_SL_PML5E_W_SHIFT 1 527 #define VTD_BF_SL_PML5E_W_MASK UINT64_C(0x0000000000000002) 528 528 /** X: Execute. */ 529 #define VTD_BF_SL P_ENTRY_X_SHIFT2530 #define VTD_BF_SL P_ENTRY_X_MASKUINT64_C(0x0000000000000004)529 #define VTD_BF_SL_PML5E_X_SHIFT 2 530 #define VTD_BF_SL_PML5E_X_MASK UINT64_C(0x0000000000000004) 531 531 /** IGN: Ignored (bits 6:3). */ 532 #define VTD_BF_SL P_ENTRY_IGN_6_3_SHIFT3533 #define VTD_BF_SL P_ENTRY_IGN_6_3_MASKUINT64_C(0x0000000000000078)532 #define VTD_BF_SL_PML5E_IGN_6_3_SHIFT 3 533 #define VTD_BF_SL_PML5E_IGN_6_3_MASK UINT64_C(0x0000000000000078) 534 534 /** R: Reserved (bit 7). */ 535 #define VTD_BF_SL P_ENTRY_RSVD_7_SHIFT7536 #define VTD_BF_SL P_ENTRY_RSVD_7_MASKUINT64_C(0x0000000000000080)535 #define VTD_BF_SL_PML5E_RSVD_7_SHIFT 7 536 #define VTD_BF_SL_PML5E_RSVD_7_MASK UINT64_C(0x0000000000000080) 537 537 /** A: Accessed. */ 538 #define VTD_BF_SL P_ENTRY_A_SHIFT8539 #define VTD_BF_SL P_ENTRY_A_MASKUINT64_C(0x0000000000000100)538 #define VTD_BF_SL_PML5E_A_SHIFT 8 539 #define VTD_BF_SL_PML5E_A_MASK UINT64_C(0x0000000000000100) 540 540 /** IGN: Ignored (bits 10:9). */ 541 #define VTD_BF_SL P_ENTRY_IGN_10_9_SHIFT9542 #define VTD_BF_SL P_ENTRY_IGN_10_9_MASKUINT64_C(0x0000000000000600)541 #define VTD_BF_SL_PML5E_IGN_10_9_SHIFT 9 542 #define VTD_BF_SL_PML5E_IGN_10_9_MASK UINT64_C(0x0000000000000600) 543 543 /** R: Reserved (bit 11). 
*/ 544 #define VTD_BF_SL P_ENTRY_RSVD_11_SHIFT11545 #define VTD_BF_SL P_ENTRY_RSVD_11_MASKUINT64_C(0x0000000000000800)544 #define VTD_BF_SL_PML5E_RSVD_11_SHIFT 11 545 #define VTD_BF_SL_PML5E_RSVD_11_MASK UINT64_C(0x0000000000000800) 546 546 /** ADDR: Address. */ 547 #define VTD_BF_SL P_ENTRY_ADDR_SHIFT12548 #define VTD_BF_SL P_ENTRY_ADDR_MASKUINT64_C(0x000ffffffffff000)547 #define VTD_BF_SL_PML5E_ADDR_SHIFT 12 548 #define VTD_BF_SL_PML5E_ADDR_MASK UINT64_C(0x000ffffffffff000) 549 549 /** IGN: Ignored (bits 61:52). */ 550 #define VTD_BF_SL P_ENTRY_IGN_61_52_SHIFT52551 #define VTD_BF_SL P_ENTRY_IGN_61_52_MASKUINT64_C(0x3ff0000000000000)550 #define VTD_BF_SL_PML5E_IGN_61_52_SHIFT 52 551 #define VTD_BF_SL_PML5E_IGN_61_52_MASK UINT64_C(0x3ff0000000000000) 552 552 /** R: Reserved (bit 62). */ 553 #define VTD_BF_SL P_ENTRY_RSVD_62_SHIFT62554 #define VTD_BF_SL P_ENTRY_RSVD_62_MASKUINT64_C(0x4000000000000000)553 #define VTD_BF_SL_PML5E_RSVD_62_SHIFT 62 554 #define VTD_BF_SL_PML5E_RSVD_62_MASK UINT64_C(0x4000000000000000) 555 555 /** IGN: Ignored (bit 63). */ 556 #define VTD_BF_SL P_ENTRY_IGN_63_SHIFT63557 #define VTD_BF_SL P_ENTRY_IGN_63_MASKUINT64_C(0x8000000000000000)558 RT_BF_ASSERT_COMPILE_CHECKS(VTD_BF_SL P_ENTRY_, UINT64_C(0), UINT64_MAX,556 #define VTD_BF_SL_PML5E_IGN_63_SHIFT 63 557 #define VTD_BF_SL_PML5E_IGN_63_MASK UINT64_C(0x8000000000000000) 558 RT_BF_ASSERT_COMPILE_CHECKS(VTD_BF_SL_PML5E_, UINT64_C(0), UINT64_MAX, 559 559 (R, W, X, IGN_6_3, RSVD_7, A, IGN_10_9, RSVD_11, ADDR, IGN_61_52, RSVD_62, IGN_63)); 560 560 561 /** SL-PML5E: Valid mask. */ 562 #define VTD_SLP_PML5E_VALID_MASK ( VTD_BF_SLP_ENTRY_R_MASK | VTD_BF_SLP_ENTRY_W_MASK \ 563 | VTD_BF_SLP_ENTRY_X_MASK | VTD_BF_SLP_ENTRY_IGN_6_3_MASK \ 564 | VTD_BF_SLP_ENTRY_A_MASK | VTD_BF_SLP_ENTRY_IGN_10_9_MASK \ 565 | VTD_BF_SLP_ENTRY_ADDR_MASK | VTD_BF_SLP_ENTRY_IGN_61_52_MASK \ 566 | VTD_BF_SLP_ENTRY_IGN_63_MASK) 567 561 /** Second-level PML5E valid mask. 
*/ 562 #define VTD_SL_PML5E_VALID_MASK ( VTD_BF_SL_PML5E_R_MASK | VTD_BF_SL_PML5E_W_MASK \ 563 | VTD_BF_SL_PML5E_X_MASK | VTD_BF_SL_PML5E_IGN_6_3_MASK \ 564 | VTD_BF_SL_PML5E_A_MASK | VTD_BF_SL_PML5E_IGN_10_9_MASK \ 565 | VTD_BF_SL_PML5E_ADDR_MASK | VTD_BF_SL_PML5E_IGN_61_52_MASK \ 566 | VTD_BF_SL_PML5E_IGN_63_MASK) 567 /** @} */ 568 569 570 /** @name Second-Level PML4E. 571 * In accordance with the Intel spec. 572 * @{ */ 573 /** R: Read. */ 574 #define VTD_BF_SL_PML4E_R_SHIFT 0 575 #define VTD_BF_SL_PML4E_R_MASK UINT64_C(0x0000000000000001) 576 /** W: Write. */ 577 #define VTD_BF_SL_PML4E_W_SHIFT 1 578 #define VTD_BF_SL_PML4E_W_MASK UINT64_C(0x0000000000000002) 579 /** X: Execute. */ 580 #define VTD_BF_SL_PML4E_X_SHIFT 2 581 #define VTD_BF_SL_PML4E_X_MASK UINT64_C(0x0000000000000004) 582 /** IGN: Ignored (bits 6:3). */ 583 #define VTD_BF_SL_PML4E_IGN_6_3_SHIFT 3 584 #define VTD_BF_SL_PML4E_IGN_6_3_MASK UINT64_C(0x0000000000000078) 585 /** R: Reserved (bit 7). */ 586 #define VTD_BF_SL_PML4E_RSVD_7_SHIFT 7 587 #define VTD_BF_SL_PML4E_RSVD_7_MASK UINT64_C(0x0000000000000080) 588 /** A: Accessed. */ 589 #define VTD_BF_SL_PML4E_A_SHIFT 8 590 #define VTD_BF_SL_PML4E_A_MASK UINT64_C(0x0000000000000100) 591 /** IGN: Ignored (bits 10:9). */ 592 #define VTD_BF_SL_PML4E_IGN_10_9_SHIFT 9 593 #define VTD_BF_SL_PML4E_IGN_10_9_MASK UINT64_C(0x0000000000000600) 594 /** R: Reserved (bit 11). */ 595 #define VTD_BF_SL_PML4E_RSVD_11_SHIFT 11 596 #define VTD_BF_SL_PML4E_RSVD_11_MASK UINT64_C(0x0000000000000800) 597 /** ADDR: Address. */ 598 #define VTD_BF_SL_PML4E_ADDR_SHIFT 12 599 #define VTD_BF_SL_PML4E_ADDR_MASK UINT64_C(0x000ffffffffff000) 600 /** IGN: Ignored (bits 61:52). */ 601 #define VTD_BF_SL_PML4E_IGN_61_52_SHIFT 52 602 #define VTD_BF_SL_PML4E_IGN_61_52_MASK UINT64_C(0x3ff0000000000000) 603 /** R: Reserved (bit 62). */ 604 #define VTD_BF_SL_PML4E_RSVD_62_SHIFT 62 605 #define VTD_BF_SL_PML4E_RSVD_62_MASK UINT64_C(0x4000000000000000) 606 /** IGN: Ignored (bit 63). 
*/ 607 #define VTD_BF_SL_PML4E_IGN_63_SHIFT 63 608 #define VTD_BF_SL_PML4E_IGN_63_MASK UINT64_C(0x8000000000000000) 609 RT_BF_ASSERT_COMPILE_CHECKS(VTD_BF_SL_PML4E_, UINT64_C(0), UINT64_MAX, 610 (R, W, X, IGN_6_3, RSVD_7, A, IGN_10_9, RSVD_11, ADDR, IGN_61_52, RSVD_62, IGN_63)); 611 612 /** Second-level PML4E valid mask. */ 613 #define VTD_SL_PML4E_VALID_MASK VTD_SL_PML5E_VALID_MASK 614 /** @} */ 615 616 617 /** @name Second-Level PDPE (1GB Page). 618 * In accordance with the Intel spec. 619 * @{ */ 620 /** R: Read. */ 621 #define VTD_BF_SL_PDPE1G_R_SHIFT 0 622 #define VTD_BF_SL_PDPE1G_R_MASK UINT64_C(0x0000000000000001) 623 /** W: Write. */ 624 #define VTD_BF_SL_PDPE1G_W_SHIFT 1 625 #define VTD_BF_SL_PDPE1G_W_MASK UINT64_C(0x0000000000000002) 626 /** X: Execute. */ 627 #define VTD_BF_SL_PDPE1G_X_SHIFT 2 628 #define VTD_BF_SL_PDPE1G_X_MASK UINT64_C(0x0000000000000004) 629 /** EMT: Extended Memory Type. */ 630 #define VTD_BF_SL_PDPE1G_EMT_SHIFT 3 631 #define VTD_BF_SL_PDPE1G_EMT_MASK UINT64_C(0x0000000000000038) 632 /** IPAT: Ignore PAT (Page Attribute Table). */ 633 #define VTD_BF_SL_PDPE1G_IPAT_SHIFT 6 634 #define VTD_BF_SL_PDPE1G_IPAT_MASK UINT64_C(0x0000000000000040) 635 /** PS: Page Size (MB1). */ 636 #define VTD_BF_SL_PDPE1G_PS_SHIFT 7 637 #define VTD_BF_SL_PDPE1G_PS_MASK UINT64_C(0x0000000000000080) 638 /** A: Accessed. */ 639 #define VTD_BF_SL_PDPE1G_A_SHIFT 8 640 #define VTD_BF_SL_PDPE1G_A_MASK UINT64_C(0x0000000000000100) 641 /** D: Dirty. */ 642 #define VTD_BF_SL_PDPE1G_D_SHIFT 9 643 #define VTD_BF_SL_PDPE1G_D_MASK UINT64_C(0x0000000000000200) 644 /** IGN: Ignored (bit 10). */ 645 #define VTD_BF_SL_PDPE1G_IGN_10_SHIFT 10 646 #define VTD_BF_SL_PDPE1G_IGN_10_MASK UINT64_C(0x0000000000000400) 647 /** R: Reserved (bit 11). */ 648 #define VTD_BF_SL_PDPE1G_RSVD_11_SHIFT 11 649 #define VTD_BF_SL_PDPE1G_RSVD_11_MASK UINT64_C(0x0000000000000800) 650 /** R: Reserved (bits 29:12). 
*/ 651 #define VTD_BF_SL_PDPE1G_RSVD_29_12_SHIFT 12 652 #define VTD_BF_SL_PDPE1G_RSVD_29_12_MASK UINT64_C(0x000000003ffff000) 653 /** ADDR: Address of 1GB page. */ 654 #define VTD_BF_SL_PDPE1G_ADDR_SHIFT 30 655 #define VTD_BF_SL_PDPE1G_ADDR_MASK UINT64_C(0x000fffffc0000000) 656 /** IGN: Ignored (bits 61:52). */ 657 #define VTD_BF_SL_PDPE1G_IGN_61_52_SHIFT 52 658 #define VTD_BF_SL_PDPE1G_IGN_61_52_MASK UINT64_C(0x3ff0000000000000) 659 /** R: Reserved (bit 62). */ 660 #define VTD_BF_SL_PDPE1G_RSVD_62_SHIFT 62 661 #define VTD_BF_SL_PDPE1G_RSVD_62_MASK UINT64_C(0x4000000000000000) 662 /** IGN: Ignored (bit 63). */ 663 #define VTD_BF_SL_PDPE1G_IGN_63_SHIFT 63 664 #define VTD_BF_SL_PDPE1G_IGN_63_MASK UINT64_C(0x8000000000000000) 665 RT_BF_ASSERT_COMPILE_CHECKS(VTD_BF_SL_PDPE1G_, UINT64_C(0), UINT64_MAX, 666 (R, W, X, EMT, IPAT, PS, A, D, IGN_10, RSVD_11, RSVD_29_12, ADDR, IGN_61_52, RSVD_62, IGN_63)); 667 668 /** Second-level PDPE (1GB Page) valid mask. */ 669 #define VTD_SL_PDPE1G_VALID_MASK ( VTD_BF_SL_PDPE1G_R_MASK | VTD_BF_SL_PDPE1G_W_MASK \ 670 | VTD_BF_SL_PDPE1G_X_MASK | VTD_BF_SL_PDPE1G_EMT_MASK \ 671 | VTD_BF_SL_PDPE1G_IPAT_MASK | VTD_BF_SL_PDPE1G_PS_MASK \ 672 | VTD_BF_SL_PDPE1G_A_MASK | VTD_BF_SL_PDPE1G_D_MASK \ 673 | VTD_BF_SL_PDPE1G_IGN_10_MASK | VTD_BF_SL_PDPE1G_ADDR_MASK \ 674 | VTD_BF_SL_PDPE1G_IGN_61_52_MASK | VTD_BF_SL_PDPE1G_IGN_63_MASK) 675 /** @} */ 676 677 678 /** @name Second-Level PDPE. 679 * In accordance with the Intel spec. 680 * @{ */ 681 /** R: Read. */ 682 #define VTD_BF_SL_PDPE_R_SHIFT 0 683 #define VTD_BF_SL_PDPE_R_MASK UINT64_C(0x0000000000000001) 684 /** W: Write. */ 685 #define VTD_BF_SL_PDPE_W_SHIFT 1 686 #define VTD_BF_SL_PDPE_W_MASK UINT64_C(0x0000000000000002) 687 /** X: Execute. */ 688 #define VTD_BF_SL_PDPE_X_SHIFT 2 689 #define VTD_BF_SL_PDPE_X_MASK UINT64_C(0x0000000000000004) 690 /** IGN: Ignored (bits 6:3). 
*/ 691 #define VTD_BF_SL_PDPE_IGN_6_3_SHIFT 3 692 #define VTD_BF_SL_PDPE_IGN_6_3_MASK UINT64_C(0x0000000000000078) 693 /** PS: Page Size (MBZ). */ 694 #define VTD_BF_SL_PDPE_PS_SHIFT 7 695 #define VTD_BF_SL_PDPE_PS_MASK UINT64_C(0x0000000000000080) 696 /** A: Accessed. */ 697 #define VTD_BF_SL_PDPE_A_SHIFT 8 698 #define VTD_BF_SL_PDPE_A_MASK UINT64_C(0x0000000000000100) 699 /** IGN: Ignored (bits 10:9). */ 700 #define VTD_BF_SL_PDPE_IGN_10_9_SHIFT 9 701 #define VTD_BF_SL_PDPE_IGN_10_9_MASK UINT64_C(0x0000000000000600) 702 /** R: Reserved (bit 11). */ 703 #define VTD_BF_SL_PDPE_RSVD_11_SHIFT 11 704 #define VTD_BF_SL_PDPE_RSVD_11_MASK UINT64_C(0x0000000000000800) 705 /** ADDR: Address of second-level PDT. */ 706 #define VTD_BF_SL_PDPE_ADDR_SHIFT 12 707 #define VTD_BF_SL_PDPE_ADDR_MASK UINT64_C(0x000ffffffffff000) 708 /** IGN: Ignored (bits 61:52). */ 709 #define VTD_BF_SL_PDPE_IGN_61_52_SHIFT 52 710 #define VTD_BF_SL_PDPE_IGN_61_52_MASK UINT64_C(0x3ff0000000000000) 711 /** R: Reserved (bit 62). */ 712 #define VTD_BF_SL_PDPE_RSVD_62_SHIFT 62 713 #define VTD_BF_SL_PDPE_RSVD_62_MASK UINT64_C(0x4000000000000000) 714 /** IGN: Ignored (bit 63). */ 715 #define VTD_BF_SL_PDPE_IGN_63_SHIFT 63 716 #define VTD_BF_SL_PDPE_IGN_63_MASK UINT64_C(0x8000000000000000) 717 RT_BF_ASSERT_COMPILE_CHECKS(VTD_BF_SL_PDPE_, UINT64_C(0), UINT64_MAX, 718 (R, W, X, IGN_6_3, PS, A, IGN_10_9, RSVD_11, ADDR, IGN_61_52, RSVD_62, IGN_63)); 719 720 /** Second-level PDPE valid mask. */ 721 #define VTD_SL_PDPE_VALID_MASK ( VTD_BF_SL_PDPE_R_MASK | VTD_BF_SL_PDPE_W_MASK \ 722 | VTD_BF_SL_PDPE_X_MASK | VTD_BF_SL_PDPE_IGN_6_3_MASK \ 723 | VTD_BF_SL_PDPE_PS_MASK | VTD_BF_SL_PDPE_A_MASK \ 724 | VTD_BF_SL_PDPE_IGN_10_9_MASK | VTD_BF_SL_PDPE_ADDR_MASK \ 725 | VTD_BF_SL_PDPE_IGN_61_52_MASK | VTD_BF_SL_PDPE_IGN_63_MASK) 726 /** @} */ 727 728 729 /** @name Second-Level PDE (2MB Page). 730 * In accordance with the Intel spec. 731 * @{ */ 732 /** R: Read. 
*/ 733 #define VTD_BF_SL_PDE2M_R_SHIFT 0 734 #define VTD_BF_SL_PDE2M_R_MASK UINT64_C(0x0000000000000001) 735 /** W: Write. */ 736 #define VTD_BF_SL_PDE2M_W_SHIFT 1 737 #define VTD_BF_SL_PDE2M_W_MASK UINT64_C(0x0000000000000002) 738 /** X: Execute. */ 739 #define VTD_BF_SL_PDE2M_X_SHIFT 2 740 #define VTD_BF_SL_PDE2M_X_MASK UINT64_C(0x0000000000000004) 741 /** EMT: Extended Memory Type. */ 742 #define VTD_BF_SL_PDE2M_EMT_SHIFT 3 743 #define VTD_BF_SL_PDE2M_EMT_MASK UINT64_C(0x0000000000000038) 744 /** IPAT: Ignore PAT (Page Attribute Table). */ 745 #define VTD_BF_SL_PDE2M_IPAT_SHIFT 6 746 #define VTD_BF_SL_PDE2M_IPAT_MASK UINT64_C(0x0000000000000040) 747 /** PS: Page Size (MB1). */ 748 #define VTD_BF_SL_PDE2M_PS_SHIFT 7 749 #define VTD_BF_SL_PDE2M_PS_MASK UINT64_C(0x0000000000000080) 750 /** A: Accessed. */ 751 #define VTD_BF_SL_PDE2M_A_SHIFT 8 752 #define VTD_BF_SL_PDE2M_A_MASK UINT64_C(0x0000000000000100) 753 /** D: Dirty. */ 754 #define VTD_BF_SL_PDE2M_D_SHIFT 9 755 #define VTD_BF_SL_PDE2M_D_MASK UINT64_C(0x0000000000000200) 756 /** IGN: Ignored (bit 10). */ 757 #define VTD_BF_SL_PDE2M_IGN_10_SHIFT 10 758 #define VTD_BF_SL_PDE2M_IGN_10_MASK UINT64_C(0x0000000000000400) 759 /** R: Reserved (bit 11). */ 760 #define VTD_BF_SL_PDE2M_RSVD_11_SHIFT 11 761 #define VTD_BF_SL_PDE2M_RSVD_11_MASK UINT64_C(0x0000000000000800) 762 /** R: Reserved (bits 20:12). */ 763 #define VTD_BF_SL_PDE2M_RSVD_20_12_SHIFT 12 764 #define VTD_BF_SL_PDE2M_RSVD_20_12_MASK UINT64_C(0x00000000001ff000) 765 /** ADDR: Address of 2MB page. */ 766 #define VTD_BF_SL_PDE2M_ADDR_SHIFT 21 767 #define VTD_BF_SL_PDE2M_ADDR_MASK UINT64_C(0x000fffffffe00000) 768 /** IGN: Ignored (bits 61:52). */ 769 #define VTD_BF_SL_PDE2M_IGN_61_52_SHIFT 52 770 #define VTD_BF_SL_PDE2M_IGN_61_52_MASK UINT64_C(0x3ff0000000000000) 771 /** R: Reserved (bit 62). */ 772 #define VTD_BF_SL_PDE2M_RSVD_62_SHIFT 62 773 #define VTD_BF_SL_PDE2M_RSVD_62_MASK UINT64_C(0x4000000000000000) 774 /** IGN: Ignored (bit 63). 
*/ 775 #define VTD_BF_SL_PDE2M_IGN_63_SHIFT 63 776 #define VTD_BF_SL_PDE2M_IGN_63_MASK UINT64_C(0x8000000000000000) 777 RT_BF_ASSERT_COMPILE_CHECKS(VTD_BF_SL_PDE2M_, UINT64_C(0), UINT64_MAX, 778 (R, W, X, EMT, IPAT, PS, A, D, IGN_10, RSVD_11, RSVD_20_12, ADDR, IGN_61_52, RSVD_62, IGN_63)); 779 780 /** Second-level PDE (2MB page) valid mask. */ 781 #define VTD_SL_PDE2M_VALID_MASK ( VTD_BF_SL_PDE2M_R_MASK | VTD_BF_SL_PDE2M_W_MASK \ 782 | VTD_BF_SL_PDE2M_X_MASK | VTD_BF_SL_PDE2M_EMT_MASK \ 783 | VTD_BF_SL_PDE2M_IPAT_MASK | VTD_BF_SL_PDE2M_PS_MASK \ 784 | VTD_BF_SL_PDE2M_A_MASK | VTD_BF_SL_PDE2M_D_MASK \ 785 | VTD_BF_SL_PDE2M_IGN_10_MASK | VTD_BF_SL_PDE2M_ADDR_MASK \ 786 | VTD_BF_SL_PDE2M_IGN_61_52_MASK | VTD_BF_SL_PDE2M_IGN_63_MASK) 787 /** @} */ 788 789 790 /** @name Second-Level PDE. 791 * In accordance with the Intel spec. 792 * @{ */ 793 /** R: Read. */ 794 #define VTD_BF_SL_PDE_R_SHIFT 0 795 #define VTD_BF_SL_PDE_R_MASK UINT64_C(0x0000000000000001) 796 /** W: Write. */ 797 #define VTD_BF_SL_PDE_W_SHIFT 1 798 #define VTD_BF_SL_PDE_W_MASK UINT64_C(0x0000000000000002) 799 /** X: Execute. */ 800 #define VTD_BF_SL_PDE_X_SHIFT 2 801 #define VTD_BF_SL_PDE_X_MASK UINT64_C(0x0000000000000004) 802 /** IGN: Ignored (bits 6:3). */ 803 #define VTD_BF_SL_PDE_IGN_6_3_SHIFT 3 804 #define VTD_BF_SL_PDE_IGN_6_3_MASK UINT64_C(0x0000000000000078) 805 /** PS: Page Size (MBZ). */ 806 #define VTD_BF_SL_PDE_PS_SHIFT 7 807 #define VTD_BF_SL_PDE_PS_MASK UINT64_C(0x0000000000000080) 808 /** A: Accessed. */ 809 #define VTD_BF_SL_PDE_A_SHIFT 8 810 #define VTD_BF_SL_PDE_A_MASK UINT64_C(0x0000000000000100) 811 /** IGN: Ignored (bits 10:9). */ 812 #define VTD_BF_SL_PDE_IGN_10_9_SHIFT 9 813 #define VTD_BF_SL_PDE_IGN_10_9_MASK UINT64_C(0x0000000000000600) 814 /** R: Reserved (bit 11). */ 815 #define VTD_BF_SL_PDE_RSVD_11_SHIFT 11 816 #define VTD_BF_SL_PDE_RSVD_11_MASK UINT64_C(0x0000000000000800) 817 /** ADDR: Address of second-level PT. 
*/ 818 #define VTD_BF_SL_PDE_ADDR_SHIFT 12 819 #define VTD_BF_SL_PDE_ADDR_MASK UINT64_C(0x000ffffffffff000) 820 /** IGN: Ignored (bits 61:52). */ 821 #define VTD_BF_SL_PDE_IGN_61_52_SHIFT 52 822 #define VTD_BF_SL_PDE_IGN_61_52_MASK UINT64_C(0x3ff0000000000000) 823 /** R: Reserved (bit 62). */ 824 #define VTD_BF_SL_PDE_RSVD_62_SHIFT 62 825 #define VTD_BF_SL_PDE_RSVD_62_MASK UINT64_C(0x4000000000000000) 826 /** IGN: Ignored (bit 63). */ 827 #define VTD_BF_SL_PDE_IGN_63_SHIFT 63 828 #define VTD_BF_SL_PDE_IGN_63_MASK UINT64_C(0x8000000000000000) 829 RT_BF_ASSERT_COMPILE_CHECKS(VTD_BF_SL_PDE_, UINT64_C(0), UINT64_MAX, 830 (R, W, X, IGN_6_3, PS, A, IGN_10_9, RSVD_11, ADDR, IGN_61_52, RSVD_62, IGN_63)); 831 832 /** Second-level PDE valid mask. */ 833 #define VTD_SL_PDE_VALID_MASK ( VTD_BF_SL_PDE_R_MASK | VTD_BF_SL_PDE_W_MASK \ 834 | VTD_BF_SL_PDE_X_MASK | VTD_BF_SL_PDE_IGN_6_3_MASK \ 835 | VTD_BF_SL_PDE_PS_MASK | VTD_BF_SL_PDE_A_MASK \ 836 | VTD_BF_SL_PDE_IGN_10_9_MASK | VTD_BF_SL_PDE_ADDR_MASK \ 837 | VTD_BF_SL_PDE_IGN_61_52_MASK | VTD_BF_SL_PDE_IGN_63_MASK) 838 /** @} */ 839 840 841 /** @name Second-Level PTE. 842 * In accordance with the Intel spec. 843 * @{ */ 844 /** R: Read. */ 845 #define VTD_BF_SL_PTE_R_SHIFT 0 846 #define VTD_BF_SL_PTE_R_MASK UINT64_C(0x0000000000000001) 847 /** W: Write. */ 848 #define VTD_BF_SL_PTE_W_SHIFT 1 849 #define VTD_BF_SL_PTE_W_MASK UINT64_C(0x0000000000000002) 850 /** X: Execute. */ 851 #define VTD_BF_SL_PTE_X_SHIFT 2 852 #define VTD_BF_SL_PTE_X_MASK UINT64_C(0x0000000000000004) 853 /** EMT: Extended Memory Type. */ 854 #define VTD_BF_SL_PTE_EMT_SHIFT 3 855 #define VTD_BF_SL_PTE_EMT_MASK UINT64_C(0x0000000000000038) 856 /** IPAT: Ignore PAT (Page Attribute Table). */ 857 #define VTD_BF_SL_PTE_IPAT_SHIFT 6 858 #define VTD_BF_SL_PTE_IPAT_MASK UINT64_C(0x0000000000000040) 859 /** IGN: Ignored (bit 7). */ 860 #define VTD_BF_SL_PTE_IGN_7_SHIFT 7 861 #define VTD_BF_SL_PTE_IGN_7_MASK UINT64_C(0x0000000000000080) 862 /** A: Accessed. 
*/ 863 #define VTD_BF_SL_PTE_A_SHIFT 8 864 #define VTD_BF_SL_PTE_A_MASK UINT64_C(0x0000000000000100) 865 /** D: Dirty. */ 866 #define VTD_BF_SL_PTE_D_SHIFT 9 867 #define VTD_BF_SL_PTE_D_MASK UINT64_C(0x0000000000000200) 868 /** IGN: Ignored (bit 10). */ 869 #define VTD_BF_SL_PTE_IGN_10_SHIFT 10 870 #define VTD_BF_SL_PTE_IGN_10_MASK UINT64_C(0x0000000000000400) 871 /** R: Reserved (bit 11). */ 872 #define VTD_BF_SL_PTE_RSVD_11_SHIFT 11 873 #define VTD_BF_SL_PTE_RSVD_11_MASK UINT64_C(0x0000000000000800) 874 /** ADDR: Address of 4K page. */ 875 #define VTD_BF_SL_PTE_ADDR_SHIFT 12 876 #define VTD_BF_SL_PTE_ADDR_MASK UINT64_C(0x000ffffffffff000) 877 /** IGN: Ignored (bits 61:52). */ 878 #define VTD_BF_SL_PTE_IGN_61_52_SHIFT 52 879 #define VTD_BF_SL_PTE_IGN_61_52_MASK UINT64_C(0x3ff0000000000000) 880 /** R: Reserved (bit 62). */ 881 #define VTD_BF_SL_PTE_RSVD_62_SHIFT 62 882 #define VTD_BF_SL_PTE_RSVD_62_MASK UINT64_C(0x4000000000000000) 883 /** IGN: Ignored (bit 63). */ 884 #define VTD_BF_SL_PTE_IGN_63_SHIFT 63 885 #define VTD_BF_SL_PTE_IGN_63_MASK UINT64_C(0x8000000000000000) 886 RT_BF_ASSERT_COMPILE_CHECKS(VTD_BF_SL_PTE_, UINT64_C(0), UINT64_MAX, 887 (R, W, X, EMT, IPAT, IGN_7, A, D, IGN_10, RSVD_11, ADDR, IGN_61_52, RSVD_62, IGN_63)); 888 889 /** Second-level PTE valid mask. */ 890 #define VTD_SL_PTE_VALID_MASK ( VTD_BF_SL_PTE_R_MASK | VTD_BF_SL_PTE_W_MASK \ 891 | VTD_BF_SL_PTE_X_MASK | VTD_BF_SL_PTE_EMT_MASK \ 892 | VTD_BF_SL_PTE_IPAT_MASK | VTD_BF_SL_PTE_IGN_7_MASK \ 893 | VTD_BF_SL_PTE_A_MASK | VTD_BF_SL_PTE_D_MASK \ 894 | VTD_BF_SL_PTE_IGN_10_MASK | VTD_BF_SL_PTE_RSVD_11_MASK \ 895 | VTD_BF_SL_PTE_ADDR_MASK | VTD_BF_SL_PTE_IGN_61_52_MASK \ 896 | VTD_BF_SL_PTE_RSVD_62_MASK | VTD_BF_SL_PTE_IGN_63_MASK) 897 /** @} */ 898 899 900 /** @name Second-Level Generic Paging Entry. 901 * In accordance with the Intel spec. 902 * @{ */ 568 903 /** Second-Level Paging Entry. 
*/ 569 904 typedef uint64_t VTD_SLP_ENTRY_T; … … 1372 1707 1373 1708 1374 /** @name VT-d faulted request attributes (FRCD_REG::EXE, FRCD_REG::PRIV).1375 * In accordance with the Intel spec.1376 * @{1377 */1378 /** Supervisory privilege was requested. */1379 #define VTD_REQ_ATTR_PRIV RT_BIT(0)1380 /** Execute permission was requested. */1381 #define VTD_REQ_ATTR_EXE RT_BIT(1)1382 /** @} */1383 1384 1385 1709 /** @name Advanced Fault Log Register (AFLOG_REG). 1386 1710 * In accordance with the Intel spec. -
trunk/src/VBox/Devices/Bus/DevIommuIntel.cpp
r89375 r89407 54 54 #define DMAR_IS_MMIO_OFF_VALID(a_off) ( (a_off) < DMAR_MMIO_GROUP_0_OFF_END \ 55 55 || (a_off) - DMAR_MMIO_GROUP_1_OFF_FIRST < DMAR_MMIO_GROUP_1_SIZE) 56 57 /** Gets the page offset mask given the number of bits to shift. */ 58 #define DMAR_GET_PAGE_OFF_MASK(a_cShift) (~(UINT64_C(0xffffffffffffffff) << (a_cShift))) 56 59 57 60 /** Acquires the DMAR lock but returns with the given busy error code on failure. */ … … 139 142 #define DMAR_ND 6 140 143 144 /** @name DMAR_PERM_XXX: DMA request permissions. 145 * The order of R, W, X bits is important as it corresponds to those bits in 146 * page-table entries. 147 * 148 * @{ */ 149 /** DMA request permission: Read. */ 150 #define DMAR_PERM_READ RT_BIT(0) 151 /** DMA request permission: Write. */ 152 #define DMAR_PERM_WRITE RT_BIT(1) 153 /** DMA request permission: Execute. */ 154 #define DMAR_PERM_EXE RT_BIT(2) 155 /** DMA request permission: Supervisor privilege. */ 156 #define DMAR_PERM_PRIV RT_BIT(3) 157 /** DMA request permissions: All. */ 158 #define DMAR_PERM_ALL (DMAR_PERM_READ | DMAR_PERM_WRITE | DMAR_PERM_EXE | DMAR_PERM_PRIV) 159 /** @} */ 160 141 161 /** Release log prefix string. */ 142 162 #define DMAR_LOG_PFX "Intel-IOMMU" … … 174 194 kDmarDiag_Atf_Lsl_1, 175 195 kDmarDiag_Atf_Lsl_2, 196 kDmarDiag_Atf_Lsl_2_LargePage, 176 197 kDmarDiag_Atf_Rta_1_1, 177 198 kDmarDiag_Atf_Rta_1_2, 178 199 kDmarDiag_Atf_Rta_1_3, 200 kDmarDiag_Atf_Ssl_1, 201 kDmarDiag_Atf_Ssl_2, 202 kDmarDiag_Atf_Ssl_3, 203 kDmarDiag_Atf_Ssl_3_LargePage, 179 204 180 205 /* CCMD_REG faults. 
*/ … … 242 267 DMARDIAG_DESC(Atf_Lsl_1 ), 243 268 DMARDIAG_DESC(Atf_Lsl_2 ), 269 DMARDIAG_DESC(Atf_Lsl_2_LargePage ), 244 270 DMARDIAG_DESC(Atf_Rta_1_1 ), 245 271 DMARDIAG_DESC(Atf_Rta_1_2 ), 246 272 DMARDIAG_DESC(Atf_Rta_1_3 ), 273 DMARDIAG_DESC(Atf_Ssl_1 ), 274 DMARDIAG_DESC(Atf_Ssl_2 ), 275 DMARDIAG_DESC(Atf_Ssl_3 ), 276 DMARDIAG_DESC(Atf_Ssl_3_LargePage ), 247 277 DMARDIAG_DESC(CcmdReg_NotSupported ), 248 278 DMARDIAG_DESC(CcmdReg_Qi_Enabled ), … … 318 348 /** Maximum supported paging level (3, 4 or 5). */ 319 349 uint8_t uMaxPagingLevel; 350 /** DMA request valid permissions mask. */ 351 uint8_t fPermValidMask; 320 352 321 353 /** The event semaphore the invalidation-queue thread waits on. */ … … 432 464 433 465 /** 466 * I/O TLB entry. 467 */ 468 typedef struct DMARIOTLBE 469 { 470 RTGCPHYS GCPhysBase; 471 uint8_t cShift; 472 uint8_t fPerm; 473 uint16_t idDomain; 474 uint16_t uPadding0; 475 } DMARIOTLBE; 476 /** Pointer to an IOTLB entry. */ 477 typedef DMARIOTLBE *PDMARIOTLBE; 478 /** Pointer to a const IOTLB entry. */ 479 typedef DMARIOTLBE const *PCDMARIOTLBE; 480 481 /** 434 482 * DMA Address Remapping Information. 435 483 */ … … 438 486 /** The device ID (bus, device, function). */ 439 487 uint16_t idDevice; 440 /** The extended attributes of the request (VTD_REQ_ATTR_XXX). */441 uint8_t fReq Attr;488 /** The requested permissions (DMAR_PERM_XXX). */ 489 uint8_t fReqPerm; 442 490 /** The fault processing disabled (FPD) bit. */ 443 491 uint8_t fFpd; … … 456 504 size_t cbDma; 457 505 458 /** @todo Might have to split the result fields below into a separate structure and 459 * store extra info like cPageShift, permissions and attributes. */ 460 /** The translated system-physical address (HPA). */ 461 RTGCPHYS GCPhysSpa; 506 /** The IOTLBE for this remapping. */ 507 DMARIOTLBE Iotlbe; 462 508 /** The size of the contiguous translated region (in bytes). */ 463 509 size_t cbContiguous; 464 /** The domain ID. 
*/465 uint16_t idDomain;466 510 } DMARADDRMAP; 467 511 /** Pointer to a DMA address remapping object. */ … … 1446 1490 uint8_t const fType1 = pAddrRemap->enmReqType & RT_BIT(1); 1447 1491 uint8_t const fType2 = pAddrRemap->enmReqType & RT_BIT(0); 1448 uint8_t const fExec = pAddrRemap->fReq Attr & VTD_REQ_ATTR_EXE;1449 uint8_t const fPriv = pAddrRemap->fReq Attr & VTD_REQ_ATTR_PRIV;1492 uint8_t const fExec = pAddrRemap->fReqPerm & DMAR_PERM_EXE; 1493 uint8_t const fPriv = pAddrRemap->fReqPerm & DMAR_PERM_PRIV; 1450 1494 uint64_t const uFrcdHi = RT_BF_MAKE(VTD_BF_1_FRCD_REG_SID, pAddrRemap->idDevice) 1451 1495 | RT_BF_MAKE(VTD_BF_1_FRCD_REG_T2, fType2) … … 1924 1968 * @param SlpEntry The second-level paging entry. 1925 1969 * @param uPagingLevel The paging level. 1970 * @param idDomain The domain ID for the translation. 1926 1971 * @param pAddrRemap The DMA address remap info. 1927 1972 */ 1928 static int dmarDrSecondLevelTranslate(PPDMDEVINS pDevIns, VTD_SLP_ENTRY_T SlpEntry, uint8_t uPagingLevel, PDMARADDRMAP pAddrRemap) 1973 static int dmarDrSecondLevelTranslate(PPDMDEVINS pDevIns, VTD_SLP_ENTRY_T SlpEntry, uint8_t uPagingLevel, uint16_t idDomain, 1974 PDMARADDRMAP pAddrRemap) 1929 1975 { 1930 1976 PCDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PCDMAR); 1977 1978 /* Mask of valid paging entry bits. */ 1979 static uint64_t const s_auPtEntityRsvd[] = { VTD_SL_PTE_VALID_MASK, 1980 VTD_SL_PDE_VALID_MASK, 1981 VTD_SL_PDPE_VALID_MASK, 1982 VTD_SL_PML4E_VALID_MASK, 1983 VTD_SL_PML5E_VALID_MASK }; 1984 1985 /* Mask of valid large-page paging entry bits. */ 1986 static uint64_t const s_auLargePageRsvd[] = { 0, 1987 VTD_SL_PDE2M_VALID_MASK, 1988 VTD_SL_PDPE1G_VALID_MASK, 1989 0, 1990 0 }; 1991 1992 /* Paranoia. 
*/ 1931 1993 Assert(uPagingLevel >= 3 && uPagingLevel <= 5); 1994 AssertCompile(RT_ELEMENTS(s_auPtEntityRsvd) == RT_ELEMENTS(s_auLargePageRsvd)); 1995 AssertCompile(RT_ELEMENTS(s_auPtEntityRsvd) == 5); 1932 1996 1933 1997 /* … … 1940 2004 for (int8_t iLevel = uPagingLevel - 1; iLevel >= 0; iLevel--) 1941 2005 { 1942 /* Read the paging entry for the current level. */ 2006 /* 2007 * Read the paging entry for the current level. 2008 */ 2009 uint8_t const cLevelShift = 12 + ((iLevel - 1) * 9); 1943 2010 { 1944 uint8_t const cLevelShift = 12 + ((iLevel - 1) * 9);1945 2011 uint16_t const idxPte = (uDmaAddr >> cLevelShift) & UINT64_C(0x1ff); 1946 2012 uint64_t const offPte = idxPte << 3; … … 1952 2018 else 1953 2019 { 1954 /** @todo If this function is going to be used for scalable-mode second-level 1955 * translation, we need to report different error codes. The TTM is 1956 * available in pAddrRemap->fTtm, but how cleanly we can handle this is 1957 * something to be decided later. For now we just use legacy mode error 1958 * codes. Asserted as such below. */ 1959 Assert(pAddrRemap->fTtm == VTD_TTM_LEGACY_MODE); 1960 dmarAtFaultRecord(pDevIns, kDmarDiag_Atf_Lsl_1, VTDATFAULT_LSL_1, pAddrRemap); 2020 if (pAddrRemap->fTtm == VTD_TTM_LEGACY_MODE) 2021 dmarAtFaultRecord(pDevIns, kDmarDiag_Atf_Lsl_1, VTDATFAULT_LSL_1, pAddrRemap); 2022 else 2023 dmarAtFaultRecord(pDevIns, kDmarDiag_Atf_Ssl_1, VTDATFAULT_SSL_1, pAddrRemap); 1961 2024 return VERR_IOMMU_ADDR_TRANSLATION_FAILED; 1962 2025 } 1963 2026 } 1964 2027 1965 /** @todo validate page table entity. */ 1966 /** @todo once we reach the level 1, compute final address with page offset. */ 1967 } 1968 return VERR_NOT_IMPLEMENTED; 2028 /* 2029 * Check I/O permissions. 2030 * This must be done prior to check reserved bits for properly reporting errors SSL.2 and SSL.3. 2031 * 2032 * See Intel spec. 7.1.3 "Fault conditions and Remapping hardware behavior for various request". 
2033 */ 2034 uint8_t const fReqPerm = pAddrRemap->fReqPerm & pThis->fPermValidMask; 2035 uint8_t const fPtPerm = uPtEntity & pThis->fPermValidMask; 2036 if ((fPtPerm & fReqPerm) == fReqPerm) 2037 { /* likely */ } 2038 else 2039 { 2040 if (pAddrRemap->fTtm == VTD_TTM_LEGACY_MODE) 2041 dmarAtFaultRecord(pDevIns, kDmarDiag_Atf_Lsl_2, VTDATFAULT_LSL_2, pAddrRemap); 2042 else 2043 dmarAtFaultRecord(pDevIns, kDmarDiag_Atf_Ssl_2, VTDATFAULT_SSL_2, pAddrRemap); 2044 return VERR_IOMMU_ADDR_TRANSLATION_FAILED; 2045 } 2046 2047 /* 2048 * Validate reserved bits of the current paging entry. 2049 */ 2050 if (!(uPtEntity & ~s_auPtEntityRsvd[iLevel])) 2051 { /* likely */ } 2052 else 2053 { 2054 if (pAddrRemap->fTtm == VTD_TTM_LEGACY_MODE) 2055 dmarAtFaultRecord(pDevIns, kDmarDiag_Atf_Lsl_2, VTDATFAULT_LSL_2, pAddrRemap); 2056 else 2057 dmarAtFaultRecord(pDevIns, kDmarDiag_Atf_Ssl_3, VTDATFAULT_SSL_3, pAddrRemap); 2058 return VERR_IOMMU_ADDR_TRANSLATION_FAILED; 2059 } 2060 2061 /* 2062 * Check if this is a 1GB page or a 2MB page. 
2063 */ 2064 AssertCompile(VTD_BF_SL_PDE_PS_MASK == VTD_BF_SL_PDPE_PS_MASK); 2065 uint8_t const fLargePage = RT_BF_GET(uPtEntity, VTD_BF_SL_PDE_PS); 2066 if (fLargePage && iLevel > 0) 2067 { 2068 Assert(iLevel == 1 || iLevel == 2); 2069 uint8_t const fSllpsMask = RT_BF_GET(pThis->fCapReg, VTD_BF_CAP_REG_SLLPS); 2070 if (fSllpsMask & RT_BIT(iLevel - 1)) 2071 { 2072 pAddrRemap->Iotlbe.GCPhysBase = uPtEntity & ~(RT_BIT_64(cLevelShift) - 1); 2073 pAddrRemap->Iotlbe.cShift = cLevelShift; 2074 pAddrRemap->Iotlbe.fPerm = fPtPerm; 2075 pAddrRemap->Iotlbe.idDomain = idDomain; 2076 return VINF_SUCCESS; 2077 } 2078 2079 if (pAddrRemap->fTtm == VTD_TTM_LEGACY_MODE) 2080 dmarAtFaultRecord(pDevIns, kDmarDiag_Atf_Lsl_2_LargePage, VTDATFAULT_LSL_2, pAddrRemap); 2081 else 2082 dmarAtFaultRecord(pDevIns, kDmarDiag_Atf_Ssl_3_LargePage, VTDATFAULT_SSL_3, pAddrRemap); 2083 return VERR_IOMMU_ADDR_TRANSLATION_FAILED; 2084 } 2085 2086 /* 2087 * If this is the final PTE, compute the translation address and we're done. 2088 */ 2089 if (iLevel == 0) 2090 { 2091 pAddrRemap->Iotlbe.GCPhysBase = uPtEntity & ~(RT_BIT_64(cLevelShift) - 1); 2092 pAddrRemap->Iotlbe.cShift = cLevelShift; 2093 pAddrRemap->Iotlbe.fPerm = fPtPerm; 2094 pAddrRemap->Iotlbe.idDomain = idDomain; 2095 return VINF_SUCCESS; 2096 } 2097 } 2098 2099 /* Shouldn't ever reach here. */ 2100 return VERR_IOMMU_IPE_0; 1969 2101 } 1970 2102 … … 2019 2151 && !(uCtxEntryQword1 & ~VTD_CONTEXT_ENTRY_1_VALID_MASK)) 2020 2152 { 2153 /* Get the domain ID for this mapping. */ 2154 uint16_t const idDomain = RT_BF_GET(uCtxEntryQword1, VTD_BF_1_CONTEXT_ENTRY_DID); 2155 2021 2156 /* Validate the translation type (TT). */ 2022 2157 PCDMAR pThis = PDMDEVINS_2_DATA(pDevIns, PCDMAR); … … 2042 2177 if (RT_SUCCESS(rc)) 2043 2178 { 2044 /* Note the domain ID this context-entry maps to. */2045 pAddrRemap->idDomain = RT_BF_GET(uCtxEntryQword1, VTD_BF_1_CONTEXT_ENTRY_DID);2046 2047 2179 /* Finally... perform second-level translation. 
*/ 2048 return dmarDrSecondLevelTranslate(pDevIns, SlpEntry, uPagingLevel, pAddrRemap); 2180 return dmarDrSecondLevelTranslate(pDevIns, SlpEntry, uPagingLevel, idDomain, 2181 pAddrRemap); 2049 2182 } 2050 2183 dmarAtFaultRecord(pDevIns, kDmarDiag_Atf_Lct_4_3, VTDATFAULT_LCT_4_3, pAddrRemap); … … 2070 2203 { 2071 2204 /** @todo Check AW == maximum SAGAW bit? */ 2072 pAddrRemap->GCPhysSpa = pAddrRemap->uDmaAddr; 2073 pAddrRemap->cbContiguous = pAddrRemap->cbDma; 2205 pAddrRemap->Iotlbe.GCPhysBase = pAddrRemap->uDmaAddr & X86_PAGE_4K_BASE_MASK; 2206 pAddrRemap->Iotlbe.cShift = X86_PAGE_4K_SHIFT; 2207 pAddrRemap->Iotlbe.fPerm = DMAR_PERM_ALL; 2208 pAddrRemap->Iotlbe.idDomain = idDomain; 2209 pAddrRemap->cbContiguous = pAddrRemap->cbDma; 2074 2210 return VINF_SUCCESS; 2075 2211 } … … 2198 2334 { 2199 2335 VTDREQTYPE enmReqType; 2336 uint8_t fReqPerm; 2200 2337 if (fFlags & PDMIOMMU_MEM_F_READ) 2201 2338 { 2202 2339 enmReqType = VTDREQTYPE_READ; 2340 fReqPerm = DMAR_PERM_READ; 2203 2341 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMemRead)); 2204 2342 } … … 2206 2344 { 2207 2345 enmReqType = VTDREQTYPE_WRITE; 2346 fReqPerm = DMAR_PERM_WRITE; 2208 2347 STAM_COUNTER_INC(&pThis->CTX_SUFF_Z(StatMemWrite)); 2209 2348 } … … 2211 2350 uint8_t const fTtm = RT_BF_GET(uRtaddrReg, VTD_BF_RTADDR_REG_TTM); 2212 2351 DMARADDRMAP AddrRemap; 2213 AddrRemap.idDevice = idDevice; 2214 AddrRemap.Pasid = NIL_PCIPASID; 2215 AddrRemap.enmAddrType = PCIADDRTYPE_UNTRANSLATED; 2216 AddrRemap.enmReqType = enmReqType; 2217 AddrRemap.fTtm = fTtm; 2218 AddrRemap.uDmaAddr = uIova; 2219 AddrRemap.cbDma = cbIova; 2220 AddrRemap.GCPhysSpa = NIL_RTGCPHYS; 2221 AddrRemap.cbContiguous = 0; 2352 RT_ZERO(AddrRemap); 2353 AddrRemap.idDevice = idDevice; 2354 AddrRemap.fReqPerm = fReqPerm; 2355 AddrRemap.Pasid = NIL_PCIPASID; 2356 AddrRemap.enmAddrType = PCIADDRTYPE_UNTRANSLATED; 2357 AddrRemap.enmReqType = enmReqType; 2358 AddrRemap.fTtm = fTtm; 2359 AddrRemap.uDmaAddr = uIova; 2360 AddrRemap.cbDma = cbIova; 2361 
AddrRemap.Iotlbe.GCPhysBase = NIL_RTGCPHYS; 2222 2362 2223 2363 int rc; … … 2255 2395 2256 2396 *pcbContiguous = AddrRemap.cbContiguous; 2257 *pGCPhysSpa = AddrRemap. GCPhysSpa;2397 *pGCPhysSpa = AddrRemap.Iotlbe.GCPhysBase | DMAR_GET_PAGE_OFF_MASK(AddrRemap.Iotlbe.cShift); 2258 2398 return rc; 2259 2399 } … … 3334 3474 uint8_t const fEim = 1; /* Extended interrupt mode.*/ 3335 3475 uint8_t const fAdms = 1; /* Abort DMA mode support. */ 3476 uint8_t const fErs = 0; /* Execute Request (not supported). */ 3336 3477 3337 3478 pThis->fExtCapReg = RT_BF_MAKE(VTD_BF_ECAP_REG_C, 0) /* Accesses don't snoop CPU cache. */ … … 3347 3488 | RT_BF_MAKE(VTD_BF_ECAP_REG_NEST, fNest) 3348 3489 | RT_BF_MAKE(VTD_BF_ECAP_REG_PRS, 0) /* 0 as DT not supported. */ 3349 | RT_BF_MAKE(VTD_BF_ECAP_REG_ERS, 0) /* Execute request not supported. */3490 | RT_BF_MAKE(VTD_BF_ECAP_REG_ERS, fErs) 3350 3491 | RT_BF_MAKE(VTD_BF_ECAP_REG_SRS, 0) /* Supervisor request not supported. */ 3351 3492 | RT_BF_MAKE(VTD_BF_ECAP_REG_NWFS, 0) /* 0 as DT not supported. */ … … 3365 3506 | RT_BF_MAKE(VTD_BF_ECAP_REG_RPRIVS, 0); /* 0 as SRS not supported. */ 3366 3507 dmarRegWriteRaw64(pThis, VTD_MMIO_OFF_ECAP_REG, pThis->fExtCapReg); 3508 3509 pThis->fPermValidMask = DMAR_PERM_READ | DMAR_PERM_WRITE; 3510 if (fErs) 3511 pThis->fPermValidMask = DMAR_PERM_EXE; 3367 3512 } 3368 3513
Note:
See TracChangeset
for help on using the changeset viewer.