Changeset 42372 in vbox
- Timestamp: Jul 24, 2012 9:50:16 PM (13 years ago)
- svn:sync-xref-src-repo-rev: 79454
- Location: trunk
- Files: 2 edited
trunk/include/VBox/err.h
--- trunk/include/VBox/err.h (r42192)
+++ trunk/include/VBox/err.h (r42372)
@@ -990,3 +990,8 @@
 /** The guest LDT selector is out of bounds. */
 #define VERR_SELM_LDT_OUT_OF_BOUNDS             (-2506)
+/** Unknown error while reading the guest GDT during shadow table updating. */
+#define VERR_SELM_GDT_READ_ERROR                (-2507)
+/** The guest GDT so full that we cannot find free space for our own
+ * selectors. */
+#define VERR_SELM_GDT_TOO_FULL                  (-2508)
 /** @} */
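Both new codes follow the usual VBox/IPRT status-code convention: VERR_* values are negative, so any RT_FAILURE() check catches them and the specific value can be propagated unchanged up the call chain. A minimal illustrative sketch (the wrapper function here is hypothetical; the real call sites are in the SELM.cpp hunk below):

    #include <VBox/err.h>   /* VERR_SELM_GDT_READ_ERROR, VERR_SELM_GDT_TOO_FULL */
    #include <iprt/err.h>   /* RT_SUCCESS, RT_FAILURE */

    /* Hypothetical caller sketch: RT_FAILURE() is true for any negative
       status code, so the new SELM errors propagate like any other VERR_*. */
    static int syncShadowTables(PVM pVM, PVMCPU pVCpu)
    {
        int rc = selmR3UpdateShadowGdt(pVM, pVCpu);
        if (RT_FAILURE(rc))
            return rc;      /* e.g. VERR_SELM_GDT_READ_ERROR or VERR_SELM_GDT_TOO_FULL */
        return VINF_SUCCESS;
    }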
trunk/src/VBox/VMM/VMMR3/SELM.cpp
--- trunk/src/VBox/VMM/VMMR3/SELM.cpp (r42371)
+++ trunk/src/VBox/VMM/VMMR3/SELM.cpp (r42372)
@@ -205,6 +205,6 @@
     STAM_REG(pVM, &pVM->selm.s.StatUpdateFromCPUM, STAMTYPE_PROFILE, "/PROF/SELM/UpdateFromCPUM", STAMUNIT_TICKS_PER_CALL, "Profiling of the SELMR3UpdateFromCPUM() body.");
 
-    STAM_REG(pVM, &pVM->selm.s.StatHyperSelsChanged, STAMTYPE_COUNTER, "/SELM/HyperSels/Changed", STAMUNIT_OCCURENCES, "The number of times we had to relocate our hypervisor selectors.");
-    STAM_REG(pVM, &pVM->selm.s.StatScanForHyperSels, STAMTYPE_COUNTER, "/SELM/HyperSels/Scan",    STAMUNIT_OCCURENCES, "The number of times we had find free hypervisor selectors.");
+    STAM_REL_REG(pVM, &pVM->selm.s.StatHyperSelsChanged, STAMTYPE_COUNTER, "/SELM/HyperSels/Changed", STAMUNIT_OCCURENCES, "The number of times we had to relocate our hypervisor selectors.");
+    STAM_REL_REG(pVM, &pVM->selm.s.StatScanForHyperSels, STAMTYPE_COUNTER, "/SELM/HyperSels/Scan",    STAMUNIT_OCCURENCES, "The number of times we had find free hypervisor selectors.");
 
     /*
@@ -774,544 +774,582 @@
 
 
-/**
- * Updates the Guest GDT & LDT virtualization based on current CPU state.
- *
- * @returns VBox status code.
- * @param   pVM     Pointer to the VM.
- * @param   pVCpu   Pointer to the VMCPU.
- */
-VMMR3DECL(int) SELMR3UpdateFromCPUM(PVM pVM, PVMCPU pVCpu)
-{
-    int rc = VINF_SUCCESS;
-
-    if (pVM->selm.s.fDisableMonitoring)
-    {
-        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
-        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
-        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
-
-        return VINF_SUCCESS;
-    }
-
-    STAM_PROFILE_START(&pVM->selm.s.StatUpdateFromCPUM, a);
-
-    /*
-     * GDT sync
-     */
-    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
-    {
-        /*
-         * Always assume the best
-         */
-        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
+static int selmR3UpdateShadowGdt(PVM pVM, PVMCPU pVCpu)
+{
+    /*
+     * Always assume the best...
+     */
+    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
 
     /* If the GDT was changed, then make sure the LDT is checked too */
     /** @todo only do this if the actual ldtr selector was changed; this is a bit excessive */
     VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
     /* Same goes for the TSS selector */
     VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
 
     /*
      * Get the GDTR and check if there is anything to do (there usually is).
      */
     VBOXGDTR    GDTR;
     CPUMGetGuestGDTR(pVCpu, &GDTR);
     if (GDTR.cbGdt < sizeof(X86DESC))
     {
         Log(("No GDT entries...\n"));
-        STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
         return VINF_SUCCESS;
     }
 
     /*
      * Read the Guest GDT.
      * ASSUMES that the entire GDT is in memory.
      */
     RTUINT      cbEffLimit = GDTR.cbGdt;
     PX86DESC    pGDTE = &pVM->selm.s.paGdtR3[1];
-    rc = PGMPhysSimpleReadGCPtr(pVCpu, pGDTE, GDTR.pGdt + sizeof(X86DESC), cbEffLimit + 1 - sizeof(X86DESC));
+    int rc = PGMPhysSimpleReadGCPtr(pVCpu, pGDTE, GDTR.pGdt + sizeof(X86DESC), cbEffLimit + 1 - sizeof(X86DESC));
     if (RT_FAILURE(rc))
     {
         /*
          * Read it page by page.
          *
          * Keep track of the last valid page and delay memsets and
          * adjust cbEffLimit to reflect the effective size. The latter
          * is something we do in the belief that the guest will probably
          * never actually commit the last page, thus allowing us to keep
          * our selectors in the high end of the GDT.
          */
         RTUINT  cbLeft = cbEffLimit + 1 - sizeof(X86DESC);
         RTGCPTR GCPtrSrc = (RTGCPTR)GDTR.pGdt + sizeof(X86DESC);
         uint8_t *pu8Dst = (uint8_t *)&pVM->selm.s.paGdtR3[1];
         uint8_t *pu8DstInvalid = pu8Dst;
 
         while (cbLeft)
         {
             RTUINT cb = PAGE_SIZE - (GCPtrSrc & PAGE_OFFSET_MASK);
             cb = RT_MIN(cb, cbLeft);
             rc = PGMPhysSimpleReadGCPtr(pVCpu, pu8Dst, GCPtrSrc, cb);
             if (RT_SUCCESS(rc))
             {
                 if (pu8DstInvalid != pu8Dst)
                     memset(pu8DstInvalid, 0, pu8Dst - pu8DstInvalid);
                 GCPtrSrc += cb;
                 pu8Dst += cb;
                 pu8DstInvalid = pu8Dst;
             }
             else if (   rc == VERR_PAGE_NOT_PRESENT
                      || rc == VERR_PAGE_TABLE_NOT_PRESENT)
             {
                 GCPtrSrc += cb;
                 pu8Dst += cb;
             }
             else
             {
-                AssertReleaseMsgFailed(("Couldn't read GDT at %016RX64, rc=%Rrc!\n", GDTR.pGdt, rc));
-                STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
-                return VERR_NOT_IMPLEMENTED;
+                AssertLogRelMsgFailed(("Couldn't read GDT at %016RX64, rc=%Rrc!\n", GDTR.pGdt, rc));
+                return VERR_SELM_GDT_READ_ERROR;
             }
             cbLeft -= cb;
         }
 
         /* any invalid pages at the end? */
         if (pu8DstInvalid != pu8Dst)
         {
             cbEffLimit = pu8DstInvalid - (uint8_t *)pVM->selm.s.paGdtR3 - 1;
             /* If any GDTEs was invalidated, zero them. */
             if (cbEffLimit < pVM->selm.s.cbEffGuestGdtLimit)
                 memset(pu8DstInvalid + cbEffLimit + 1, 0, pVM->selm.s.cbEffGuestGdtLimit - cbEffLimit);
         }
 
         /* keep track of the effective limit. */
         if (cbEffLimit != pVM->selm.s.cbEffGuestGdtLimit)
         {
             Log(("SELMR3UpdateFromCPUM: cbEffGuestGdtLimit=%#x -> %#x (actual %#x)\n",
                  pVM->selm.s.cbEffGuestGdtLimit, cbEffLimit, GDTR.cbGdt));
             pVM->selm.s.cbEffGuestGdtLimit = cbEffLimit;
         }
     }
 
     /*
      * Check if the Guest GDT intrudes on our GDT entries.
      */
     /** @todo we should try to minimize relocations by making sure our current selectors can be reused. */
     RTSEL aHyperSel[SELM_HYPER_SEL_MAX];
     if (cbEffLimit >= SELM_HYPER_DEFAULT_BASE)
     {
         PX86DESC pGDTEStart = pVM->selm.s.paGdtR3;
         PX86DESC pGDTECur   = (PX86DESC)((char *)pGDTEStart + GDTR.cbGdt + 1 - sizeof(X86DESC));
         int      iGDT       = 0;
 
         Log(("Internal SELM GDT conflict: use non-present entries\n"));
-        STAM_COUNTER_INC(&pVM->selm.s.StatScanForHyperSels);
+        STAM_REL_COUNTER_INC(&pVM->selm.s.StatScanForHyperSels);
         while (pGDTECur > pGDTEStart)
         {
             /* We can reuse non-present entries */
             if (!pGDTECur->Gen.u1Present)
             {
                 aHyperSel[iGDT] = ((uintptr_t)pGDTECur - (uintptr_t)pVM->selm.s.paGdtR3) / sizeof(X86DESC);
                 aHyperSel[iGDT] = aHyperSel[iGDT] << X86_SEL_SHIFT;
                 Log(("SELM: Found unused GDT %04X\n", aHyperSel[iGDT]));
                 iGDT++;
                 if (iGDT >= SELM_HYPER_SEL_MAX)
                     break;
             }
 
             pGDTECur--;
         }
         if (iGDT != SELM_HYPER_SEL_MAX)
         {
-            AssertReleaseMsgFailed(("Internal SELM GDT conflict.\n"));
-            STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
-            return VERR_NOT_IMPLEMENTED;
+            AssertLogRelMsgFailed(("Internal SELM GDT conflict.\n"));
+            return VERR_SELM_GDT_TOO_FULL;
         }
     }
     else
     {
         aHyperSel[SELM_HYPER_SEL_CS]         = SELM_HYPER_DEFAULT_SEL_CS;
         aHyperSel[SELM_HYPER_SEL_DS]         = SELM_HYPER_DEFAULT_SEL_DS;
         aHyperSel[SELM_HYPER_SEL_CS64]       = SELM_HYPER_DEFAULT_SEL_CS64;
         aHyperSel[SELM_HYPER_SEL_TSS]        = SELM_HYPER_DEFAULT_SEL_TSS;
         aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = SELM_HYPER_DEFAULT_SEL_TSS_TRAP08;
     }
 
     /*
      * Work thru the copied GDT entries adjusting them for correct virtualization.
      */
     PX86DESC pGDTEEnd = (PX86DESC)((char *)pGDTE + cbEffLimit + 1 - sizeof(X86DESC));
     while (pGDTE < pGDTEEnd)
     {
         if (pGDTE->Gen.u1Present)
         {
             /*
              * Code and data selectors are generally 1:1, with the
              * 'little' adjustment we do for DPL 0 selectors.
              */
             if (pGDTE->Gen.u1DescType)
             {
                 /*
                  * Hack for A-bit against Trap E on read-only GDT.
                  */
                 /** @todo Fix this by loading ds and cs before turning off WP. */
                 pGDTE->Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
 
                 /*
                  * All DPL 0 code and data segments are squeezed into DPL 1.
                  *
                  * We're skipping conforming segments here because those
                  * cannot give us any trouble.
                  */
                 if (    pGDTE->Gen.u2Dpl == 0
                     &&      (pGDTE->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
                         !=  (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
                     pGDTE->Gen.u2Dpl = 1;
             }
             else
             {
                 /*
                  * System type selectors are marked not present.
                  * Recompiler or special handling is required for these.
                  */
                 /** @todo what about interrupt gates and rawr0? */
                 pGDTE->Gen.u1Present = 0;
             }
         }
 
         /* Next GDT entry. */
         pGDTE++;
     }
 
     /*
      * Check if our hypervisor selectors were changed.
      */
     if (    aHyperSel[SELM_HYPER_SEL_CS]         != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]
         ||  aHyperSel[SELM_HYPER_SEL_DS]         != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]
         ||  aHyperSel[SELM_HYPER_SEL_CS64]       != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]
         ||  aHyperSel[SELM_HYPER_SEL_TSS]        != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]
         ||  aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] != pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08])
     {
         /* Reinitialize our hypervisor GDTs */
         pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS]         = aHyperSel[SELM_HYPER_SEL_CS];
         pVM->selm.s.aHyperSel[SELM_HYPER_SEL_DS]         = aHyperSel[SELM_HYPER_SEL_DS];
         pVM->selm.s.aHyperSel[SELM_HYPER_SEL_CS64]       = aHyperSel[SELM_HYPER_SEL_CS64];
         pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS]        = aHyperSel[SELM_HYPER_SEL_TSS];
         pVM->selm.s.aHyperSel[SELM_HYPER_SEL_TSS_TRAP08] = aHyperSel[SELM_HYPER_SEL_TSS_TRAP08];
 
-        STAM_COUNTER_INC(&pVM->selm.s.StatHyperSelsChanged);
+        STAM_REL_COUNTER_INC(&pVM->selm.s.StatHyperSelsChanged);
 
         /*
          * Do the relocation callbacks to let everyone update their hyper selector dependencies.
          * (SELMR3Relocate will call selmR3SetupHyperGDTSelectors() for us.)
          */
         VMR3Relocate(pVM, 0);
     }
     else if (cbEffLimit >= SELM_HYPER_DEFAULT_BASE)
         /* We overwrote all entries above, so we have to save them again. */
         selmR3SetupHyperGDTSelectors(pVM);
 
     /*
      * Adjust the cached GDT limit.
      * Any GDT entries which have been removed must be cleared.
      */
     if (pVM->selm.s.GuestGdtr.cbGdt != GDTR.cbGdt)
     {
         if (pVM->selm.s.GuestGdtr.cbGdt > GDTR.cbGdt)
             memset(pGDTE, 0, pVM->selm.s.GuestGdtr.cbGdt - GDTR.cbGdt);
     }
 
     /*
      * Check if Guest's GDTR is changed.
      */
     if (    GDTR.pGdt  != pVM->selm.s.GuestGdtr.pGdt
         ||  GDTR.cbGdt != pVM->selm.s.GuestGdtr.cbGdt)
     {
         Log(("SELMR3UpdateFromCPUM: Guest's GDT is changed to pGdt=%016RX64 cbGdt=%08X\n", GDTR.pGdt, GDTR.cbGdt));
 
         /*
          * [Re]Register write virtual handler for guest's GDT.
          */
         if (pVM->selm.s.GuestGdtr.pGdt != RTRCPTR_MAX && pVM->selm.s.fGDTRangeRegistered)
         {
             rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GuestGdtr.pGdt);
             AssertRC(rc);
         }
 
-        rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GDTR.pGdt, GDTR.pGdt + GDTR.cbGdt /* already inclusive */,
-                                         0, selmR3GuestGDTWriteHandler, "selmRCGuestGDTWriteHandler", 0, "Guest GDT write access handler");
+        rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE,
+                                         GDTR.pGdt, GDTR.pGdt + GDTR.cbGdt /* already inclusive */,
+                                         0, selmR3GuestGDTWriteHandler, "selmRCGuestGDTWriteHandler", 0,
+                                         "Guest GDT write access handler");
         if (RT_FAILURE(rc))
             return rc;
 
         /* Update saved Guest GDTR. */
         pVM->selm.s.GuestGdtr = GDTR;
         pVM->selm.s.fGDTRangeRegistered = true;
     }
-    }
 
-    /*
-     * TSS sync
-     */
-    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
-    {
-        SELMR3SyncTSS(pVM, pVCpu);
-    }
-
-    /*
-     * LDT sync
-     */
-    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_LDT))
-    {
-        /*
-         * Always assume the best
-         */
-        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Updates (syncs) the shadow LDT.
+ *
+ * @returns VBox status code.
+ * @param   pVM     The VM handle.
+ * @param   pVCpu   The current virtual CPU.
+ */
+static int selmR3UpdateShadowLdt(PVM pVM, PVMCPU pVCpu)
+{
+    int rc = VINF_SUCCESS;
+
+    /*
+     * Always assume the best...
+     */
+    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
 
     /*
      * LDT handling is done similarly to the GDT handling with a shadow
      * array. However, since the LDT is expected to be swappable (at least
      * some ancient OSes makes it swappable) it must be floating and
      * synced on a per-page basis.
      *
      * Eventually we will change this to be fully on demand. Meaning that
      * we will only sync pages containing LDT selectors actually used and
      * let the #PF handler lazily sync pages as they are used.
      * (This applies to GDT too, when we start making OS/2 fast.)
      */
 
     /*
      * First, determine the current LDT selector.
      */
     RTSEL SelLdt = CPUMGetGuestLDTR(pVCpu);
     if ((SelLdt & X86_SEL_MASK) == 0)
     {
         /* ldtr = 0 - update hyper LDTR and deregister any active handler. */
         CPUMSetHyperLDTR(pVCpu, 0);
         if (pVM->selm.s.GCPtrGuestLdt != RTRCPTR_MAX)
         {
             rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
             AssertRC(rc);
             pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
         }
-        STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
         return VINF_SUCCESS;
     }
 
     /*
      * Get the LDT selector.
      */
     PX86DESC    pDesc    = &pVM->selm.s.paGdtR3[SelLdt >> X86_SEL_SHIFT];
     RTGCPTR     GCPtrLdt = X86DESC_BASE(*pDesc);
     unsigned    cbLdt    = X86DESC_LIMIT(*pDesc);
     if (pDesc->Gen.u1Granularity)
         cbLdt = (cbLdt << PAGE_SHIFT) | PAGE_OFFSET_MASK;
 
     /*
      * Validate it.
      */
     if (    !cbLdt
         ||  SelLdt >= pVM->selm.s.GuestGdtr.cbGdt
         ||  pDesc->Gen.u1DescType
         ||  pDesc->Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
     {
         AssertMsg(!cbLdt, ("Invalid LDT %04x!\n", SelLdt));
 
         /* cbLdt > 0:
          * This is quite impossible, so we do as most people do when faced with
          * the impossible, we simply ignore it.
          */
         CPUMSetHyperLDTR(pVCpu, 0);
         if (pVM->selm.s.GCPtrGuestLdt != RTRCPTR_MAX)
         {
             rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
             AssertRC(rc);
             pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
         }
-        STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
         return VINF_SUCCESS;
     }
     /** @todo check what intel does about odd limits. */
     AssertMsg(RT_ALIGN(cbLdt + 1, sizeof(X86DESC)) == cbLdt + 1 && cbLdt <= 0xffff, ("cbLdt=%d\n", cbLdt));
 
     /*
      * Use the cached guest ldt address if the descriptor has already been modified (see below)
      * (this is necessary due to redundant LDT updates; see todo above at GDT sync)
      */
     if (MMHyperIsInsideArea(pVM, GCPtrLdt))
         GCPtrLdt = pVM->selm.s.GCPtrGuestLdt;   /* use the old one */
 
 
     /** @todo Handle only present LDT segments. */
 //    if (pDesc->Gen.u1Present)
     {
         /*
          * Check if Guest's LDT address/limit is changed.
          */
         if (    GCPtrLdt != pVM->selm.s.GCPtrGuestLdt
             ||  cbLdt != pVM->selm.s.cbLdtLimit)
         {
             Log(("SELMR3UpdateFromCPUM: Guest LDT changed to from %RGv:%04x to %RGv:%04x. (GDTR=%016RX64:%04x)\n",
                  pVM->selm.s.GCPtrGuestLdt, pVM->selm.s.cbLdtLimit, GCPtrLdt, cbLdt, pVM->selm.s.GuestGdtr.pGdt, pVM->selm.s.GuestGdtr.cbGdt));
 
             /*
              * [Re]Register write virtual handler for guest's GDT.
              * In the event of LDT overlapping something, don't install it just assume it's being updated.
              */
             if (pVM->selm.s.GCPtrGuestLdt != RTRCPTR_MAX)
             {
                 rc = PGMHandlerVirtualDeregister(pVM, pVM->selm.s.GCPtrGuestLdt);
                 AssertRC(rc);
             }
 #ifdef DEBUG
             if (pDesc->Gen.u1Present)
                 Log(("LDT selector marked not present!!\n"));
 #endif
             rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_WRITE, GCPtrLdt, GCPtrLdt + cbLdt /* already inclusive */,
                                              0, selmR3GuestLDTWriteHandler, "selmRCGuestLDTWriteHandler", 0, "Guest LDT write access handler");
             if (rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT)
             {
                 /** @todo investigate the various cases where conflicts happen and try avoid them by enh. the instruction emulation. */
                 pVM->selm.s.GCPtrGuestLdt = RTRCPTR_MAX;
                 Log(("WARNING: Guest LDT (%RGv:%04x) conflicted with existing access range!! Assumes LDT is begin updated. (GDTR=%016RX64:%04x)\n",
                      GCPtrLdt, cbLdt, pVM->selm.s.GuestGdtr.pGdt, pVM->selm.s.GuestGdtr.cbGdt));
             }
             else if (RT_SUCCESS(rc))
                 pVM->selm.s.GCPtrGuestLdt = GCPtrLdt;
             else
             {
                 CPUMSetHyperLDTR(pVCpu, 0);
-                STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
                 return rc;
             }
 
             pVM->selm.s.cbLdtLimit = cbLdt;
         }
     }
 
     /*
      * Calc Shadow LDT base.
      */
     unsigned    off;
     pVM->selm.s.offLdtHyper = off = (GCPtrLdt & PAGE_OFFSET_MASK);
     RTGCPTR     GCPtrShadowLDT = (RTGCPTR)((RTGCUINTPTR)pVM->selm.s.pvLdtRC + off);
     PX86DESC    pShadowLDT = (PX86DESC)((uintptr_t)pVM->selm.s.pvLdtR3 + off);
 
     /*
      * Enable the LDT selector in the shadow GDT.
      */
     pDesc->Gen.u1Present   = 1;
     pDesc->Gen.u16BaseLow  = RT_LOWORD(GCPtrShadowLDT);
     pDesc->Gen.u8BaseHigh1 = RT_BYTE3(GCPtrShadowLDT);
     pDesc->Gen.u8BaseHigh2 = RT_BYTE4(GCPtrShadowLDT);
     pDesc->Gen.u1Available = 0;
     pDesc->Gen.u1Long      = 0;
     if (cbLdt > 0xffff)
     {
         cbLdt = 0xffff;
         pDesc->Gen.u4LimitHigh = 0;
         pDesc->Gen.u16LimitLow = pDesc->Gen.u1Granularity ? 0xf : 0xffff;
     }
 
     /*
      * Set Hyper LDTR and notify TRPM.
      */
     CPUMSetHyperLDTR(pVCpu, SelLdt);
 
     /*
      * Loop synchronising the LDT page by page.
      */
     /** @todo investigate how intel handle various operations on half present cross page entries. */
     off = GCPtrLdt & (sizeof(X86DESC) - 1);
     AssertMsg(!off, ("LDT is not aligned on entry size! GCPtrLdt=%08x\n", GCPtrLdt));
 
     /* Note: Do not skip the first selector; unlike the GDT, a zero LDT selector is perfectly valid. */
     unsigned    cbLeft = cbLdt + 1;
     PX86DESC    pLDTE = pShadowLDT;
     while (cbLeft)
     {
         /*
          * Read a chunk.
          */
         unsigned cbChunk = PAGE_SIZE - ((RTGCUINTPTR)GCPtrLdt & PAGE_OFFSET_MASK);
         if (cbChunk > cbLeft)
             cbChunk = cbLeft;
         rc = PGMPhysSimpleReadGCPtr(pVCpu, pShadowLDT, GCPtrLdt, cbChunk);
         if (RT_SUCCESS(rc))
         {
             /*
              * Mark page
              */
             rc = PGMMapSetPage(pVM, GCPtrShadowLDT & PAGE_BASE_GC_MASK, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D);
             AssertRC(rc);
 
             /*
              * Loop thru the available LDT entries.
              * Figure out where to start and end and the potential cross pageness of
              * things adds a little complexity. pLDTE is updated there and not in the
              * 'next' part of the loop. The pLDTEEnd is inclusive.
              */
             PX86DESC pLDTEEnd = (PX86DESC)((uintptr_t)pShadowLDT + cbChunk) - 1;
             if (pLDTE + 1 < pShadowLDT)
                 pLDTE = (PX86DESC)((uintptr_t)pShadowLDT + off);
             while (pLDTE <= pLDTEEnd)
             {
                 if (pLDTE->Gen.u1Present)
                 {
                     /*
                      * Code and data selectors are generally 1:1, with the
                      * 'little' adjustment we do for DPL 0 selectors.
                      */
                     if (pLDTE->Gen.u1DescType)
                     {
                         /*
                          * Hack for A-bit against Trap E on read-only GDT.
                          */
                         /** @todo Fix this by loading ds and cs before turning off WP. */
                         if (!(pLDTE->Gen.u4Type & X86_SEL_TYPE_ACCESSED))
                             pLDTE->Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
 
                         /*
                          * All DPL 0 code and data segments are squeezed into DPL 1.
                          *
                          * We're skipping conforming segments here because those
                          * cannot give us any trouble.
                          */
                         if (    pLDTE->Gen.u2Dpl == 0
                             &&      (pLDTE->Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
                                 !=  (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF) )
                             pLDTE->Gen.u2Dpl = 1;
                     }
                     else
                     {
                         /*
                          * System type selectors are marked not present.
                          * Recompiler or special handling is required for these.
                          */
                         /** @todo what about interrupt gates and rawr0? */
                         pLDTE->Gen.u1Present = 0;
                     }
                 }
 
                 /* Next LDT entry. */
                 pLDTE++;
             }
         }
         else
         {
             AssertMsg(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("rc=%Rrc\n", rc));
             rc = PGMMapSetPage(pVM, GCPtrShadowLDT & PAGE_BASE_GC_MASK, PAGE_SIZE, 0);
             AssertRC(rc);
         }
 
         /*
          * Advance to the next page.
          */
         cbLeft          -= cbChunk;
         GCPtrShadowLDT  += cbChunk;
         pShadowLDT       = (PX86DESC)((char *)pShadowLDT + cbChunk);
         GCPtrLdt        += cbChunk;
     }
-    }
 
-    STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
     return VINF_SUCCESS;
 }
+
+
+/**
+ * Updates the Guest GDT & LDT virtualization based on current CPU state.
+ *
+ * @returns VBox status code.
+ * @param   pVM     Pointer to the VM.
+ * @param   pVCpu   Pointer to the VMCPU.
+ */
+VMMR3DECL(int) SELMR3UpdateFromCPUM(PVM pVM, PVMCPU pVCpu)
+{
+    if (pVM->selm.s.fDisableMonitoring)
+    {
+        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
+        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_LDT);
+        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
+        return VINF_SUCCESS;
+    }
+
+    STAM_PROFILE_START(&pVM->selm.s.StatUpdateFromCPUM, a);
+
+    /*
+     * GDT sync
+     */
+    int rc;
+    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_GDT))
+    {
+        rc = selmR3UpdateShadowGdt(pVM, pVCpu);
+        if (RT_FAILURE(rc))
+            return rc; /* We're toast, so forget the profiling. */
+        AssertRCSuccess(rc);
+    }
+
+    /*
+     * TSS sync
+     */
+    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
+    {
+        rc = SELMR3SyncTSS(pVM, pVCpu);
+        if (RT_FAILURE(rc))
+            return rc;
+        AssertRCSuccess(rc);
+    }
+
+    /*
+     * LDT sync
+     */
+    if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_LDT))
+    {
+        rc = selmR3UpdateShadowLdt(pVM, pVCpu);
+        if (RT_FAILURE(rc))
+            return rc;
+        AssertRCSuccess(rc);
+    }
+
+#if 0
+    /*
+     * Check for stale selectors and load hidden register bits where they
+     * are missing.
+     */
+    PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu);
+#endif
+    rc = VINF_SUCCESS;
 
     STAM_PROFILE_STOP(&pVM->selm.s.StatUpdateFromCPUM, a);
-    return VINF_SUCCESS;
+    return rc;
 }
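With the split, a failure in any of the three sync steps now reaches the caller as a status code instead of tripping a release assertion, so callers of SELMR3UpdateFromCPUM() are expected to test the result. A hedged sketch of the calling pattern (the force-action flags and the function signature are as in the diff above; the surrounding fragment is illustrative, not the actual EM loop code):

    /* Illustrative execution-loop fragment: sync the shadow tables before
       reentering the guest whenever a SELM force-action flag is pending. */
    if (   VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_GDT)
        || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_LDT)
        || VMCPU_FF_ISSET(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
    {
        int rc = SELMR3UpdateFromCPUM(pVM, pVCpu);
        if (RT_FAILURE(rc))
            return rc;  /* e.g. VERR_SELM_GDT_READ_ERROR or VERR_SELM_GDT_TOO_FULL */
    }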