Changeset 49523 in vbox for trunk/src/VBox
Timestamp: Nov 18, 2013 10:59:01 AM (11 years ago)
Location:  trunk/src/VBox/VMM
Files:     7 edited
Legend: '-' removed line, '+' added line, '…' elided context.
trunk/src/VBox/VMM/VMMR0/HMR0A.asm (r48964 → r49523)

  ; @param 1      full width register name
  ; @param 2      16-bit register name for \a 1.
 
 -%ifdef MAYBE_64_BIT
 -; Save a host and load the corresponding guest MSR (trashes rdx & rcx)
 -%macro LOADGUESTMSR 2
 -    mov     rcx, %1
 -    rdmsr
 -    push    rdx
 -    push    rax
 -    mov     edx, dword [xSI + %2 + 4]
 -    mov     eax, dword [xSI + %2]
 -    wrmsr
 -%endmacro
 -
 -; Save a guest and load the corresponding host MSR (trashes rdx & rcx)
 -; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
 -%macro LOADHOSTMSREX 2
 -    mov     rcx, %1
 -    rdmsr
 -    mov     dword [xSI + %2], eax
 -    mov     dword [xSI + %2 + 4], edx
 -    pop     rax
 -    pop     rdx
 -    wrmsr
 -%endmacro
 -
 -; Load the corresponding host MSR (trashes rdx & rcx)
 -%macro LOADHOSTMSR 1
 -    mov     rcx, %1
 -    pop     rax
 -    pop     rdx
 -    wrmsr
 -%endmacro
 -%endif
 
  %ifdef ASM_CALL64_GCC
 …
  %ifdef VBOX_SKIP_RESTORE_SEG
  (whitespace-only change from here on: the %macro/%if directives of the
  MYPUSHSEGS64 and MYPOPSEGS64 macros are reindented, bodies unchanged;
  shown once below)

  %macro MYPUSHSEGS64 2
  %endmacro

  %macro MYPOPSEGS64 2
  %endmacro
  %else ; !VBOX_SKIP_RESTORE_SEG
  ; trashes, rax, rdx & rcx
  %macro MYPUSHSEGS64 2
  %ifndef HM_64_BIT_USE_NULL_SEL
     mov     %2, es
     push    %1
     mov     %2, ds
     push    %1
  %endif

     ; Special case for FS; Windows and Linux either don't use it or restore it when leaving kernel mode, Solaris OTOH doesn't and we must save it.
 …
     push    rdx
     push    rax
  %ifndef HM_64_BIT_USE_NULL_SEL
     push    fs
  %endif

     ; Special case for GS; OSes typically use swapgs to reset the hidden base register for GS on entry into the kernel. The same happens on exit
 …
     push    rdx
     push    rax
  %ifndef HM_64_BIT_USE_NULL_SEL
     push    gs
  %endif
  %endmacro

  ; trashes, rax, rdx & rcx
  %macro MYPOPSEGS64 2
     ; Note: do not step through this code with a debugger!
  %ifndef HM_64_BIT_USE_NULL_SEL
     xor     eax, eax
     mov     ds, ax
 …
     mov     fs, ax
     mov     gs, ax
  %endif

  %ifndef HM_64_BIT_USE_NULL_SEL
     pop     gs
  %endif
     pop     rax
     pop     rdx
 …
     wrmsr

  %ifndef HM_64_BIT_USE_NULL_SEL
     pop     fs
  %endif
     pop     rax
     pop     rdx
 …
     ; Now it's safe to step again

  %ifndef HM_64_BIT_USE_NULL_SEL
     pop     %1
     mov     ds, %2
     pop     %1
     mov     es, %2
  %endif
  %endmacro
  %endif ; VBOX_SKIP_RESTORE_SEG
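Note: the removed LOADGUESTMSR/LOADHOSTMSREX pair implemented a manual MSR swap around VM-entry and VM-exit. A minimal C sketch of what they did, assuming IPRT's ASMRdMsr/ASMWrMsr accessors; the MSRSWAP structure and function names are invented for illustration (the assembly keeps the saved host value on the stack rather than in a structure):

    #include <stdint.h>

    /* IPRT-style MSR accessors (assumed; declared to keep the sketch self-contained). */
    extern uint64_t ASMRdMsr(uint32_t uMsr);
    extern void     ASMWrMsr(uint32_t uMsr, uint64_t uValue);

    /* Hypothetical per-MSR bookkeeping. */
    typedef struct MSRSWAP
    {
        uint32_t uMsr;    /* e.g. MSR_K8_LSTAR */
        uint64_t uHost;   /* host value saved at VM-entry */
        uint64_t uGuest;  /* guest value from the CPU context */
    } MSRSWAP;

    /* VM-entry side (cf. LOADGUESTMSR): save the host value, load the guest value. */
    static void loadGuestMsr(MSRSWAP *pSwap)
    {
        pSwap->uHost = ASMRdMsr(pSwap->uMsr);
        ASMWrMsr(pSwap->uMsr, pSwap->uGuest);
    }

    /* VM-exit side (cf. LOADHOSTMSREX): re-read the guest value first, because an
       MSR like KERNEL_GS_BASE can change behind our back (swapgs), then restore
       the host value. */
    static void loadHostMsrEx(MSRSWAP *pSwap)
    {
        pSwap->uGuest = ASMRdMsr(pSwap->uMsr);
        ASMWrMsr(pSwap->uMsr, pSwap->uHost);
    }

The whole changeset replaces this software swap with VT-x's hardware MSR auto-load/store areas, which is why the macros can be deleted.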
trunk/src/VBox/VMM/VMMR0/HMR0Mixed.mac (r49018 → r49523)

     pop     xSI         ; pCtx (needed in rsi by the macros below)
 
 -%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
 -    ; Save the guest MSRs and load the host MSRs.
 -    LOADHOSTMSREX MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
 -    LOADHOSTMSREX MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
 -    LOADHOSTMSREX MSR_K6_STAR, CPUMCTX.msrSTAR
 -    LOADHOSTMSREX MSR_K8_LSTAR, CPUMCTX.msrLSTAR
 -%endif
 -
  %ifdef VMX_USE_CACHED_VMCS_ACCESSES
     pop     xDX         ; Saved pCache
 …
     ; Save the pCache pointer.
     push    xBX
 -%endif
 -
 -%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
 -    ; Save the host MSRs and load the guest MSRs.
 -    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
 -    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
 -    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
 -    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
  %endif
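Note: with the manual macros gone, the syscall MSRs (LSTAR, STAR, SFMASK, KERNEL_GS_BASE) go through the VT-x MSR auto-load/store area instead. Each entry of that area has the 16-byte format defined by the Intel SDM; the field names u32Msr/u64Value below come from the Log4 statement in the HMVMXR0.cpp diff further down, while the reserved dword and the addAutoMsr helper are assumptions for illustration:

    #include <stdint.h>

    /* One entry of the VT-x MSR auto-load/store area. */
    typedef struct VMXAUTOMSR
    {
        uint32_t u32Msr;       /* MSR index, e.g. MSR_K8_LSTAR */
        uint32_t u32Reserved;  /* must be zero per the Intel SDM */
        uint64_t u64Value;     /* value the CPU loads/stores for us */
    } VMXAUTOMSR;

    /* Hypothetical helper: append one MSR to the page backing the area. The real
       code must also mirror the entry count into the VMCS entry/exit controls
       (cf. cMsrs and hmR0VmxUpdateAutoLoadStoreHostMsrs in the diffs below). */
    static void addAutoMsr(VMXAUTOMSR *paMsrs, uint32_t *pcMsrs,
                           uint32_t uMsr, uint64_t uValue)
    {
        VMXAUTOMSR *pEntry = &paMsrs[*pcMsrs];
        pEntry->u32Msr      = uMsr;
        pEntry->u32Reserved = 0;
        pEntry->u64Value    = uValue;
        ++*pcMsrs;
    }

The CPU then loads every listed guest value on VM-entry and stores/loads the listed values on VM-exit, removing the rdmsr/wrmsr round trips from the world-switch path.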
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r49521 → r49523)

      AssertPtr(pVCpu);
 
 -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
      hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
      hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
 -#endif
 
      if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
 …
      VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
      VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
 -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
      VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
      VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
 -#endif
  }
  #undef VMXLOCAL_INIT_VMCPU_MEMOBJ
 …
 -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
      /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
      rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
 …
      if (RT_FAILURE(rc))
          goto cleanup;
 -#endif
  }
 …
 -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
  /**
   * Updates the VMCS with the number of effective MSRs in the auto-load/store MSR
 …
  }
  # endif /* VBOX_STRICT */
 -#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
 …
  #endif
 
 -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
 -    /* Setup MSR autoloading/storing. */
 +    /* Setup MSR auto-load/store area. */
      Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
      Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf));    /* Lower 4 bits MBZ. */
 …
      rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
      AssertRCReturn(rc, rc);
 -#endif
 
      /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
 …
      int rc = VINF_SUCCESS;
 -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
  #if 0
      PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
 …
      if (pVCpu->hm.s.vmx.cMsrs > 0)
          hmR0VmxUpdateAutoLoadStoreHostMsrs(pVCpu);
 -#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
 
      /*
 …
      if (VMCPU_HMCF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS))
      {
 -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
          /* See Intel spec. 4.1.4 "Enumeration of Paging Features by CPUID". */
          PVM pVM = pVCpu->CTX_SUFF(pVM);
 …
              Log4(("MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value));
  # endif
 -#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
 -
          VMCPU_HMCF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);
      }
 …
          return VINF_SUCCESS;
 
 -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
      PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
      Log4(("hmR0VmxSaveGuestAutoLoadStoreMsrs: cMsrs=%u\n", pVCpu->hm.s.vmx.cMsrs));
 …
          }
      }
 -#endif
 
      pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS;
 …
         to start executing. */
 
 -    /** @todo Get rid of VBOX_WITH_AUTO_MSR_LOAD_RESTORE define. */
 -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
      /*
       * Load the TSC_AUX MSR when we are not intercepting RDTSCP.
 …
  #ifdef VBOX_STRICT
      hmR0VmxCheckAutoLoadStoreMsrs(pVCpu);
 -#endif
 -#else
 -    /*
 -     * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that
 -     * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}.
 -     */
 -    if (   (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
 -        && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
 -    {
 -        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
 -        int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64HostTscAuxMsr);
 -        AssertRC(rc2);
 -        ASMWrMsr(MSR_K8_TSC_AUX, u64HostTscAuxMsr);
 -    }
  #endif
  }
 …
      if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
      {
 -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
          /* VT-x restored the host TSC_AUX MSR for us, update the guest value from the VMCS area
             if it could have changed without causing a VM-exit. */
 …
              AssertRC(rc2);
          }
 -#else
 -        /* Update guest's TSC_AUX if it could have changed. */
 -        if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
 -        {
 -            uint64_t u64GuestTscAuxMsr = ASMRdMsr(MSR_K8_TSC_AUX);
 -            CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, u64GuestTscAuxMsr);
 -        }
 -        /* Restore host's TSC_AUX. */
 -        ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
 -#endif
      }
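Note: in the kept path, VT-x restores the host TSC_AUX itself and stores the guest value into the MSR-store area on every VM-exit, so the guest value can be fetched from memory instead of via rdmsr. A hypothetical lookup helper, reusing the VMXAUTOMSR layout sketched earlier (not VBox's actual function):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct VMXAUTOMSR
    {
        uint32_t u32Msr;
        uint32_t u32Reserved;
        uint64_t u64Value;
    } VMXAUTOMSR;

    /* Scan the auto-store area for an MSR and return its value as of the last
       VM-exit; e.g. uMsr = MSR_K8_TSC_AUX (0xc0000103). */
    static bool queryStoredGuestMsr(VMXAUTOMSR const *paMsrs, uint32_t cMsrs,
                                    uint32_t uMsr, uint64_t *puValue)
    {
        for (uint32_t i = 0; i < cMsrs; i++)
            if (paMsrs[i].u32Msr == uMsr)
            {
                *puValue = paMsrs[i].u64Value;
                return true;
            }
        return false;  /* MSR not in the auto-store area */
    }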
trunk/src/VBox/VMM/VMMR3/HM.cpp (r49520 → r49523)

      LogRel(("HM: CPU[%u] ExitCtls          %#RX32\n", i, pVCpu->hm.s.vmx.u32ExitCtls));
      LogRel(("HM: CPU[%u] HCPhysMsrBitmap   %#RHp\n", i, pVCpu->hm.s.vmx.HCPhysMsrBitmap));
 -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
      LogRel(("HM: CPU[%u] HCPhysGuestMsr    %#RHp\n", i, pVCpu->hm.s.vmx.HCPhysGuestMsr));
      LogRel(("HM: CPU[%u] HCPhysHostMsr     %#RHp\n", i, pVCpu->hm.s.vmx.HCPhysHostMsr));
      LogRel(("HM: CPU[%u] cMsrs             %u\n", i, pVCpu->hm.s.vmx.cMsrs));
 -#endif
  }
  /** @todo Log VM-entry event injection control fields
trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac (r48249 → r49523)

  ;
 
 -
 -
 -; Load the corresponding guest MSR (trashes rdx & rcx)
 -%macro LOADGUESTMSR 2
 -    mov     rcx, %1
 -    mov     edx, dword [rsi + %2 + 4]
 -    mov     eax, dword [rsi + %2]
 -    wrmsr
 -%endmacro
 -
 -; Save a guest MSR (trashes rdx & rcx)
 -; Only really useful for gs kernel base as that one can be changed behind our back (swapgs)
 -%macro SAVEGUESTMSR 2
 -    mov     rcx, %1
 -    rdmsr
 -    mov     dword [rsi + %2], eax
 -    mov     dword [rsi + %2 + 4], edx
 -%endmacro
 -
  ;; @def MYPUSHSEGS
  ; Macro saving all segment registers on the stack.
 …
  ; - DR7 (reset to 0x400)
  ; - EFLAGS (reset to RT_BIT(1); not relevant)
 -
 -%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
 -    ; Load the guest LSTAR, CSTAR, SFMASK & KERNEL_GSBASE MSRs.
 -    LOADGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
 -    LOADGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
 -    LOADGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
 -    LOADGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
 -%endif
 
  %ifdef VBOX_WITH_CRASHDUMP_MAGIC
 …
      pop     rsi         ; pCtx (needed in rsi by the macros below)
 -
 -%ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
 -    SAVEGUESTMSR MSR_K8_KERNEL_GS_BASE, CPUMCTX.msrKERNELGSBASE
 -    SAVEGUESTMSR MSR_K8_SF_MASK, CPUMCTX.msrSFMASK
 -    SAVEGUESTMSR MSR_K6_STAR, CPUMCTX.msrSTAR
 -    SAVEGUESTMSR MSR_K8_LSTAR, CPUMCTX.msrLSTAR
 -%endif
 
  %ifdef VMX_USE_CACHED_VMCS_ACCESSES
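Note: the removed macros move the 64-bit CPUMCTX field through wrmsr's edx:eax register pair one dword at a time (and recombine the pair after rdmsr). A hypothetical C rendering, shown only to make that dword split explicit:

    #include <stdint.h>

    /* wrmsr consumes the low half in eax and the high half in edx, which is why
       the macros read the 64-bit context field as two 32-bit loads. */
    static void splitMsrValue(uint64_t uValue, uint32_t *pEax, uint32_t *pEdx)
    {
        *pEax = (uint32_t)uValue;          /* low dword  -> eax */
        *pEdx = (uint32_t)(uValue >> 32);  /* high dword -> edx */
    }

    /* rdmsr returns the value the same way; recombine edx:eax into 64 bits. */
    static uint64_t joinMsrValue(uint32_t uEax, uint32_t uEdx)
    {
        return (uint64_t)uEdx << 32 | uEax;
    }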
trunk/src/VBox/VMM/include/HMInternal.h (r49520 → r49523)

  # define HM_PROFILE_EXIT_DISPATCH
  #endif
 -
 -/* The MSR auto load/store used to not work for KERNEL_GS_BASE MSR, thus we
 - * used to handle this MSR manually. See @bugref{6208}. This was clearly visible while
 - * booting Solaris 11 (11.1 b19) VMs with 2 Cpus. This is no longer the case and we
 - * always auto load/store the KERNEL_GS_BASE MSR.
 - *
 - * Note: don't forget to update the assembly files while modifying this!
 - */
 -/** @todo This define should always be in effect and the define itself removed
 -    after 'sufficient' testing. */
 -# define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
 
  RT_C_DECLS_BEGIN
 …
      R0PTRTYPE(void *)           pvMsrBitmap;
 
 -#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
      /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used
       *  for guest MSRs). */
 …
      bool                        fUpdatedHostMsrs;
      uint8_t                     u8Align[7];
 -#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
 
      /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */
trunk/src/VBox/VMM/include/HMInternal.mac (r47652 → r49523)

  %endif
  %endif
 -
 -%define VBOX_WITH_AUTO_MSR_LOAD_RESTORE
 
  ;Maximum number of cached entries.