Changeset 47844 in vbox for trunk/src/VBox
- Timestamp: Aug 19, 2013 2:03:17 PM
- svn:sync-xref-src-repo-rev: 88134
- Location: trunk/src/VBox/VMM
- Files: 11 edited
Legend:
- Lines starting with "+" were added, lines starting with "-" were removed; other lines are unchanged context, and "…" marks omitted unchanged lines between hunks.
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
r47715 → r47844

     /** Indicates that the entry is in use and have valid data. */
     bool        fEnabled;
+    /** Whether it's operating in X2APIC mode (EXTD). */
+    bool        fX2Apic;
+    /** The APIC version number. */
+    uint32_t    uVersion;
     /** Has APIC_REG_LVT_THMR. Not used. */
     uint32_t    fHasThermal;
…

 /**
- * Worker for cpumR0MapLocalApics. Check each CPU for a present Local APIC.
- * Play safe and treat each CPU separate.
+ * Per-CPU callback that probes the CPU for APIC support.
  *
  * @param   idCpu       The identifier for the CPU the function is called on.
…
  * @param   pvUser2     Ignored.
  */
-static DECLCALLBACK(void) cpumR0MapLocalApicWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+static DECLCALLBACK(void) cpumR0MapLocalApicCpuProber(RTCPUID idCpu, void *pvUser1, void *pvUser2)
 {
     NOREF(pvUser1); NOREF(pvUser2);
…
     AssertReturnVoid(iCpu >= 0 && (unsigned)iCpu < RT_ELEMENTS(g_aLApics));

+    /*
+     * Check for APIC support.
+     */
     uint32_t uMaxLeaf, u32EBX, u32ECX, u32EDX;
     ASMCpuId(0, &uMaxLeaf, &u32EBX, &u32ECX, &u32EDX);
…
         && (u32EDX & X86_CPUID_FEATURE_EDX_MSR))
     {
+        /*
+         * Safe to access the MSR. Read it and calc the BASE (a little complicated).
+         */
         uint64_t u64ApicBase = ASMRdMsr(MSR_IA32_APICBASE);
-        uint64_t u64Mask     = UINT64_C(0x0000000ffffff000);
+        uint64_t u64Mask     = MSR_IA32_APICBASE_BASE_MIN;

         /* see Intel Manual: Local APIC Status and Location: MAXPHYADDR default is bit 36 */
…
         }

-        uint64_t const u64PhysBase = u64ApicBase & u64Mask;
-        g_aLApics[iCpu].PhysBase   = (RTHCPHYS)u64PhysBase;
-        g_aLApics[iCpu].fEnabled   = g_aLApics[iCpu].PhysBase == u64PhysBase;
-    }
+        AssertCompile(sizeof(g_aLApics[iCpu].PhysBase) == sizeof(u64ApicBase));
+        g_aLApics[iCpu].PhysBase = u64ApicBase & u64Mask;
+        g_aLApics[iCpu].fEnabled = RT_BOOL(u64ApicBase & MSR_IA32_APICBASE_EN);
+        g_aLApics[iCpu].fX2Apic  = (u64ApicBase & (MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_EN))
+                                ==                (MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_EN);
+        }
+    }
+}
+
+
+/**
+ * Per-CPU callback that verifies our APIC expectations.
+ *
+ * @param   idCpu       The identifier for the CPU the function is called on.
+ * @param   pvUser1     Ignored.
+ * @param   pvUser2     Ignored.
+ */
+static DECLCALLBACK(void) cpumR0MapLocalApicCpuChecker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
+{
+    int iCpu = RTMpCpuIdToSetIndex(idCpu);
+    AssertReturnVoid(iCpu >= 0 && (unsigned)iCpu < RT_ELEMENTS(g_aLApics));
+    if (!g_aLApics[iCpu].fEnabled)
+        return;
+
+    /*
+     * 0x0X       82489 external APIC
+     * 0x1X       Local APIC
+     * 0x2X..0xFF reserved
+     */
+    uint32_t uApicVersion;
+    if (g_aLApics[iCpu].fX2Apic)
+        uApicVersion = ApicX2RegRead32(APIC_REG_VERSION);
+    else
+        uApicVersion = ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_VERSION);
+    if ((APIC_REG_VERSION_GET_VER(uApicVersion) & 0xF0) == 0x10)
+    {
+        g_aLApics[iCpu].uVersion    = uApicVersion;
+        g_aLApics[iCpu].fHasThermal = APIC_REG_VERSION_GET_MAX_LVT(uApicVersion) >= 5;
+
+#if 0 /* enable if you need it. */
+        if (g_aLApics[iCpu].fX2Apic)
+            SUPR0Printf("CPUM: X2APIC %02u at - ver %#x, lint0=%#x lint1=%#x pc=%#x thmr=%#x\n",
+                        iCpu, uApicVersion,
+                        ApicX2RegRead32(APIC_REG_LVT_LINT0), ApicX2RegRead32(APIC_REG_LVT_LINT1),
+                        ApicX2RegRead32(APIC_REG_LVT_PC), ApicX2RegRead32(APIC_REG_LVT_THMR) );
+        else
+            SUPR0Printf("CPUM: APIC %02u at %RGp (mapped at %p) - ver %#x, lint0=%#x lint1=%#x pc=%#x thmr=%#x\n",
+                        iCpu, g_aLApics[iCpu].PhysBase, g_aLApics[iCpu].pv, uApicVersion,
+                        ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_LINT0), ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_LINT1),
+                        ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_PC), ApicRegRead(g_aLApics[iCpu].pv, APIC_REG_LVT_THMR) );
+#endif
+    }
+    else
+    {
+        g_aLApics[iCpu].fEnabled = false;
+        g_aLApics[iCpu].fX2Apic  = false;
+        SUPR0Printf("VBox/CPUM: Unsupported APIC version %#x (iCpu=%d)\n", uApicVersion, iCpu);
     }
 }
…

     /*
-     * Create mappings for all online CPUs we think have APICs.
-     */
-    /** @todo r=bird: This code is not adequately handling CPUs that are
-     *        offline or unplugged at init time and later bought into action. */
-    int rc = RTMpOnAll(cpumR0MapLocalApicWorker, NULL, NULL);
+     * Create mappings for all online CPUs we think have legacy APICs.
+     */
+    int rc = RTMpOnAll(cpumR0MapLocalApicCpuProber, NULL, NULL);

     for (unsigned iCpu = 0; RT_SUCCESS(rc) && iCpu < RT_ELEMENTS(g_aLApics); iCpu++)
     {
-        if (g_aLApics[iCpu].fEnabled)
+        if (g_aLApics[iCpu].fEnabled && !g_aLApics[iCpu].fX2Apic)
         {
             rc = RTR0MemObjEnterPhys(&g_aLApics[iCpu].hMemObj, g_aLApics[iCpu].PhysBase,
…
             if (RT_SUCCESS(rc))
             {
-                void *pvApicBase = RTR0MemObjAddress(g_aLApics[iCpu].hMapObj);
-
-                /*
-                 * 0x0X       82489 external APIC
-                 * 0x1X       Local APIC
-                 * 0x2X..0xFF reserved
-                 */
-                /** @todo r=bird: The local APIC is usually at the same address for all CPUs,
-                 *        and therefore inaccessible by the other CPUs. */
-                uint32_t ApicVersion = ApicRegRead(pvApicBase, APIC_REG_VERSION);
-                if ((APIC_REG_VERSION_GET_VER(ApicVersion) & 0xF0) == 0x10)
-                {
-                    g_aLApics[iCpu].fHasThermal = APIC_REG_VERSION_GET_MAX_LVT(ApicVersion) >= 5;
-                    g_aLApics[iCpu].pv          = pvApicBase;
-                    Log(("CPUM: APIC %02u at %RGp (mapped at %p) - ver %#x, lint0=%#x lint1=%#x pc=%#x thmr=%#x\n",
-                         iCpu, g_aLApics[iCpu].PhysBase, g_aLApics[iCpu].pv, ApicVersion,
-                         ApicRegRead(pvApicBase, APIC_REG_LVT_LINT0),
-                         ApicRegRead(pvApicBase, APIC_REG_LVT_LINT1),
-                         ApicRegRead(pvApicBase, APIC_REG_LVT_PC),
-                         ApicRegRead(pvApicBase, APIC_REG_LVT_THMR)
-                        ));
-                    continue;
-                }
-
-                RTR0MemObjFree(g_aLApics[iCpu].hMapObj, true /* fFreeMappings */);
+                g_aLApics[iCpu].pv = RTR0MemObjAddress(g_aLApics[iCpu].hMapObj);
+                continue;
             }
             RTR0MemObjFree(g_aLApics[iCpu].hMemObj, true /* fFreeMappings */);
…
             g_aLApics[iCpu].fEnabled = false;
         }
-    }
+        g_aLApics[iCpu].pv = NULL;
+    }
+
+    /*
+     * Check the APICs.
+     */
+    if (RT_SUCCESS(rc))
+        rc = RTMpOnAll(cpumR0MapLocalApicCpuChecker, NULL, NULL);
+
     if (RT_FAILURE(rc))
     {
…
         return rc;
     }
+
+#ifdef LOG_ENABLED
+    /*
+     * Log the result (pretty useless, requires enabling CPUM in VBoxDrv
+     * and !VBOX_WITH_R0_LOGGING).
+     */
+    if (LogIsEnabled())
+    {
+        uint32_t cEnabled = 0;
+        uint32_t cX2Apics = 0;
+        for (unsigned iCpu = 0; iCpu < RT_ELEMENTS(g_aLApics); iCpu++)
+            if (g_aLApics[iCpu].fEnabled)
+            {
+                cEnabled++;
+                cX2Apics += g_aLApics[iCpu].fX2Apic;
+            }
+        Log(("CPUM: %u APICs, %u X2APICs\n", cEnabled, cX2Apics));
+    }
+#endif

     return VINF_SUCCESS;
…
         g_aLApics[iCpu].hMemObj  = NIL_RTR0MEMOBJ;
         g_aLApics[iCpu].fEnabled = false;
+        g_aLApics[iCpu].fX2Apic  = false;
         g_aLApics[iCpu].pv       = NULL;
     }
…

 /**
- * Write the Local APIC mapping address of the current host CPU to CPUM to be
- * able to access the APIC registers in the raw mode switcher for disabling/
- * re-enabling the NMI. Must be called with disabled preemption or disabled
- * interrupts!
- *
- * @param   pVM         Pointer to the VM.
+ * Updates CPUMCPU::pvApicBase and CPUMCPU::fX2Apic prior to world switch.
+ *
+ * Writes the Local APIC mapping address of the current host CPU to CPUMCPU so
+ * the world switchers can access the APIC registers for the purpose of
+ * disabling and re-enabling the NMIs. Must be called with disabled preemption
+ * or disabled interrupts!
+ *
+ * @param   pVCpu       Pointer to the cross context CPU structure of the
+ *                      calling EMT.
  * @param   idHostCpu   The ID of the current host CPU.
  */
-VMMR0_INT_DECL(void) CPUMR0SetLApic(PVM pVM, RTCPUID idHostCpu)
-{
-    pVM->cpum.s.pvApicBase = g_aLApics[RTMpCpuIdToSetIndex(idHostCpu)].pv;
+VMMR0_INT_DECL(void) CPUMR0SetLApic(PVMCPU pVCpu, RTCPUID idHostCpu)
+{
+    int idxCpu = RTMpCpuIdToSetIndex(idHostCpu);
+    pVCpu->cpum.s.pvApicBase = g_aLApics[idxCpu].pv;
+    pVCpu->cpum.s.fX2Apic    = g_aLApics[idxCpu].fX2Apic;
+//    Log6(("CPUMR0SetLApic: pvApicBase=%p fX2Apic=%d\n", g_aLApics[idxCpu].pv, g_aLApics[idxCpu].fX2Apic));
 }
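The prober derives both flags from the IA32_APIC_BASE MSR: the mapping is considered enabled when the EN bit is set, and x2APIC mode is assumed only when EXTD and EN are both set. Below is a minimal stand-alone C sketch of that detection; the bit positions are the architectural ones from the Intel SDM, and ReadApicBaseMsr() is an illustrative stand-in for ASMRdMsr(MSR_IA32_APICBASE), not a VirtualBox API.

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_IA32_APICBASE_EXTD  (UINT64_C(1) << 10)   /* x2APIC mode enable */
    #define MSR_IA32_APICBASE_EN    (UINT64_C(1) << 11)   /* APIC global enable */

    extern uint64_t ReadApicBaseMsr(void);                /* illustrative: RDMSR of MSR 0x1B */

    static void ProbeApicMode(bool *pfEnabled, bool *pfX2Apic)
    {
        uint64_t uApicBase = ReadApicBaseMsr();
        *pfEnabled = (uApicBase & MSR_IA32_APICBASE_EN) != 0;
        /* EXTD without EN is not a usable x2APIC, so require both bits. */
        *pfX2Apic  = (uApicBase & (MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_EN))
                  ==              (MSR_IA32_APICBASE_EXTD | MSR_IA32_APICBASE_EN);
    }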
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r47803 → r47844

 #ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
     RTCPUID idHostCpu = RTMpCpuId();
-    CPUMR0SetLApic(pVM, idHostCpu);
+    CPUMR0SetLApic(pVCpu, idHostCpu);
 #endif
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r47803 → r47844

 #ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
     RTCPUID idHostCpu = RTMpCpuId();
-    CPUMR0SetLApic(pVM, idHostCpu);
+    CPUMR0SetLApic(pVCpu, idHostCpu);
 #endif
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r47760 → r47844

     RTCPUID idHostCpu = RTMpCpuId();
 #ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
-    CPUMR0SetLApic(pVM, idHostCpu);
+    CPUMR0SetLApic(pVCpu, idHostCpu);
 #endif
     ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);
…
 #ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
     RTCPUID idHostCpu = RTMpCpuId();
-    CPUMR0SetLApic(pVM, idHostCpu);
+    CPUMR0SetLApic(&pVM->aCpus[0], idHostCpu);
 #endif
trunk/src/VBox/VMM/VMMSwitcher/AMD64andLegacy.mac
r47689 → r47844

     ; Unblock Local APIC NMI vectors
     ; Do this here to ensure the host CS is already restored
-    mov     ecx, [rdx + CPUM.fApicDisVectors]
-    mov     r8, [rdx + CPUM.pvApicBase]
+    mov     r8d, [rdx + CPUM.offCPUMCPU0]
+    mov     ecx, [rdx + r8 + CPUMCPU.fApicDisVectors]
+    test    ecx, ecx
+    jz      gth64_apic_done
+    cmp     byte [rdx + r8 + CPUMCPU.fX2Apic], 1
+    je      gth64_x2apic
+
+    ; Legacy APIC mode:
+    mov     r8, [rdx + r8 + CPUMCPU.pvApicBase]
     shr     ecx, 1
     jnc     gth64_nolint0
…
     and     dword [r8 + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
 gth64_notherm:
+    jmp     gth64_apic_done
+
+    ; X2 APIC mode:
+gth64_x2apic:
+    mov     r8, rax                     ; save rax
+    mov     r10, rcx
+    shr     r10d, 1
+    jnc     gth64_x2_nolint0
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
+    rdmsr
+    and     eax, ~APIC_REG_LVT_MASKED
+    wrmsr
+gth64_x2_nolint0:
+    shr     r10d, 1
+    jnc     gth64_x2_nolint1
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
+    rdmsr
+    and     eax, ~APIC_REG_LVT_MASKED
+    wrmsr
+gth64_x2_nolint1:
+    shr     r10d, 1
+    jnc     gth64_x2_nopc
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
+    rdmsr
+    and     eax, ~APIC_REG_LVT_MASKED
+    wrmsr
+gth64_x2_nopc:
+    shr     r10d, 1
+    jnc     gth64_x2_notherm
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
+    rdmsr
+    and     eax, ~APIC_REG_LVT_MASKED
+    wrmsr
+gth64_x2_notherm:
+    mov     rax, r8                     ; restore rax
+
+gth64_apic_done:
 %endif
…
 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
     ; Block Local APIC NMI vectors
-    mov     rbx, [rdx + CPUM.pvApicBase]
+    cmp     byte [rdx + r8 + CPUMCPU.fX2Apic], 1
+    je      htg_x2apic
+    mov     rbx, [rdx + r8 + CPUMCPU.pvApicBase]
     or      rbx, rbx
-    jz      htg_noapic
-    xor     edi, edi
+    jz      htg_apic_done
+    xor     edi, edi                    ; fApicDisVectors
     mov     eax, [rbx + APIC_REG_LVT_LINT0]
     mov     ecx, eax
…
     mov     eax, [rbx + APIC_REG_LVT_THMR] ; write completion
 htg_notherm:
-    mov     [rdx + CPUM.fApicDisVectors], edi
-htg_noapic:
+    mov     [rdx + r8 + CPUMCPU.fApicDisVectors], edi
+    jmp     htg_apic_done
+
+    ; X2APIC?
+htg_x2apic:
+    mov     r15, rdx                    ; save rdx
+    xor     edi, edi                    ; fApicDisVectors
+
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
+    rdmsr
+    mov     ebx, eax
+    and     ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+    cmp     ebx, APIC_REG_LVT_MODE_NMI
+    jne     htg_x2_nolint0
+    or      edi, 0x01
+    or      eax, APIC_REG_LVT_MASKED
+    wrmsr
+htg_x2_nolint0:
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
+    rdmsr
+    mov     ebx, eax
+    and     ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+    cmp     ebx, APIC_REG_LVT_MODE_NMI
+    jne     htg_x2_nolint1
+    or      edi, 0x02
+    or      eax, APIC_REG_LVT_MASKED
+    wrmsr
+htg_x2_nolint1:
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
+    rdmsr
+    mov     ebx, eax
+    and     ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+    cmp     ebx, APIC_REG_LVT_MODE_NMI
+    jne     htg_x2_nopc
+    or      edi, 0x04
+    or      eax, APIC_REG_LVT_MASKED
+    wrmsr
+htg_x2_nopc:
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_VERSION >> 4)
+    rdmsr
+    shr     eax, 16
+    cmp     al, 5
+    jb      htg_x2_notherm
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
+    rdmsr
+    mov     ebx, eax
+    and     ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+    cmp     ebx, APIC_REG_LVT_MODE_NMI
+    jne     htg_x2_notherm
+    or      edi, 0x08
+    or      eax, APIC_REG_LVT_MASKED
+    wrmsr
+htg_x2_notherm:
+    mov     rdx, r15
+    mov     [rdx + r8 + CPUMCPU.fApicDisVectors], edi
+htg_apic_done:
+
 %endif ; VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
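The x2APIC paths in the switchers rely on the architectural rule that, in x2APIC mode, each 16-byte-aligned APIC MMIO register is reachable as an MSR at 0x800 plus the MMIO offset divided by 16, which is what the repeated MSR_IA32_X2APIC_START + (APIC_REG_xxx >> 4) expressions compute. A small C sketch of that mapping follows; the register offsets are the architectural LVT offsets and the helper name is illustrative, not taken from the VirtualBox headers.

    #include <stdint.h>

    #define MSR_IA32_X2APIC_START   UINT32_C(0x800)   /* first x2APIC MSR */
    #define APIC_REG_LVT_THMR       UINT32_C(0x330)   /* thermal sensor LVT */
    #define APIC_REG_LVT_PC         UINT32_C(0x340)   /* performance counter LVT */
    #define APIC_REG_LVT_LINT0      UINT32_C(0x350)
    #define APIC_REG_LVT_LINT1      UINT32_C(0x360)

    /* MMIO offset -> x2APIC MSR number, e.g. LINT0 (0x350) -> MSR 0x835. */
    static inline uint32_t X2ApicMsrOf(uint32_t offApicMmioReg)
    {
        return MSR_IA32_X2APIC_START + (offApicMmioReg >> 4);
    }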
trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac
r47660 → r47844

 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
-    CPUM_FROM_CPUMCPU(edx)
     ; Restore blocked Local APIC NMI vectors
-    mov     ecx, [edx + CPUM.fApicDisVectors]
-    mov     edx, [edx + CPUM.pvApicBase]
+    ; Do this here to ensure the host CS is already restored
+    mov     ecx, [edx + CPUMCPU.fApicDisVectors]
+    test    ecx, ecx
+    jz      gth_apic_done
+    cmp     byte [edx + CPUMCPU.fX2Apic], 1
+    je      gth_x2apic
+
+    mov     edx, [edx + CPUMCPU.pvApicBase]
     shr     ecx, 1
     jnc     gth_nolint0
…
     and     dword [edx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
 gth_notherm:
+    jmp     gth_apic_done
+
+gth_x2apic:
+    push    eax                         ; save eax
+    push    ebx                         ; save it for fApicDisVectors
+    push    edx                         ; save edx just in case.
+    mov     ebx, ecx                    ; ebx = fApicDisVectors, ecx free for MSR use
+    shr     ebx, 1
+    jnc     gth_x2_nolint0
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
+    rdmsr
+    and     eax, ~APIC_REG_LVT_MASKED
+    wrmsr
+gth_x2_nolint0:
+    shr     ebx, 1
+    jnc     gth_x2_nolint1
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
+    rdmsr
+    and     eax, ~APIC_REG_LVT_MASKED
+    wrmsr
+gth_x2_nolint1:
+    shr     ebx, 1
+    jnc     gth_x2_nopc
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
+    rdmsr
+    and     eax, ~APIC_REG_LVT_MASKED
+    wrmsr
+gth_x2_nopc:
+    shr     ebx, 1
+    jnc     gth_x2_notherm
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
+    rdmsr
+    and     eax, ~APIC_REG_LVT_MASKED
+    wrmsr
+gth_x2_notherm:
+    pop     edx
+    pop     ebx
+    pop     eax
+
+gth_apic_done:
 %endif
…
     mov     [edx + CPUMCPU.Host.esi], esi
     mov     [edx + CPUMCPU.Host.esp], esp
-    mov     [edx + CPUMCPU.Host.ebp], ebp
+    mov     [edx + CPUMCPU.Host.ebp], ebp   ; offCpumCpu!
     ; selectors.
     mov     [edx + CPUMCPU.Host.ds], ds
…
     DEBUG32_S_CHAR('f')
     DEBUG32_S_CHAR(';')
-    CPUM_FROM_CPUMCPU_WITH_OFFSET edx, ebp
-    mov     ebx, [edx + CPUM.pvApicBase]
+    cmp     byte [edx + CPUMCPU.pvApicBase], 1
+    je      htg_x2apic
+
+    mov     ebx, [edx + CPUMCPU.pvApicBase]
     or      ebx, ebx
-    jz      htg_noapic
+    jz      htg_apic_done
     mov     eax, [ebx + APIC_REG_LVT_LINT0]
     mov     ecx, eax
…
     mov     eax, [ebx + APIC_REG_LVT_THMR] ; write completion
 htg_notherm:
-    mov     [edx + CPUM.fApicDisVectors], edi
-htg_noapic:
-    CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp
+    mov     [edx + CPUMCPU.fApicDisVectors], edi
+    jmp     htg_apic_done
+
+htg_x2apic:
+    mov     esi, edx                    ; Save edx.
+    xor     edi, edi                    ; fApicDisVectors
+
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
+    rdmsr
+    mov     ebx, eax
+    and     ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+    cmp     ebx, APIC_REG_LVT_MODE_NMI
+    jne     htg_x2_nolint0
+    or      edi, 0x01
+    or      eax, APIC_REG_LVT_MASKED
+    wrmsr
+htg_x2_nolint0:
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
+    rdmsr
+    mov     ebx, eax
+    and     ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+    cmp     ebx, APIC_REG_LVT_MODE_NMI
+    jne     htg_x2_nolint1
+    or      edi, 0x02
+    or      eax, APIC_REG_LVT_MASKED
+    wrmsr
+htg_x2_nolint1:
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
+    rdmsr
+    mov     ebx, eax
+    and     ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+    cmp     ebx, APIC_REG_LVT_MODE_NMI
+    jne     htg_x2_nopc
+    or      edi, 0x04
+    or      eax, APIC_REG_LVT_MASKED
+    wrmsr
+htg_x2_nopc:
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_VERSION >> 4)
+    rdmsr
+    shr     eax, 16
+    cmp     al, 5
+    jb      htg_x2_notherm
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
+    rdmsr
+    mov     ebx, eax
+    and     ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+    cmp     ebx, APIC_REG_LVT_MODE_NMI
+    jne     htg_x2_notherm
+    or      edi, 0x08
+    or      eax, APIC_REG_LVT_MASKED
+    wrmsr
+htg_x2_notherm:
+    mov     edx, esi                    ; Restore edx.
+    mov     [edx + CPUMCPU.fApicDisVectors], edi
+
+htg_apic_done:
 %endif
trunk/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac
r47689 → r47844

 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
-    CPUM_FROM_CPUMCPU(edx)
     ; Restore blocked Local APIC NMI vectors
     ; Do this here to ensure the host CS is already restored
-    mov     ecx, [edx + CPUM.fApicDisVectors]
-    mov     edx, [edx + CPUM.pvApicBase]
+    mov     ecx, [edx + CPUMCPU.fApicDisVectors]
+    test    ecx, ecx
+    jz      gth_apic_done
+    cmp     byte [edx + CPUMCPU.fX2Apic], 1
+    je      gth_x2apic
+
+    mov     edx, [edx + CPUMCPU.pvApicBase]
     shr     ecx, 1
     jnc     gth_nolint0
…
     and     dword [edx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED
 gth_notherm:
-%endif
+    jmp     gth_apic_done
+
+gth_x2apic:
+    push    eax                         ; save eax
+    push    ebx                         ; save it for fApicDisVectors
+    push    edx                         ; save edx just in case.
+    mov     ebx, ecx                    ; ebx = fApicDisVectors, ecx free for MSR use
+    shr     ebx, 1
+    jnc     gth_x2_nolint0
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
+    rdmsr
+    and     eax, ~APIC_REG_LVT_MASKED
+    wrmsr
+gth_x2_nolint0:
+    shr     ebx, 1
+    jnc     gth_x2_nolint1
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
+    rdmsr
+    and     eax, ~APIC_REG_LVT_MASKED
+    wrmsr
+gth_x2_nolint1:
+    shr     ebx, 1
+    jnc     gth_x2_nopc
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
+    rdmsr
+    and     eax, ~APIC_REG_LVT_MASKED
+    wrmsr
+gth_x2_nopc:
+    shr     ebx, 1
+    jnc     gth_x2_notherm
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
+    rdmsr
+    and     eax, ~APIC_REG_LVT_MASKED
+    wrmsr
+gth_x2_notherm:
+    pop     edx
+    pop     ebx
+    pop     eax
+
+gth_apic_done:
+%endif ; VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI

 %ifdef VBOX_WITH_STATISTICS
…

     ; Block Local APIC NMI vectors
-    xor     edi, edi
-
 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
-    mov     esi, edx
-    CPUM_FROM_CPUMCPU(edx)
-    mov     ebx, [edx + CPUM.pvApicBase]
+    cmp     byte [edx + CPUMCPU.pvApicBase], 1
+    je      htg_x2apic
+
+    mov     ebx, [edx + CPUMCPU.pvApicBase]
     or      ebx, ebx
-    jz      htg_noapic
+    jz      htg_apic_done
+    xor     edi, edi                    ; fApicDisVectors
+
     mov     eax, [ebx + APIC_REG_LVT_LINT0]
     mov     ecx, eax
…
     mov     eax, [ebx + APIC_REG_LVT_THMR] ; write completion
 htg_notherm:
-    mov     [edx + CPUM.fApicDisVectors], edi
-htg_noapic:
-    mov     edx, esi
-%endif
+    mov     [edx + CPUMCPU.fApicDisVectors], edi
+    jmp     htg_apic_done
+
+htg_x2apic:
+    mov     esi, edx                    ; Save edx.
+    xor     edi, edi                    ; fApicDisVectors
+
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT0 >> 4)
+    rdmsr
+    mov     ebx, eax
+    and     ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+    cmp     ebx, APIC_REG_LVT_MODE_NMI
+    jne     htg_x2_nolint0
+    or      edi, 0x01
+    or      eax, APIC_REG_LVT_MASKED
+    wrmsr
+htg_x2_nolint0:
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_LINT1 >> 4)
+    rdmsr
+    mov     ebx, eax
+    and     ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+    cmp     ebx, APIC_REG_LVT_MODE_NMI
+    jne     htg_x2_nolint1
+    or      edi, 0x02
+    or      eax, APIC_REG_LVT_MASKED
+    wrmsr
+htg_x2_nolint1:
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_PC >> 4)
+    rdmsr
+    mov     ebx, eax
+    and     ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+    cmp     ebx, APIC_REG_LVT_MODE_NMI
+    jne     htg_x2_nopc
+    or      edi, 0x04
+    or      eax, APIC_REG_LVT_MASKED
+    wrmsr
+htg_x2_nopc:
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_VERSION >> 4)
+    rdmsr
+    shr     eax, 16
+    cmp     al, 5
+    jb      htg_x2_notherm
+    mov     ecx, MSR_IA32_X2APIC_START + (APIC_REG_LVT_THMR >> 4)
+    rdmsr
+    mov     ebx, eax
+    and     ebx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK)
+    cmp     ebx, APIC_REG_LVT_MODE_NMI
+    jne     htg_x2_notherm
+    or      edi, 0x08
+    or      eax, APIC_REG_LVT_MASKED
+    wrmsr
+htg_x2_notherm:
+    mov     edx, esi                    ; Restore edx.
+    mov     [edx + CPUMCPU.fApicDisVectors], edi
+
+htg_apic_done:
+%endif ; VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI

     FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter.
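All three switchers use the same bit assignment in fApicDisVectors, set on the way into the guest and consumed on the way back: bit 0 = LINT0, bit 1 = LINT1, bit 2 = performance counter LVT, bit 3 = thermal LVT (the 0x01/0x02/0x04/0x08 constants in the x2APIC paths above). The following C sketch shows the restore side of that convention; UnmaskLvt() is an illustrative helper, not a VirtualBox function, and the register offsets are the architectural ones.

    #include <stdint.h>

    #define APIC_DIS_LINT0  UINT32_C(0x01)
    #define APIC_DIS_LINT1  UINT32_C(0x02)
    #define APIC_DIS_PC     UINT32_C(0x04)
    #define APIC_DIS_THMR   UINT32_C(0x08)

    /* Illustrative: clears APIC_REG_LVT_MASKED in the given LVT register,
     * via MMIO in xAPIC mode or WRMSR in x2APIC mode. */
    extern void UnmaskLvt(uint32_t offLvtReg);

    static void RestoreNmiVectors(uint32_t fApicDisVectors)
    {
        if (fApicDisVectors & APIC_DIS_LINT0) UnmaskLvt(0x350 /* LINT0 */);
        if (fApicDisVectors & APIC_DIS_LINT1) UnmaskLvt(0x360 /* LINT1 */);
        if (fApicDisVectors & APIC_DIS_PC)    UnmaskLvt(0x340 /* perf counter */);
        if (fApicDisVectors & APIC_DIS_THMR)  UnmaskLvt(0x330 /* thermal */);
    }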
trunk/src/VBox/VMM/include/CPUMInternal.h
r47660 → r47844

     struct
     {
-        uint32_t            AndMask;
+        uint32_t            AndMask;    /**< @todo Move these to the per-CPU structure and fix the switchers. Saves a register! */
         uint32_t            OrMask;
     } CR4;
…
     uint8_t                 abPadding2[4];
 #endif
-
-#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
-    RTHCPTR                 pvApicBase;
-    uint32_t                fApicDisVectors;
-    uint8_t                 abPadding3[4];
-#endif
 } CPUM;
 /** Pointer to the CPUM instance data residing in the shared VM structure. */
…
     uint32_t                u32RetCode;

+#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+    /** The address of the APIC mapping, NULL if no APIC.
+     * Call CPUMR0SetLApic to update this before doing a world switch. */
+    RTHCPTR                 pvApicBase;
+    /** Used by the world switcher code to store which vectors needs restoring on
+     * the way back. */
+    uint32_t                fApicDisVectors;
+    /** Set if the CPU has the X2APIC mode enabled.
+     * Call CPUMR0SetLApic to update this before doing a world switch. */
+    bool                    fX2Apic;
+#else
+    uint8_t                 abPadding3[8+4+1];
+#endif
+
     /** Have we entered raw-mode? */
     bool                    fRawEntered;
…

     /** Align the structure on a 64-byte boundary. */
-    uint8_t                 abPadding2[64 - 16 - 2];
+    uint8_t                 abPadding2[64 - 16 - 8 - 4 - 1 - 2];
 } CPUMCPU;
 /** Pointer to the CPUMCPU instance data residing in the shared VMCPU structure. */
trunk/src/VBox/VMM/include/CPUMInternal.mac
r47660 → r47844

 %if HC_ARCH_BITS == 32
     .abPadding2           resb    4
-%endif
-
-%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
-    .pvApicBase           RTR0PTR_RES 1
-    .fApicDisVectors      resd    1
-    .abPadding3           resb    4
 %endif
 endstruc
…
     .offCPUM              resd    1
     .u32RetCode           resd    1
+
+%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+    .pvApicBase           RTR0PTR_RES 1
+    .fApicDisVectors      resd    1
+    .fX2Apic              resb    1
+%else
+    .abPadding3           resb    (8 + 4 + 1)
+%endif
+
     .fRawEntered          resb    1
     .fRemEntered          resb    1
-    .abPadding2           resb    (64 - 16 - 2)
+
+    .abPadding2           resb    (64 - 16 - 8 - 4 - 1 - 2)
 endstruc
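The assembly struc above must stay byte-for-byte in sync with the C declaration in CPUMInternal.h; tstVMStruct.h and tstVMStructSize.cpp verify the offsets at build time. The new padding term simply subtracts the freshly added 8 + 4 + 1 bytes so the 64-byte tail still adds up (64 - 16 - 8 - 4 - 1 - 2 = 33). Below is a cut-down, illustrative C stand-in of that arithmetic, assuming a 64-bit host pointer; it is not the real CPUMCPU layout.

    #include <stdint.h>

    /* Stand-in for the tail of CPUMCPU; field names come from the diff,
     * the leading 16 bytes and 8-byte pointer size are assumptions. */
    typedef struct CPUMCPUTAIL
    {
        uint8_t     abBefore[16];       /* the 16 bytes accounted for earlier */
        void       *pvApicBase;         /* 8 bytes */
        uint32_t    fApicDisVectors;    /* 4 bytes */
        uint8_t     fX2Apic;            /* 1 byte  */
        uint8_t     fRawEntered;        /* 1 byte  */
        uint8_t     fRemEntered;        /* 1 byte  */
        uint8_t     abPadding2[64 - 16 - 8 - 4 - 1 - 2];   /* 33 bytes */
    } CPUMCPUTAIL;

    _Static_assert(sizeof(CPUMCPUTAIL) == 64, "tail must stay 64 bytes");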
trunk/src/VBox/VMM/testcase/tstVMStruct.h
r47807 → r47844

     GEN_CHECK_OFF(CPUM, aGuestCpuIdHyper);
     GEN_CHECK_OFF(CPUM, GuestCpuIdDef);
-#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
-    GEN_CHECK_OFF(CPUM, pvApicBase);
-    GEN_CHECK_OFF(CPUM, fApicDisVectors);
-#endif

     GEN_CHECK_SIZE(CPUMCPU); // has .mac
…
     GEN_CHECK_OFF(CPUMCPU, offCPUM);
     GEN_CHECK_OFF(CPUMCPU, u32RetCode);
+#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+    GEN_CHECK_OFF(CPUMCPU, pvApicBase);
+    GEN_CHECK_OFF(CPUMCPU, fApicDisVectors);
+    GEN_CHECK_OFF(CPUMCPU, fX2Apic);
+#endif
     GEN_CHECK_OFF(CPUMCPU, fRawEntered);
     GEN_CHECK_OFF(CPUMCPU, fRemEntered);
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
r46358 → r47844

     CHECK_MEMBER_ALIGNMENT(VM, aCpus[1].cpum.s.Hyper, 64);
 #ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
-    CHECK_MEMBER_ALIGNMENT(VM, cpum.s.pvApicBase, 8);
+    CHECK_MEMBER_ALIGNMENT(VM, aCpus[0].cpum.s.pvApicBase, 8);
 #endif