Changeset 33935 in vbox for trunk/src/VBox/VMM
- Timestamp:
- Nov 10, 2010 3:37:02 PM (15 years ago)
- svn:sync-xref-src-repo-rev:
- 67595
- Location:
- trunk/src/VBox/VMM
- Files:
- 10 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/CPUMInternal.h
r33540 r33935 313 313 314 314 #if HC_ARCH_BITS == 32 315 /** Align the next member, and thereby the structure, on a 64-byte boundary. */316 315 uint8_t abPadding2[4]; 317 316 #endif 318 317 318 #ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 319 RTHCPTR pvApicBase; 320 uint32_t fApicDisVectors; 321 uint8_t abPadding3[HC_ARCH_BITS == 32 ? 56 : 52]; 322 #endif 323 319 324 /** 320 * Guest context on raw mode entry. 325 * Guest context on raw mode entry. 64-byte aligned! 321 326 * This a debug feature, see CPUMR3SaveEntryCtx. 322 327 */ -
trunk/src/VBox/VMM/CPUMInternal.mac
r30263 r33935 81 81 .aGuestCpuIdCentaur resb 16*4 82 82 .GuestCpuIdDef resb 16 83 84 %if HC_ARCH_BITS == 32 85 .abPadding2 resb 4 86 %endif 87 88 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 89 %if HC_ARCH_BITS == 32 90 .pvApicBase resd 1 91 .fApicDisVectors resd 1 92 %else 93 .pvApicBase resq 1 94 .fApicDisVectors resd 1 95 %endif 96 %endif 83 97 84 98 alignb 64 … … 450 464 sub %1, dword [%1 + CPUMCPU.offCPUM] 451 465 %endmacro 466 467 ;; 468 ; Converts the CPUMCPU pointer to CPUM 469 ; @param %1 register name (PVM) 470 ; @param %2 register name (CPUMCPU offset) 471 %macro CPUM_FROM_CPUMCPU_WITH_OFFSET 2 472 sub %1, %2 473 %endmacro -
trunk/src/VBox/VMM/Makefile.kmk
r32253 r33935 38 38 ifdef VBOX_WITH_R0_LOGGING 39 39 VMM_COMMON_DEFS += VBOX_WITH_R0_LOGGING 40 endif 41 ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 42 VMM_COMMON_DEFS += VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 40 43 endif 41 44 # VMM_COMMON_DEFS += VBOX_WITH_NS_ACCOUNTING_STATS -
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp
r29250 r33935 30 30 #include <iprt/assert.h> 31 31 #include <iprt/asm-amd64-x86.h> 32 32 #ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 33 # include <iprt/mem.h> 34 # include <iprt/memobj.h> 35 # include <VBox/apic.h> 36 #endif 37 38 39 #ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 40 /** Local APIC mappings */ 41 typedef struct 42 { 43 bool fEnabled; 44 uint64_t PhysBase; 45 RTR0MEMOBJ hMemObj; 46 RTR0MEMOBJ hMapObj; 47 void *pv; 48 uint32_t fHasThermal; 49 } CPUMHOSTLAPIC; 50 51 static CPUMHOSTLAPIC g_aLApics[RTCPUSET_MAX_CPUS]; 52 static int cpumR0MapLocalApics(void); 53 static void cpumR0UnmapLocalApics(void); 54 #endif 55 56 57 /** 58 * Does the Ring-0 CPU initialization once during module load. 59 * XXX Host-CPU hot-plugging? 60 */ 61 VMMR0DECL(int) CPUMR0ModuleInit(void) 62 { 63 #ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 64 return cpumR0MapLocalApics(); 65 #endif 66 } 67 68 69 /** 70 * Terminate the module. 71 */ 72 VMMR0DECL(int) CPUMR0ModuleTerm(void) 73 { 74 #ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 75 cpumR0UnmapLocalApics(); 76 #endif 77 return VINF_SUCCESS; 78 } 33 79 34 80 … … 45 91 { 46 92 LogFlow(("CPUMR0Init: %p\n", pVM)); 93 94 #ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 95 for (unsigned i = 0; i < RT_ELEMENTS(g_aLApics); i++) 96 if (g_aLApics[i].pv) 97 SUPR0Printf(" CPU%d: %llx => %llx\n", i, g_aLApics[i].PhysBase, (uint64_t)g_aLApics[i].pv); 98 #endif 47 99 48 100 /* … … 590 642 } 591 643 644 645 #ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 646 /** 647 * Worker for cpumR0MapLocalApics. Check each CPU for a present Local APIC. 648 * Play safe and treat each CPU separate. 
649 */ 650 static void cpumR0MapLocalApicWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2) 651 { 652 uint32_t u32MaxIdx; 653 uint32_t u32EBX, u32ECX, u32EDX; 654 int iCpu = RTMpCpuIdToSetIndex(idCpu); 655 Assert(iCpu < RTCPUSET_MAX_CPUS); 656 ASMCpuId(0, &u32MaxIdx, &u32EBX, &u32ECX, &u32EDX); 657 if ( ( ( u32EBX == X86_CPUID_VENDOR_INTEL_EBX 658 && u32ECX == X86_CPUID_VENDOR_INTEL_ECX 659 && u32EDX == X86_CPUID_VENDOR_INTEL_EDX) 660 || ( u32EBX == X86_CPUID_VENDOR_AMD_EBX 661 && u32ECX == X86_CPUID_VENDOR_AMD_ECX 662 && u32EDX == X86_CPUID_VENDOR_AMD_EDX)) 663 && u32MaxIdx >= 1) 664 { 665 ASMCpuId(1, &u32MaxIdx, &u32EBX, &u32ECX, &u32EDX); 666 if ( (u32EDX & X86_CPUID_FEATURE_EDX_APIC) 667 && (u32EDX & X86_CPUID_FEATURE_EDX_MSR)) 668 { 669 uint64_t u64ApicBase = ASMRdMsr(MSR_IA32_APICBASE); 670 uint32_t u32MaxExtIdx; 671 /* see Intel Manual: Local APIC Status and Location: MAXPHYADDR default is bit 36 */ 672 uint64_t u64Mask = UINT64_C(0x0000000ffffff000); 673 ASMCpuId(0x80000000, &u32MaxExtIdx, &u32EBX, &u32ECX, &u32EDX); 674 if ( u32MaxExtIdx >= 0x80000008 675 && u32MaxExtIdx < 0x8000ffff) 676 { 677 uint32_t u32PhysBits; 678 ASMCpuId(0x80000008, &u32PhysBits, &u32EBX, &u32ECX, &u32EDX); 679 u32PhysBits &= 0xff; 680 u64Mask = ((UINT64_C(1) << u32PhysBits) - 1) & UINT64_C(0xfffffffffffff000); 681 } 682 g_aLApics[iCpu].fEnabled = true; 683 g_aLApics[iCpu].PhysBase = u64ApicBase & u64Mask; 684 } 685 } 686 } 687 688 689 /** 690 * Map the MMIO page of each local APIC in the system. 
691 */ 692 static int cpumR0MapLocalApics(void) 693 { 694 int rc = RTMpOnAll(cpumR0MapLocalApicWorker, NULL, NULL); 695 for (unsigned iCpu = 0; RT_SUCCESS(rc) && iCpu < RT_ELEMENTS(g_aLApics); iCpu++) 696 { 697 if (g_aLApics[iCpu].fEnabled) 698 { 699 rc = RTR0MemObjEnterPhys(&g_aLApics[iCpu].hMemObj, g_aLApics[iCpu].PhysBase, 700 PAGE_SIZE, RTMEM_CACHE_POLICY_MMIO); 701 if (RT_SUCCESS(rc)) 702 rc = RTR0MemObjMapKernel(&g_aLApics[iCpu].hMapObj, g_aLApics[iCpu].hMemObj, (void*)-1, 703 PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE); 704 if (RT_SUCCESS(rc)) 705 { 706 void *pApicBase = RTR0MemObjAddress(g_aLApics[iCpu].hMapObj); 707 uint32_t ApicVersion = ApicRegRead(pApicBase, APIC_REG_VERSION); 708 /* 709 * 0x0X 82489 external APIC 710 * 0x1X Local APIC 711 * 0x2X..0xFF reserved 712 */ 713 if ((APIC_REG_VERSION_GET_VER(ApicVersion) & 0xF0) != 0x10) 714 { 715 RTR0MemObjFree(g_aLApics[iCpu].hMapObj, true /* fFreeMappings */); 716 RTR0MemObjFree(g_aLApics[iCpu].hMemObj, true /* fFreeMappings */); 717 g_aLApics[iCpu].fEnabled = false; 718 continue; 719 } 720 g_aLApics[iCpu].fHasThermal = APIC_REG_VERSION_GET_MAX_LVT(ApicVersion) >= 5; 721 g_aLApics[iCpu].pv = pApicBase; 722 } 723 } 724 } 725 if (RT_FAILURE(rc)) 726 { 727 cpumR0UnmapLocalApics(); 728 return rc; 729 } 730 731 return VINF_SUCCESS; 732 } 733 734 735 /** 736 * Unmap the Local APIC of all host CPUs. 737 */ 738 static void cpumR0UnmapLocalApics(void) 739 { 740 for (unsigned iCpu = RT_ELEMENTS(g_aLApics); iCpu-- > 0;) 741 { 742 if (g_aLApics[iCpu].pv) 743 { 744 RTR0MemObjFree(g_aLApics[iCpu].hMapObj, true /* fFreeMappings */); 745 RTR0MemObjFree(g_aLApics[iCpu].hMemObj, true /* fFreeMappings */); 746 g_aLApics[iCpu].fEnabled = false; 747 g_aLApics[iCpu].pv = NULL; 748 } 749 } 750 } 751 752 753 /** 754 * Write the Local APIC mapping address of the current host CPU to CPUM to be 755 * able to access the APIC registers in the raw mode switcher for disabling/ 756 * re-enabling the NMI. 
Must be called with disabled preemption or disabled 757 * interrupts! 758 * 759 * @param pVM VM handle. 760 * @param idHostCpu The ID of the current host CPU. 761 */ 762 VMMR0DECL(void) CPUMR0SetLApic(PVM pVM, RTCPUID idHostCpu) 763 { 764 pVM->cpum.s.pvApicBase = g_aLApics[RTMpCpuIdToSetIndex(idHostCpu)].pv; 765 } 766 767 #endif /* VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI */ -
trunk/src/VBox/VMM/VMMR0/VMMR0.cpp
r33595 r33935 117 117 if (RT_SUCCESS(rc)) 118 118 { 119 LogFlow(("ModuleInit: returns success.\n")); 120 return VINF_SUCCESS; 119 rc = CPUMR0ModuleInit(); 120 if (RT_SUCCESS(rc)) 121 { 122 LogFlow(("ModuleInit: returns success.\n")); 123 return VINF_SUCCESS; 124 } 121 125 } 122 126 … … 148 152 { 149 153 LogFlow(("ModuleTerm:\n")); 154 155 /* 156 * Terminate the CPUM module (Local APIC cleanup). 157 */ 158 CPUMR0ModuleTerm(); 150 159 151 160 /* … … 574 583 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER; 575 584 RTThreadPreemptDisable(&PreemptState); 576 ASMAtomicWriteU32(&pVCpu->idHostCpu, RTMpCpuId()); 585 RTCPUID idHostCpu = RTMpCpuId(); 586 #ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 587 CPUMR0SetLApic(pVM, idHostCpu); 588 #endif 589 ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu); 577 590 if (pVM->vmm.s.fUsePeriodicPreemptionTimers) 578 591 GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu)); … … 887 900 RTCCUINTREG fFlags = ASMIntDisableFlags(); 888 901 902 #ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 903 RTCPUID idHostCpu = RTMpCpuId(); 904 CPUMR0SetLApic(pVM, idHostCpu); 905 #endif 906 889 907 /* We might need to disable VT-x if the active switcher turns off paging. */ 890 908 rc = HWACCMR0EnterSwitcher(pVM, &fVTxDisabled); -
trunk/src/VBox/VMM/VMMSwitcher/AMD64andLegacy.mac
r33540 r33935 23 23 ;******************************************************************************* 24 24 %include "VBox/asmdefs.mac" 25 %include "VBox/apic.mac" 25 26 %include "VBox/x86.mac" 26 27 %include "VBox/cpum.mac" … … 250 251 pop qword [rdx + r8 + CPUMCPU.Host.rflags] 251 252 253 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 254 ; Block Local APIC NMI vectors 255 mov rbx, [rdx + CPUM.pvApicBase] 256 or rbx, rbx 257 jz htg_noapic 258 xor edi, edi 259 mov eax, [rbx + APIC_REG_LVT_LINT0] 260 mov ecx, eax 261 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK) 262 cmp ecx, APIC_REG_LVT_MODE_NMI 263 jne htg_nolint0 264 or edi, 0x01 265 or eax, APIC_REG_LVT_MASKED 266 mov [rbx + APIC_REG_LVT_LINT0], eax 267 mov eax, [rbx + APIC_REG_LVT_LINT0] ; write completion 268 htg_nolint0: 269 mov eax, [rbx + APIC_REG_LVT_LINT1] 270 mov ecx, eax 271 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK) 272 cmp ecx, APIC_REG_LVT_MODE_NMI 273 jne htg_nolint1 274 or edi, 0x02 275 or eax, APIC_REG_LVT_MASKED 276 mov [rbx + APIC_REG_LVT_LINT1], eax 277 mov eax, [rbx + APIC_REG_LVT_LINT1] ; write completion 278 htg_nolint1: 279 mov eax, [rbx + APIC_REG_LVT_PC] 280 mov ecx, eax 281 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK) 282 cmp ecx, APIC_REG_LVT_MODE_NMI 283 jne htg_nopc 284 or edi, 0x04 285 or eax, APIC_REG_LVT_MASKED 286 mov [rbx + APIC_REG_LVT_PC], eax 287 mov eax, [rbx + APIC_REG_LVT_PC] ; write completion 288 htg_nopc: 289 mov eax, [rbx + APIC_REG_VERSION] 290 shr eax, 16 291 cmp al, 5 292 jb htg_notherm 293 mov eax, [rbx + APIC_REG_LVT_THMR] 294 mov ecx, eax 295 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK) 296 cmp ecx, APIC_REG_LVT_MODE_NMI 297 jne htg_notherm 298 or edi, 0x08 299 or eax, APIC_REG_LVT_MASKED 300 mov [rbx + APIC_REG_LVT_THMR], eax 301 mov eax, [rbx + APIC_REG_LVT_THMR] ; write completion 302 htg_notherm: 303 mov [rdx + CPUM.fApicDisVectors], edi 304 htg_noapic: 305 %endif 306 252 307 FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter 
- NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter. 253 308 ; save MSR_IA32_SYSENTER_CS register. … … 1049 1104 mov rdx, rbx 1050 1105 1106 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 1107 ;Unblock Local APIC NMI vectors 1108 mov ecx, [rdx + CPUM.fApicDisVectors] 1109 mov rbx, [rdx + CPUM.pvApicBase] 1110 shr ecx, 1 1111 jnc gth_nolint0 1112 and dword [rbx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED 1113 gth_nolint0: 1114 shr ecx, 1 1115 jnc gth_nolint1 1116 and dword [rbx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED 1117 gth_nolint1: 1118 shr ecx, 1 1119 jnc gth_nopc 1120 and dword [rbx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED 1121 gth_nopc: 1122 shr ecx, 1 1123 jnc gth_notherm 1124 and dword [rbx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED 1125 gth_notherm: 1126 %endif 1051 1127 1052 1128 ; restore general registers. -
trunk/src/VBox/VMM/VMMSwitcher/LegacyandAMD64.mac
r30180 r33935 26 26 ;******************************************************************************* 27 27 %include "VBox/asmdefs.mac" 28 %include "VBox/apic.mac" 28 29 %include "VBox/x86.mac" 29 30 %include "VBox/cpum.mac" … … 155 156 %endif 156 157 158 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 159 CPUM_FROM_CPUMCPU_WITH_OFFSET edx, ebp 160 mov ebx, [edx + CPUM.pvApicBase] 161 or ebx, ebx 162 jz htg_noapic 163 mov eax, [ebx + APIC_REG_LVT_LINT0] 164 mov ecx, eax 165 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK) 166 cmp ecx, APIC_REG_LVT_MODE_NMI 167 jne htg_nolint0 168 or edi, 0x01 169 or eax, APIC_REG_LVT_MASKED 170 mov [ebx + APIC_REG_LVT_LINT0], eax 171 mov eax, [ebx + APIC_REG_LVT_LINT0] ; write completion 172 htg_nolint0: 173 mov eax, [ebx + APIC_REG_LVT_LINT1] 174 mov ecx, eax 175 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK) 176 cmp ecx, APIC_REG_LVT_MODE_NMI 177 jne htg_nolint1 178 or edi, 0x02 179 or eax, APIC_REG_LVT_MASKED 180 mov [ebx + APIC_REG_LVT_LINT1], eax 181 mov eax, [ebx + APIC_REG_LVT_LINT1] ; write completion 182 htg_nolint1: 183 mov eax, [ebx + APIC_REG_LVT_PC] 184 mov ecx, eax 185 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK) 186 cmp ecx, APIC_REG_LVT_MODE_NMI 187 jne htg_nopc 188 or edi, 0x04 189 or eax, APIC_REG_LVT_MASKED 190 mov [ebx + APIC_REG_LVT_PC], eax 191 mov eax, [ebx + APIC_REG_LVT_PC] ; write completion 192 htg_nopc: 193 mov eax, [ebx + APIC_REG_VERSION] 194 shr eax, 16 195 cmp al, 5 196 jb htg_notherm 197 mov eax, [ebx + APIC_REG_LVT_THMR] 198 mov ecx, eax 199 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK) 200 cmp ecx, APIC_REG_LVT_MODE_NMI 201 jne htg_notherm 202 or edi, 0x08 203 or eax, APIC_REG_LVT_MASKED 204 mov [ebx + APIC_REG_LVT_THMR], eax 205 mov eax, [ebx + APIC_REG_LVT_THMR] ; write completion 206 htg_notherm: 207 mov [edx + CPUM.fApicDisVectors], edi 208 htg_noapic: 209 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp 210 %endif 211 157 212 ; control registers. 
158 213 mov eax, cr0 … … 287 342 ; Load CPUM pointer into rdx 288 343 mov rdx, [NAME(pCpumIC) wrt rip] 289 CPUMCPU_FROM_CPUM_WITH_OFFSET 344 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp 290 345 291 346 mov rax, cs … … 369 424 ; Load CPUM pointer into rdx 370 425 mov rdx, [NAME(pCpumIC) wrt rip] 371 CPUMCPU_FROM_CPUM_WITH_OFFSET 426 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp 372 427 373 428 %ifdef VBOX_WITH_CRASHDUMP_MAGIC … … 516 571 FIXUP FIX_GC_CPUM_OFF, 1, 0 517 572 mov edx, 0ffffffffh 518 CPUMCPU_FROM_CPUM_WITH_OFFSET 573 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp 519 574 mov esi, [edx + CPUMCPU.Host.cr3] 520 575 mov cr3, esi … … 523 578 FIXUP FIX_HC_CPUM_OFF, 1, 0 524 579 mov edx, 0ffffffffh 525 CPUMCPU_FROM_CPUM_WITH_OFFSET 580 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp 526 581 527 582 ; restore the host EFER … … 570 625 ;mov ecx, [edx + CPUMCPU.Host.cr2] ; assumes this is waste of time. 571 626 ;mov cr2, ecx 627 628 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 629 ; Restore blocked Local APIC NMI vectors 630 CPUM_FROM_CPUMCPU_WITH_OFFSET edx, ebp 631 mov ebx, [edx + CPUM.pvApicBase] 632 mov ecx, [edx + CPUM.fApicDisVectors] 633 CPUMCPU_FROM_CPUM_WITH_OFFSET edx, ebp 634 shr ecx, 1 635 jnc gth_nolint0 636 and dword [ebx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED 637 gth_nolint0: 638 shr ecx, 1 639 jnc gth_nolint1 640 and dword [ebx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED 641 gth_nolint1: 642 shr ecx, 1 643 jnc gth_nopc 644 and dword [ebx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED 645 gth_nopc: 646 shr ecx, 1 647 jnc gth_notherm 648 and dword [ebx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED 649 gth_notherm: 650 %endif 572 651 573 652 ; restore general registers. -
trunk/src/VBox/VMM/VMMSwitcher/PAEand32Bit.mac
r33540 r33935 22 22 ;******************************************************************************* 23 23 %include "VBox/asmdefs.mac" 24 %include "VBox/apic.mac" 24 25 %include "VBox/x86.mac" 25 26 %include "VBox/cpum.mac" … … 136 137 pushfd 137 138 pop dword [edx + CPUMCPU.Host.eflags] 139 140 ; Block Local APIC NMI vectors 141 xor edi, edi 142 143 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 144 mov esi, edx 145 CPUM_FROM_CPUMCPU(edx) 146 mov ebx, [edx + CPUM.pvApicBase] 147 or ebx, ebx 148 jz htg_noapic 149 mov eax, [ebx + APIC_REG_LVT_LINT0] 150 mov ecx, eax 151 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK) 152 cmp ecx, APIC_REG_LVT_MODE_NMI 153 jne htg_nolint0 154 or edi, 0x01 155 or eax, APIC_REG_LVT_MASKED 156 mov [ebx + APIC_REG_LVT_LINT0], eax 157 mov eax, [ebx + APIC_REG_LVT_LINT0] ; write completion 158 htg_nolint0: 159 mov eax, [ebx + APIC_REG_LVT_LINT1] 160 mov ecx, eax 161 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK) 162 cmp ecx, APIC_REG_LVT_MODE_NMI 163 jne htg_nolint1 164 or edi, 0x02 165 or eax, APIC_REG_LVT_MASKED 166 mov [ebx + APIC_REG_LVT_LINT1], eax 167 mov eax, [ebx + APIC_REG_LVT_LINT1] ; write completion 168 htg_nolint1: 169 mov eax, [ebx + APIC_REG_LVT_PC] 170 mov ecx, eax 171 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK) 172 cmp ecx, APIC_REG_LVT_MODE_NMI 173 jne htg_nopc 174 or edi, 0x04 175 or eax, APIC_REG_LVT_MASKED 176 mov [ebx + APIC_REG_LVT_PC], eax 177 mov eax, [ebx + APIC_REG_LVT_PC] ; write completion 178 htg_nopc: 179 mov eax, [ebx + APIC_REG_VERSION] 180 shr eax, 16 181 cmp al, 5 182 jb htg_notherm 183 mov eax, [ebx + APIC_REG_LVT_THMR] 184 mov ecx, eax 185 and ecx, (APIC_REG_LVT_MASKED | APIC_REG_LVT_MODE_MASK) 186 cmp ecx, APIC_REG_LVT_MODE_NMI 187 jne htg_notherm 188 or edi, 0x08 189 or eax, APIC_REG_LVT_MASKED 190 mov [ebx + APIC_REG_LVT_THMR], eax 191 mov eax, [ebx + APIC_REG_LVT_THMR] ; write completion 192 htg_notherm: 193 mov [edx + CPUM.fApicDisVectors], edi 194 htg_noapic: 195 mov edx, 
esi 196 %endif 138 197 139 198 FIXUP FIX_NO_SYSENTER_JMP, 0, htg_no_sysenter - NAME(Start) ; this will insert a jmp htg_no_sysenter if host doesn't use sysenter. … … 935 994 gth_debug_regs_no: 936 995 996 %ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 997 mov esi, edx 998 CPUM_FROM_CPUMCPU(edx) 999 ; Restore blocked Local APIC NMI vectors 1000 mov ebx, [edx + CPUM.pvApicBase] 1001 mov ecx, [edx + CPUM.fApicDisVectors] 1002 mov edx, esi 1003 shr ecx, 1 1004 jnc gth_nolint0 1005 and dword [ebx + APIC_REG_LVT_LINT0], ~APIC_REG_LVT_MASKED 1006 gth_nolint0: 1007 shr ecx, 1 1008 jnc gth_nolint1 1009 and dword [ebx + APIC_REG_LVT_LINT1], ~APIC_REG_LVT_MASKED 1010 gth_nolint1: 1011 shr ecx, 1 1012 jnc gth_nopc 1013 and dword [ebx + APIC_REG_LVT_PC], ~APIC_REG_LVT_MASKED 1014 gth_nopc: 1015 shr ecx, 1 1016 jnc gth_notherm 1017 and dword [ebx + APIC_REG_LVT_THMR], ~APIC_REG_LVT_MASKED 1018 gth_notherm: 1019 %endif 1020 937 1021 ; restore general registers. 938 1022 mov eax, edi ; restore return code. eax = return code !! -
trunk/src/VBox/VMM/testcase/Makefile.kmk
r29329 r33935 107 107 tstVMStructRC_DEFS += VBOX_WITH_R0_LOGGING 108 108 endif 109 ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 110 tstVMStructRC_DEFS += VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 111 endif 109 112 tstVMStructRC_SOURCES = tstVMStructRC.cpp 110 113 tstVMStructRC_INCS = $(VBOX_PATH_VMM_SRC) $(VBOX_PATH_VMM_SRC)/PATM … … 129 132 tstVMStructSize_DEFS += VBOX_WITH_R0_LOGGING 130 133 endif 134 ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 135 tstVMStructSize_DEFS += VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 136 endif 131 137 132 138 tstAsmStructs_TEMPLATE = VBOXR3AUTOTST … … 137 143 ifdef VBOX_WITH_R0_LOGGING 138 144 tstAsmStructs_DEFS += VBOX_WITH_R0_LOGGING 145 endif 146 ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 147 tstAsmStructs_DEFS += VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 139 148 endif 140 149 tstAsmStructs_INCS = $(VBOX_PATH_VMM_SRC) $(VBOX_VMM_TESTCASE_OUT_DIR) … … 147 156 ifdef VBOX_WITH_R0_LOGGING 148 157 tstAsmStructsRC_DEFS += VBOX_WITH_R0_LOGGING 158 endif 159 ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 160 tstAsmStructsRC_DEFS += VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 149 161 endif 150 162 tstAsmStructsRC_INCS = $(VBOX_PATH_VMM_SRC) $(VBOX_VMM_TESTCASE_OUT_DIR) … … 286 298 $(DEFS.$(KBUILD_TARGET_ARCH)) \ 287 299 $(DEFS.$(KBUILD_TARGET).$(KBUILD_TARGET_ARCH)) \ 300 $(if $(VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI),VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI,) \ 288 301 ) \ 289 302 -f $(if $(eq $(KBUILD_TARGET),darwin),macho,elf) \ -
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
r33540 r33935 260 260 CHECK_MEMBER_ALIGNMENT(VM, aCpus[0].cpum.s.Hyper, 64); 261 261 CHECK_MEMBER_ALIGNMENT(VM, aCpus[1].cpum.s.Hyper, 64); 262 #ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI 263 CHECK_MEMBER_ALIGNMENT(VM, cpum.s.pvApicBase, 8); 264 #endif 262 265 CHECK_MEMBER_ALIGNMENT(VM, cpum.s.GuestEntry, 64); 263 266
Note: See TracChangeset for help on using the changeset viewer.