; $Id: CPUMInternal.mac 76553 2019-01-01 01:45:53Z vboxsync $
;; @file
; CPUM - Internal header file (asm).
;

;
; Copyright (C) 2006-2019 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

%include "VBox/asmdefs.mac"
%include "VBox/vmm/cpum.mac"

;; Check sanity.
%ifdef VBOX_WITH_KERNEL_USING_XMM
 %ifndef IN_RING0
  %error "What? We've got code assuming VBOX_WITH_KERNEL_USING_XMM is only defined in ring-0!"
 %endif
%endif

;; For numeric expressions
%ifdef RT_ARCH_AMD64
 %define CPUM_IS_AMD64      1
%else
 %define CPUM_IS_AMD64      0
%endif


;;
; CPU info
struc CPUMINFO
    .cMsrRanges             resd 1                  ; uint32_t
    .fMsrMask               resd 1                  ; uint32_t
    .fMxCsrMask             resd 1                  ; uint32_t
    .cCpuIdLeaves           resd 1                  ; uint32_t
    .iFirstExtCpuIdLeaf     resd 1                  ; uint32_t
    .enmUnknownCpuIdMethod  resd 1                  ; CPUMUNKNOWNCPUID
    .DefCpuId               resb CPUMCPUID_size     ; CPUMCPUID
    .uScalableBusFreq       resq 1                  ; uint64_t
    .paMsrRangesR0          RTR0PTR_RES 1           ; R0PTRTYPE(PCPUMMSRRANGE)
    .paCpuIdLeavesR0        RTR0PTR_RES 1           ; R0PTRTYPE(PCPUMCPUIDLEAF)
    .paMsrRangesR3          RTR3PTR_RES 1           ; R3PTRTYPE(PCPUMMSRRANGE)
    .paCpuIdLeavesR3        RTR3PTR_RES 1           ; R3PTRTYPE(PCPUMCPUIDLEAF)
    .paMsrRangesRC          RTRCPTR_RES 1           ; RCPTRTYPE(PCPUMMSRRANGE)
    .paCpuIdLeavesRC        RTRCPTR_RES 1           ; RCPTRTYPE(PCPUMCPUIDLEAF)
endstruc


%define CPUM_USED_FPU_HOST              RT_BIT(0)
%define CPUM_USED_FPU_GUEST             RT_BIT(10)
%define CPUM_USED_FPU_SINCE_REM         RT_BIT(1)
%define CPUM_USED_MANUAL_XMM_RESTORE    RT_BIT(2)
%define CPUM_USE_SYSENTER               RT_BIT(3)
%define CPUM_USE_SYSCALL                RT_BIT(4)
%define CPUM_USE_DEBUG_REGS_HOST        RT_BIT(5)
%define CPUM_USED_DEBUG_REGS_HOST       RT_BIT(6)
%define CPUM_USE_DEBUG_REGS_HYPER       RT_BIT(7)
%define CPUM_USED_DEBUG_REGS_HYPER      RT_BIT(8)
%define CPUM_USED_DEBUG_REGS_GUEST      RT_BIT(9)
%define CPUM_SYNC_FPU_STATE             RT_BIT(16)
%define CPUM_SYNC_DEBUG_REGS_GUEST      RT_BIT(17)
%define CPUM_SYNC_DEBUG_REGS_HYPER      RT_BIT(18)
%define CPUM_USE_FFXSR_LEAKY            RT_BIT(19)
%define CPUM_USE_SUPPORTS_LONGMODE      RT_BIT(20)

%define CPUM_HANDLER_DS                 1
%define CPUM_HANDLER_ES                 2
%define CPUM_HANDLER_FS                 3
%define CPUM_HANDLER_GS                 4
%define CPUM_HANDLER_IRET               5
%define CPUM_HANDLER_TYPEMASK           0ffh
%define CPUM_HANDLER_CTXCORE_IN_EBP     RT_BIT(31)


struc CPUM
    ;...
    .offCPUMCPU0            resd    1
    .fHostUseFlags          resd    1

    ; CR4 masks
    .CR4.AndMask            resd    1
    .CR4.OrMask             resd    1
    ; entered rawmode?
    .u8PortableCpuIdLevel   resb    1
    .fPendingRestore        resb    1

    alignb 8
    .fXStateGuestMask       resq    1
    .fXStateHostMask        resq    1

    alignb 64
    .HostFeatures           resb    48
    .GuestFeatures          resb    48
    .GuestInfo              resb    RTHCPTR_CB*4 + RTRCPTR_CB*2 + 4*12

    ; Patch manager saved state compatibility CPUID leaf arrays
    .aGuestCpuIdPatmStd     resb    16*6
    .aGuestCpuIdPatmExt     resb    16*10
    .aGuestCpuIdPatmCentaur resb    16*4

    alignb 8
    .cMsrWrites                 resq 1
    .cMsrWritesToIgnoredBits    resq 1
    .cMsrWritesRaiseGp          resq 1
    .cMsrWritesUnknown          resq 1
    .cMsrReads                  resq 1
    .cMsrReadsRaiseGp           resq 1
    .cMsrReadsUnknown           resq 1
endstruc
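
;
; Illustrative sketch, not part of the original header: the CPUM_USE_* /
; CPUM_USED_* bits above are tested against the CPUMCPU.fUseFlags field
; defined further down, the way CLEANFPU checks CPUM_USE_FFXSR_LEAKY and
; SAVE_32_OR_64_FPU checks CPUM_USE_SUPPORTS_LONGMODE below.  A hypothetical
; check, assuming xDX points to the CPUMCPU structure as elsewhere in this
; file:
;
;       test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU_GUEST
;       jnz     .guest_fpu_is_loaded
;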

struc CPUMCPU
    ;
    ; Guest context state
    ; (Identical to the .Hyper chunk below and to CPUMCTX in cpum.mac.)
    ;
    .Guest                  resq    0
    .Guest.eax              resq    1
    .Guest.ecx              resq    1
    .Guest.edx              resq    1
    .Guest.ebx              resq    1
    .Guest.esp              resq    1
    .Guest.ebp              resq    1
    .Guest.esi              resq    1
    .Guest.edi              resq    1
    .Guest.r8               resq    1
    .Guest.r9               resq    1
    .Guest.r10              resq    1
    .Guest.r11              resq    1
    .Guest.r12              resq    1
    .Guest.r13              resq    1
    .Guest.r14              resq    1
    .Guest.r15              resq    1
    .Guest.es.Sel           resw    1
    .Guest.es.PaddingSel    resw    1
    .Guest.es.ValidSel      resw    1
    .Guest.es.fFlags        resw    1
    .Guest.es.u64Base       resq    1
    .Guest.es.u32Limit      resd    1
    .Guest.es.Attr          resd    1
    .Guest.cs.Sel           resw    1
    .Guest.cs.PaddingSel    resw    1
    .Guest.cs.ValidSel      resw    1
    .Guest.cs.fFlags        resw    1
    .Guest.cs.u64Base       resq    1
    .Guest.cs.u32Limit      resd    1
    .Guest.cs.Attr          resd    1
    .Guest.ss.Sel           resw    1
    .Guest.ss.PaddingSel    resw    1
    .Guest.ss.ValidSel      resw    1
    .Guest.ss.fFlags        resw    1
    .Guest.ss.u64Base       resq    1
    .Guest.ss.u32Limit      resd    1
    .Guest.ss.Attr          resd    1
    .Guest.ds.Sel           resw    1
    .Guest.ds.PaddingSel    resw    1
    .Guest.ds.ValidSel      resw    1
    .Guest.ds.fFlags        resw    1
    .Guest.ds.u64Base       resq    1
    .Guest.ds.u32Limit      resd    1
    .Guest.ds.Attr          resd    1
    .Guest.fs.Sel           resw    1
    .Guest.fs.PaddingSel    resw    1
    .Guest.fs.ValidSel      resw    1
    .Guest.fs.fFlags        resw    1
    .Guest.fs.u64Base       resq    1
    .Guest.fs.u32Limit      resd    1
    .Guest.fs.Attr          resd    1
    .Guest.gs.Sel           resw    1
    .Guest.gs.PaddingSel    resw    1
    .Guest.gs.ValidSel      resw    1
    .Guest.gs.fFlags        resw    1
    .Guest.gs.u64Base       resq    1
    .Guest.gs.u32Limit      resd    1
    .Guest.gs.Attr          resd    1
    .Guest.eip              resq    1
    .Guest.eflags           resq    1
    .Guest.cr0              resq    1
    .Guest.cr2              resq    1
    .Guest.cr3              resq    1
    .Guest.cr4              resq    1
    .Guest.dr               resq    8
    .Guest.gdtrPadding      resw    3
    .Guest.gdtr             resw    0
    .Guest.gdtr.cbGdt       resw    1
    .Guest.gdtr.pGdt        resq    1
    .Guest.idtrPadding      resw    3
    .Guest.idtr             resw    0
    .Guest.idtr.cbIdt       resw    1
    .Guest.idtr.pIdt        resq    1
    .Guest.ldtr.Sel         resw    1
    .Guest.ldtr.PaddingSel  resw    1
    .Guest.ldtr.ValidSel    resw    1
    .Guest.ldtr.fFlags      resw    1
    .Guest.ldtr.u64Base     resq    1
    .Guest.ldtr.u32Limit    resd    1
    .Guest.ldtr.Attr        resd    1
    .Guest.tr.Sel           resw    1
    .Guest.tr.PaddingSel    resw    1
    .Guest.tr.ValidSel      resw    1
    .Guest.tr.fFlags        resw    1
    .Guest.tr.u64Base       resq    1
    .Guest.tr.u32Limit      resd    1
    .Guest.tr.Attr          resd    1
    .Guest.SysEnter.cs      resb    8
    .Guest.SysEnter.eip     resb    8
    .Guest.SysEnter.esp     resb    8
    .Guest.msrEFER          resb    8
    .Guest.msrSTAR          resb    8
    .Guest.msrPAT           resb    8
    .Guest.msrLSTAR         resb    8
    .Guest.msrCSTAR         resb    8
    .Guest.msrSFMASK        resb    8
    .Guest.msrKERNELGSBASE  resb    8
    .Guest.uMsrPadding0     resb    8
    alignb 8
    .Guest.aXcr             resq    2
    .Guest.fXStateMask      resq    1
    .Guest.pXStateR0        RTR0PTR_RES 1
    alignb 8
    .Guest.pXStateR3        RTR3PTR_RES 1
    alignb 8
    .Guest.pXStateRC        RTRCPTR_RES 1
    .Guest.aoffXState       resw    64
    .Guest.fWorldSwitcher   resd    1
    .Guest.fExtrn           resq    1
    alignb 8
    .Guest.hwvirt.svm.uMsrHSavePa               resq    1
    .Guest.hwvirt.svm.GCPhysVmcb                resq    1
    .Guest.hwvirt.svm.pVmcbR0                   RTR0PTR_RES 1
    alignb 8
    .Guest.hwvirt.svm.pVmcbR3                   RTR3PTR_RES 1
    alignb 8
    .Guest.hwvirt.svm.HostState                 resb    184
    .Guest.hwvirt.svm.uPrevPauseTick            resq    1
    .Guest.hwvirt.svm.cPauseFilter              resw    1
    .Guest.hwvirt.svm.cPauseFilterThreshold     resw    1
    .Guest.hwvirt.svm.fInterceptEvents          resb    1
    alignb 8
    .Guest.hwvirt.svm.pvMsrBitmapR0             RTR0PTR_RES 1
    alignb 8
    .Guest.hwvirt.svm.pvMsrBitmapR3             RTR3PTR_RES 1
    alignb 8
    .Guest.hwvirt.svm.pvIoBitmapR0              RTR0PTR_RES 1
    alignb 8
    .Guest.hwvirt.svm.pvIoBitmapR3              RTR3PTR_RES 1
    alignb 8
    .Guest.hwvirt.svm.HCPhysVmcb                RTHCPHYS_RES 1
    .Guest.hwvirt.svm.u64Padding0               resq    19
    .Guest.hwvirt.enmHwvirt                     resd    1
    .Guest.hwvirt.fGif                          resb    1
    alignb 8
    .Guest.hwvirt.fLocalForcedActions           resd    1
    alignb 64

    .GuestMsrs              resq    0
    .GuestMsrs.au64         resq    64

    ;
    ; Other stuff.
    ;
    .fUseFlags              resd    1
    .fChanged               resd    1
    .offCPUM                resd    1
    .u32RetCode             resd    1

%ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
    .pvApicBase             RTR0PTR_RES 1
    .fApicDisVectors        resd    1
    .fX2Apic                resb    1
%else
    .abPadding3             resb    (RTR0PTR_CB + 4 + 1)
%endif

    .fRawEntered            resb    1
    .fRemEntered            resb    1
    .fCpuIdApicFeatureVisible resb  1

    .abPadding2             resb    (64 - 16 - RTR0PTR_CB - 4 - 1 - 3)

    ;
    ; Host context state
    ;
    alignb 64
    .Host                   resb    0
%if HC_ARCH_BITS == 64
    ;.Host.rax              resq    1 - scratch
    .Host.rbx               resq    1
    ;.Host.rcx              resq    1 - scratch
    ;.Host.rdx              resq    1 - scratch
    .Host.rdi               resq    1
    .Host.rsi               resq    1
    .Host.rbp               resq    1
    .Host.rsp               resq    1
    ;.Host.r8               resq    1 - scratch
    ;.Host.r9               resq    1 - scratch
    .Host.r10               resq    1
    .Host.r11               resq    1
    .Host.r12               resq    1
    .Host.r13               resq    1
    .Host.r14               resq    1
    .Host.r15               resq    1
    ;.Host.rip              resd    1 - scratch
    .Host.rflags            resq    1
%endif
%if HC_ARCH_BITS == 32
    ;.Host.eax              resd    1 - scratch
    .Host.ebx               resd    1
    ;.Host.edx              resd    1 - scratch
    ;.Host.ecx              resd    1 - scratch
    .Host.edi               resd    1
    .Host.esi               resd    1
    .Host.ebp               resd    1
    .Host.eflags            resd    1
    ;.Host.eip              resd    1 - scratch
    ; lss pair!
    .Host.esp               resd    1
%endif
    .Host.ss                resw    1
    .Host.ssPadding         resw    1
    .Host.gs                resw    1
    .Host.gsPadding         resw    1
    .Host.fs                resw    1
    .Host.fsPadding         resw    1
    .Host.es                resw    1
    .Host.esPadding         resw    1
    .Host.ds                resw    1
    .Host.dsPadding         resw    1
    .Host.cs                resw    1
    .Host.csPadding         resw    1

%if HC_ARCH_BITS == 32
    .Host.cr0               resd    1
    ;.Host.cr2              resd    1 - scratch
    .Host.cr3               resd    1
    .Host.cr4               resd    1
    .Host.cr0Fpu            resd    1

    .Host.dr0               resd    1
    .Host.dr1               resd    1
    .Host.dr2               resd    1
    .Host.dr3               resd    1
    .Host.dr6               resd    1
    .Host.dr7               resd    1

    .Host.gdtr              resb    6           ; GDT limit + linear address
    .Host.gdtrPadding       resw    1
    .Host.idtr              resb    6           ; IDT limit + linear address
    .Host.idtrPadding       resw    1
    .Host.ldtr              resw    1
    .Host.ldtrPadding       resw    1
    .Host.tr                resw    1
    .Host.trPadding         resw    1

    alignb 8
    .Host.SysEnter.cs       resq    1
    .Host.SysEnter.eip      resq    1
    .Host.SysEnter.esp      resq    1
    .Host.efer              resq    1
    .Host.auPadding         resb    (20)

%else ; 64-bit
    .Host.cr0Fpu:
    .Host.cr0               resq    1
    ;.Host.cr2              resq    1 - scratch
    .Host.cr3               resq    1
    .Host.cr4               resq    1
    .Host.cr8               resq    1

    .Host.dr0               resq    1
    .Host.dr1               resq    1
    .Host.dr2               resq    1
    .Host.dr3               resq    1
    .Host.dr6               resq    1
    .Host.dr7               resq    1

    .Host.gdtr              resb    10          ; GDT limit + linear address
    .Host.gdtrPadding       resw    1
    .Host.idtr              resb    10          ; IDT limit + linear address
    .Host.idtrPadding       resw    1
    .Host.ldtr              resw    1
    .Host.ldtrPadding       resw    1
    .Host.tr                resw    1
    .Host.trPadding         resw    1

    .Host.SysEnter.cs       resq    1
    .Host.SysEnter.eip      resq    1
    .Host.SysEnter.esp      resq    1
    .Host.FSbase            resq    1
    .Host.GSbase            resq    1
    .Host.efer              resq    1
    .Host.auPadding         resb    4
%endif ; 64-bit
    .Host.pXStateRC         RTRCPTR_RES 1
    alignb RTR0PTR_CB
    .Host.pXStateR0         RTR0PTR_RES 1
    .Host.pXStateR3         RTR3PTR_RES 1
    alignb 8
    .Host.xcr0              resq    1
    .Host.fXStateMask       resq    1

    ;
    ; Hypervisor Context (same as .Guest above).
    ;
    alignb 64
    .Hyper                  resq    0
    .Hyper.eax              resq    1
    .Hyper.ecx              resq    1
    .Hyper.edx              resq    1
    .Hyper.ebx              resq    1
    .Hyper.esp              resq    1
    .Hyper.ebp              resq    1
    .Hyper.esi              resq    1
    .Hyper.edi              resq    1
    .Hyper.r8               resq    1
    .Hyper.r9               resq    1
    .Hyper.r10              resq    1
    .Hyper.r11              resq    1
    .Hyper.r12              resq    1
    .Hyper.r13              resq    1
    .Hyper.r14              resq    1
    .Hyper.r15              resq    1
    .Hyper.es.Sel           resw    1
    .Hyper.es.PaddingSel    resw    1
    .Hyper.es.ValidSel      resw    1
    .Hyper.es.fFlags        resw    1
    .Hyper.es.u64Base       resq    1
    .Hyper.es.u32Limit      resd    1
    .Hyper.es.Attr          resd    1
    .Hyper.cs.Sel           resw    1
    .Hyper.cs.PaddingSel    resw    1
    .Hyper.cs.ValidSel      resw    1
    .Hyper.cs.fFlags        resw    1
    .Hyper.cs.u64Base       resq    1
    .Hyper.cs.u32Limit      resd    1
    .Hyper.cs.Attr          resd    1
    .Hyper.ss.Sel           resw    1
    .Hyper.ss.PaddingSel    resw    1
    .Hyper.ss.ValidSel      resw    1
    .Hyper.ss.fFlags        resw    1
    .Hyper.ss.u64Base       resq    1
    .Hyper.ss.u32Limit      resd    1
    .Hyper.ss.Attr          resd    1
    .Hyper.ds.Sel           resw    1
    .Hyper.ds.PaddingSel    resw    1
    .Hyper.ds.ValidSel      resw    1
    .Hyper.ds.fFlags        resw    1
    .Hyper.ds.u64Base       resq    1
    .Hyper.ds.u32Limit      resd    1
    .Hyper.ds.Attr          resd    1
    .Hyper.fs.Sel           resw    1
    .Hyper.fs.PaddingSel    resw    1
    .Hyper.fs.ValidSel      resw    1
    .Hyper.fs.fFlags        resw    1
    .Hyper.fs.u64Base       resq    1
    .Hyper.fs.u32Limit      resd    1
    .Hyper.fs.Attr          resd    1
    .Hyper.gs.Sel           resw    1
    .Hyper.gs.PaddingSel    resw    1
    .Hyper.gs.ValidSel      resw    1
    .Hyper.gs.fFlags        resw    1
    .Hyper.gs.u64Base       resq    1
    .Hyper.gs.u32Limit      resd    1
    .Hyper.gs.Attr          resd    1
    .Hyper.eip              resq    1
    .Hyper.eflags           resq    1
    .Hyper.cr0              resq    1
    .Hyper.cr2              resq    1
    .Hyper.cr3              resq    1
    .Hyper.cr4              resq    1
    .Hyper.dr               resq    8
    .Hyper.gdtrPadding      resw    3
    .Hyper.gdtr             resw    0
    .Hyper.gdtr.cbGdt       resw    1
    .Hyper.gdtr.pGdt        resq    1
    .Hyper.idtrPadding      resw    3
    .Hyper.idtr             resw    0
    .Hyper.idtr.cbIdt       resw    1
    .Hyper.idtr.pIdt        resq    1
    .Hyper.ldtr.Sel         resw    1
    .Hyper.ldtr.PaddingSel  resw    1
    .Hyper.ldtr.ValidSel    resw    1
    .Hyper.ldtr.fFlags      resw    1
    .Hyper.ldtr.u64Base     resq    1
    .Hyper.ldtr.u32Limit    resd    1
    .Hyper.ldtr.Attr        resd    1
    .Hyper.tr.Sel           resw    1
    .Hyper.tr.PaddingSel    resw    1
    .Hyper.tr.ValidSel      resw    1
    .Hyper.tr.fFlags        resw    1
    .Hyper.tr.u64Base       resq    1
    .Hyper.tr.u32Limit      resd    1
    .Hyper.tr.Attr          resd    1
    .Hyper.SysEnter.cs      resb    8
    .Hyper.SysEnter.eip     resb    8
    .Hyper.SysEnter.esp     resb    8
    .Hyper.msrEFER          resb    8
    .Hyper.msrSTAR          resb    8
    .Hyper.msrPAT           resb    8
    .Hyper.msrLSTAR         resb    8
    .Hyper.msrCSTAR         resb    8
    .Hyper.msrSFMASK        resb    8
    .Hyper.msrKERNELGSBASE  resb    8
    .Hyper.uMsrPadding0     resb    8
    alignb 8
    .Hyper.aXcr             resq    2
    .Hyper.fXStateMask      resq    1
    .Hyper.pXStateR0        RTR0PTR_RES 1
    alignb 8
    .Hyper.pXStateR3        RTR3PTR_RES 1
    alignb 8
    .Hyper.pXStateRC        RTRCPTR_RES 1
    .Hyper.aoffXState       resw    64
    .Hyper.fWorldSwitcher   resd    1
    .Hyper.fExtrn           resq    1
    alignb 8
    .Hyper.hwvirt.svm.uMsrHSavePa               resq    1
    .Hyper.hwvirt.svm.GCPhysVmcb                resq    1
    .Hyper.hwvirt.svm.pVmcbR0                   RTR0PTR_RES 1
    alignb 8
    .Hyper.hwvirt.svm.pVmcbR3                   RTR3PTR_RES 1
    alignb 8
    .Hyper.hwvirt.svm.HostState                 resb    184
    .Hyper.hwvirt.svm.uPrevPauseTick            resq    1
    .Hyper.hwvirt.svm.cPauseFilter              resw    1
    .Hyper.hwvirt.svm.cPauseFilterThreshold     resw    1
    .Hyper.hwvirt.svm.fInterceptEvents          resb    1
    alignb 8
    .Hyper.hwvirt.svm.pvMsrBitmapR0             RTR0PTR_RES 1
    alignb 8
    .Hyper.hwvirt.svm.pvMsrBitmapR3             RTR3PTR_RES 1
    alignb 8
    .Hyper.hwvirt.svm.pvIoBitmapR0              RTR0PTR_RES 1
    alignb 8
    .Hyper.hwvirt.svm.pvIoBitmapR3              RTR3PTR_RES 1
    alignb 8
    .Hyper.hwvirt.svm.HCPhysVmcb                RTHCPHYS_RES 1
    .Hyper.hwvirt.svm.u64Padding0               resq    19
    .Hyper.hwvirt.enmHwvirt                     resd    1
    .Hyper.hwvirt.fGif                          resb    1
    alignb 8
    .Hyper.hwvirt.fLocalForcedActions           resd    1
    alignb 64

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    .aMagic                 resb    56
    .uMagic                 resq    1
%endif
endstruc


;;
; Converts the CPUM pointer to CPUMCPU
; @param    %1      register name
%macro CPUMCPU_FROM_CPUM 1
        add     %1, dword [%1 + CPUM.offCPUMCPU0]
%endmacro
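
;
; Illustrative sketch, not part of the original header: converting a CPUM
; pointer already held in xDX to the first VCPU's CPUMCPU (the register
; choice is hypothetical):
;
;       CPUMCPU_FROM_CPUM xDX           ; xDX: CPUM -> CPUMCPU of VCPU 0
;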

;;
; Converts the CPUM pointer to CPUMCPU
; @param    %1      register name (CPUM)
; @param    %2      register name (CPUMCPU offset)
%macro CPUMCPU_FROM_CPUM_WITH_OFFSET 2
        add     %1, %2
%endmacro

;;
; Converts the CPUMCPU pointer to CPUM
; @param    %1      register name
%macro CPUM_FROM_CPUMCPU 1
        sub     %1, dword [%1 + CPUMCPU.offCPUM]
%endmacro

;;
; Converts the CPUMCPU pointer to CPUM
; @param    %1      register name (CPUM)
; @param    %2      register name (CPUMCPU offset)
%macro CPUM_FROM_CPUMCPU_WITH_OFFSET 2
        sub     %1, %2
%endmacro


%if 0 ; Currently not used anywhere.
;;
; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
;
; Cleans the FPU state, if necessary, before restoring the FPU.
;
; This macro ASSUMES CR0.TS is not set!
;
; @param    xDX     Pointer to CPUMCPU.
; @uses     xAX, EFLAGS
;
; Changes here should also be reflected in CPUMRCA.asm's copy!
;
%macro CLEANFPU 0
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
        jz      .nothing_to_clean

        xor     eax, eax
        fnstsw  ax                              ; FSW -> AX.
        test    eax, RT_BIT(7)                  ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions
                                                ; while clearing & loading the FPU bits in 'clean_fpu' below.
        jz      .clean_fpu
        fnclex

.clean_fpu:
        ffree   st7                             ; Clear FPU stack register(7)'s tag entry to prevent overflow
                                                ; if a wraparound occurs for the upcoming push (load).
        fild    dword [g_r32_Zero xWrtRIP]      ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.
.nothing_to_clean:
%endmacro
%endif ; Unused.


;;
; Makes sure we don't trap (#NM) accessing the FPU.
;
; In ring-0 this is a bit of work since we may have to convince the host
; kernel to do the work for us; we must also report any CR0 changes back to
; HMR0VMX via the VINF_CPUM_HOST_CR0_MODIFIED status code.
;
; If we end up clearing CR0.TS/EM ourselves in ring-0, we'll save the original
; value in CPUMCPU.Host.cr0Fpu.  If we don't, we'll store zero there.  (See
; also CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET.)
;
; In raw-mode we will always have to clear TS and it will be recalculated
; elsewhere and thus needs no saving.
;
; @param    %1      Register to return the return status code in.
; @param    %2      Temporary scratch register.
; @param    %3      Ring-0 only, register pointing to the CPUMCPU structure
;                   of the EMT we're on.
; @uses     EFLAGS, CR0, %1, %2
;
%macro CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC 3
 %ifdef IN_RC
        ;
        ; raw-mode - always clear it.  We won't be here otherwise.
        ;
        mov     %2, cr0
        and     %2, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, %2

 %else
        ;
        ; ring-0 - slightly complicated.
        ;
        xor     %1, %1                          ; 0 / VINF_SUCCESS. Wishing for no CR0 changes.
        mov     [%3 + CPUMCPU.Host.cr0Fpu], %1

        mov     %2, cr0
        test    %2, X86_CR0_TS | X86_CR0_EM     ; Make sure it's safe to access the FPU state.
        jz      %%no_cr0_change

  %ifdef VMM_R0_TOUCH_FPU
        ; Touch the state and check that the kernel updated CR0 for us.
        movdqa  xmm0, xmm0
        mov     %2, cr0
        test    %2, X86_CR0_TS | X86_CR0_EM
        jz      %%cr0_changed
  %endif

        ; Save CR0 and clear those flags ourselves.
        mov     [%3 + CPUMCPU.Host.cr0Fpu], %2
        and     %2, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, %2
 %endif ; !IN_RC (ring-0)

%%cr0_changed:
        mov     %1, VINF_CPUM_HOST_CR0_MODIFIED
%%no_cr0_change:
%endmacro


;;
; Restore CR0 if CR0.TS or CR0.EM were non-zero in the original state.
;
; @param    %1      The original state to restore (or zero).
;
%macro CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET 1
        test    %1, X86_CR0_TS | X86_CR0_EM
        jz      %%skip_cr0_restore
        mov     cr0, %1
%%skip_cr0_restore:
%endmacro
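
;
; Illustrative sketch, not part of the original header: typical ring-0
; pairing of the two macros above, with hypothetical register choices
; (eax = status, rcx = scratch, rdi = CPUMCPU pointer):
;
;       CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC eax, rcx, rdi
;       ; ... access the FPU/SSE state ...
;       mov     rcx, [rdi + CPUMCPU.Host.cr0Fpu]
;       CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET rcx
;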

;;
; Saves the host state.
;
; @uses     rax, rdx
; @param    pCpumCpu    Define for the register containing the CPUMCPU pointer.
; @param    pXState     Define for the register containing the extended state pointer.
;
%macro CPUMR0_SAVE_HOST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
 %ifdef IN_RING0
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
 %elifdef IN_RC
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateRC]
 %else
  %error "Unsupported context!"
 %endif
        mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]

        ;
        ; XSAVE or FXSAVE?
        ;
        or      eax, eax
        jz      %%host_fxsave

        ; XSAVE
        mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
 %ifdef RT_ARCH_AMD64
        o64 xsave [pXState]
 %else
        xsave   [pXState]
 %endif
        jmp     %%host_done

        ; FXSAVE
%%host_fxsave:
 %ifdef RT_ARCH_AMD64
        o64 fxsave [pXState]                    ; Use explicit REX prefix. See @bugref{6398}.
 %else
        fxsave  [pXState]
 %endif

%%host_done:
%endmacro ; CPUMR0_SAVE_HOST


;;
; Loads the host state.
;
; @uses     rax, rdx
; @param    pCpumCpu    Define for the register containing the CPUMCPU pointer.
; @param    pXState     Define for the register containing the extended state pointer.
;
%macro CPUMR0_LOAD_HOST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
 %ifdef IN_RING0
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateR0]
 %elifdef IN_RC
        mov     pXState, [pCpumCpu + CPUMCPU.Host.pXStateRC]
 %else
  %error "Unsupported context!"
 %endif
        mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]

        ;
        ; XRSTOR or FXRSTOR?
        ;
        or      eax, eax
        jz      %%host_fxrstor

        ; XRSTOR
        mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
 %ifdef RT_ARCH_AMD64
        o64 xrstor [pXState]
 %else
        xrstor  [pXState]
 %endif
        jmp     %%host_done

        ; FXRSTOR
%%host_fxrstor:
 %ifdef RT_ARCH_AMD64
        o64 fxrstor [pXState]                   ; Use explicit REX prefix. See @bugref{6398}.
 %else
        fxrstor [pXState]
 %endif

%%host_done:
%endmacro ; CPUMR0_LOAD_HOST


;;
; Wrapper for XSAVE/FXSAVE of the guest FPU state that figures out whether to
; save the 32-bit or the 64-bit layout.
;
; @param    %1      Pointer to CPUMCPU.
; @param    %2      Pointer to XState.
; @param    %3      Force AMD64.
; @param    %4      The instruction to use (xsave or fxsave).
; @uses     xAX, xDX, EFLAGS, 20h of stack.
;
%macro SAVE_32_OR_64_FPU 4
%if CPUM_IS_AMD64 || %3
        ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz     short %%save_long_mode_guest
%endif
        %4      [pXState]
%if CPUM_IS_AMD64 || %3
        jmp     %%save_done_32bit_cs_ds

%%save_long_mode_guest:
        o64 %4  [pXState]

        xor     edx, edx
        cmp     dword [pXState + X86FXSTATE.FPUCS], 0
        jne     short %%save_done

        sub     rsp, 20h                        ; Only need 1ch bytes but keep stack aligned otherwise we #GP(0).
        fnstenv [rsp]
        movzx   eax, word [rsp + 10h]
        mov     [pXState + X86FXSTATE.FPUCS], eax
        movzx   eax, word [rsp + 18h]
        add     rsp, 20h
        mov     [pXState + X86FXSTATE.FPUDS], eax
%endif
%%save_done_32bit_cs_ds:
        mov     edx, X86_FXSTATE_RSVD_32BIT_MAGIC
%%save_done:
        mov     dword [pXState + X86_OFF_FXSTATE_RSVD], edx
%endmacro ; SAVE_32_OR_64_FPU
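
;
; Illustrative note, not part of the original header: CPUMR0_SAVE_GUEST below
; invokes the wrapper above as
;
;       SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, xsave
;
; i.e. without forcing AMD64 (%3 = 0), so the long-mode path is only taken
; when CPUM_USE_SUPPORTS_LONGMODE is set in CPUMCPU.fUseFlags.
;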

;;
; Saves the guest state.
;
; @uses     rax, rdx
; @param    pCpumCpu    Define for the register containing the CPUMCPU pointer.
; @param    pXState     Define for the register containing the extended state pointer.
;
%macro CPUMR0_SAVE_GUEST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
 %ifdef IN_RING0
        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
 %elifdef IN_RC
        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateRC]
 %else
  %error "Unsupported context!"
 %endif
        mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]

        ;
        ; XSAVE or FXSAVE?
        ;
        or      eax, eax
        jz      %%guest_fxsave

        ; XSAVE
        mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
 %ifdef VBOX_WITH_KERNEL_USING_XMM
        and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Already saved in HMR0A.asm.
 %endif
        SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, xsave
        jmp     %%guest_done

        ; FXSAVE
%%guest_fxsave:
        SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, fxsave

%%guest_done:
%endmacro ; CPUMR0_SAVE_GUEST


;;
; Wrapper for selecting 32-bit or 64-bit XRSTOR/FXRSTOR according to what
; SAVE_32_OR_64_FPU did.
;
; @param    %1      Pointer to CPUMCPU.
; @param    %2      Pointer to XState.
; @param    %3      Force AMD64.
; @param    %4      The instruction to use (xrstor or fxrstor).
; @uses     xAX, xDX, EFLAGS
;
%macro RESTORE_32_OR_64_FPU 4
%if CPUM_IS_AMD64 || %3
        ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jz      %%restore_32bit_fpu
        cmp     dword [pXState + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
        jne     short %%restore_64bit_fpu
%%restore_32bit_fpu:
%endif
        %4      [pXState]
%if CPUM_IS_AMD64 || %3
        ; TODO: Restore XMM8-XMM15!
        jmp     short %%restore_fpu_done
%%restore_64bit_fpu:
        o64 %4  [pXState]
%%restore_fpu_done:
%endif
%endmacro ; RESTORE_32_OR_64_FPU


;;
; Loads the guest state.
;
; @uses     rax, rdx
; @param    pCpumCpu    Define for the register containing the CPUMCPU pointer.
; @param    pXState     Define for the register containing the extended state pointer.
;
%macro CPUMR0_LOAD_GUEST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
 %ifdef IN_RING0
        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateR0]
 %elifdef IN_RC
        mov     pXState, [pCpumCpu + CPUMCPU.Guest.pXStateRC]
 %else
  %error "Unsupported context!"
 %endif
        mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]

        ;
        ; XRSTOR or FXRSTOR?
        ;
        or      eax, eax
        jz      %%guest_fxrstor

        ; XRSTOR
        mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
 %ifdef VBOX_WITH_KERNEL_USING_XMM
        and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Will be loaded by HMR0A.asm.
 %endif
        RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0, xrstor
        jmp     %%guest_done

        ; FXRSTOR
%%guest_fxrstor:
        RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0, fxrstor

%%guest_done:
%endmacro ; CPUMR0_LOAD_GUEST
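
;
; Illustrative sketch, not part of the original header: like the host-state
; macros above, the guest save/load macros expect pCpumCpu and pXState to be
; mapped to concrete registers before use, e.g. (register choice
; hypothetical):
;
;       %define pCpumCpu    rdi
;       %define pXState     rsi
;       CPUMR0_SAVE_GUEST               ; clobbers rax, rdx
;       ; ...
;       CPUMR0_LOAD_GUEST
;       %undef pCpumCpu
;       %undef pXState
;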