VirtualBox

source: vbox/trunk/src/VBox/VMM/include/CPUMInternal.mac @ 107639

Last change on this file was r107639, checked in by vboxsync, 3 weeks ago:

VMM/CPUM: Report the host CPU microcode revision number to the guest when on an amd64 host. [missing assembly header update] jiraref:VBP-947

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision

File size: 24.1 KB
; $Id: CPUMInternal.mac 107639 2025-01-10 13:02:42Z vboxsync $
;; @file
; CPUM - Internal header file (asm).
;

;
; Copyright (C) 2006-2024 Oracle and/or its affiliates.
;
; This file is part of VirtualBox base platform packages, as
; available from https://www.virtualbox.org.
;
; This program is free software; you can redistribute it and/or
; modify it under the terms of the GNU General Public License
; as published by the Free Software Foundation, in version 3 of the
; License.
;
; This program is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; You should have received a copy of the GNU General Public License
; along with this program; if not, see <https://www.gnu.org/licenses>.
;
; SPDX-License-Identifier: GPL-3.0-only
;

%include "VBox/asmdefs.mac"
%include "VBox/vmm/cpum.mac"

;; Check sanity.
%ifdef VBOX_WITH_KERNEL_USING_XMM
 %ifndef IN_RING0
  %error "What? We've got code assuming VBOX_WITH_KERNEL_USING_XMM is only defined in ring-0!"
 %endif
%endif

;; For numeric expressions
%ifdef RT_ARCH_AMD64
 %define CPUM_IS_AMD64 1
%else
 %define CPUM_IS_AMD64 0
%endif


;;
; CPU info
struc CPUMINFO
    .cMsrRanges             resd 1                  ; uint32_t
    .fMsrMask               resd 1                  ; uint32_t
    .fMxCsrMask             resd 1                  ; uint32_t
    .cCpuIdLeaves           resd 1                  ; uint32_t
    .iFirstExtCpuIdLeaf     resd 1                  ; uint32_t
    .enmUnknownCpuIdMethod  resd 1                  ; CPUMUNKNOWNCPUID
    .DefCpuId               resb CPUMCPUID_size     ; CPUMCPUID
    .uScalableBusFreq       resq 1                  ; uint64_t
    .uMicrocodeRevision     resd 1                  ; uint32_t
    alignb 8
    .paMsrRangesR3          RTR3PTR_RES 1           ; R3PTRTYPE(PCPUMMSRRANGE)
    .paCpuIdLeavesR3        RTR3PTR_RES 1           ; R3PTRTYPE(PCPUMCPUIDLEAF)
    .aCpuIdLeaves           resb 256*32
    .aMsrRanges             resb 8192*128
endstruc
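
;
; Example (not part of the original file): the uMicrocodeRevision field this
; revision reports to the guest would be read through the struct offset along
; these lines; xBX as the CPUMINFO pointer is a hypothetical register choice.
;
%if 0 ; Illustration only.
        mov     eax, [xBX + CPUMINFO.uMicrocodeRevision] ; Fetch the uint32_t microcode revision.
%endif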


%define CPUM_USED_FPU_HOST              RT_BIT(0)
%define CPUM_USED_FPU_GUEST             RT_BIT(10)
%define CPUM_USED_FPU_SINCE_REM         RT_BIT(1)
%define CPUM_USED_MANUAL_XMM_RESTORE    RT_BIT(2)
%define CPUM_USE_SYSENTER               RT_BIT(3)
%define CPUM_USE_SYSCALL                RT_BIT(4)
%define CPUM_USE_DEBUG_REGS_HOST        RT_BIT(5)
%define CPUM_USED_DEBUG_REGS_HOST       RT_BIT(6)
%define CPUM_USE_DEBUG_REGS_HYPER       RT_BIT(7)
%define CPUM_USED_DEBUG_REGS_HYPER      RT_BIT(8)
%define CPUM_USED_DEBUG_REGS_GUEST      RT_BIT(9)
%define CPUM_USE_FFXSR_LEAKY            RT_BIT(19)
%define CPUM_USE_SUPPORTS_LONGMODE      RT_BIT(20)
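
;
; Example (not part of the original file): these bits live in the
; CPUMCPU.fUseFlags field and are tested the way the CLEANFPU macro below
; does; xDX as the CPUMCPU pointer is a hypothetical register choice.
;
%if 0 ; Illustration only.
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU_GUEST
        jnz     .guest_fpu_is_loaded    ; Taken while guest FPU state is loaded on the CPU.
%endif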


struc CPUM
    ;...
    .fHostUseFlags              resd 1

    .u8PortableCpuIdLevel       resb 1
    .fPendingRestore            resb 1
    .fMtrrRead                  resb 1
    .fMtrrWrite                 resb 1

    alignb 8
    .fXStateGuestMask           resq 1
    .fXStateHostMask            resq 1

    alignb 64
    .HostFeatures               resb 48
    .GuestFeatures              resb 48
    .GuestInfo                  resb CPUMINFO_size

    ; Patch manager saved state compatibility CPUID leaf arrays
    .aGuestCpuIdPatmStd         resb 16*6
    .aGuestCpuIdPatmExt         resb 16*10
    .aGuestCpuIdPatmCentaur     resb 16*4

    alignb 8
    .cMsrWrites                 resq 1
    .cMsrWritesToIgnoredBits    resq 1
    .cMsrWritesRaiseGp          resq 1
    .cMsrWritesUnknown          resq 1
    .cMsrReads                  resq 1
    .cMsrReadsRaiseGp           resq 1
    .cMsrReadsUnknown           resq 1
endstruc

struc CPUMCPU
    ;
    ; Guest context state
    ;
    .Guest                    resq 0
    .Guest.eax                resq 1
    .Guest.ecx                resq 1
    .Guest.edx                resq 1
    .Guest.ebx                resq 1
    .Guest.esp                resq 1
    .Guest.ebp                resq 1
    .Guest.esi                resq 1
    .Guest.edi                resq 1
    .Guest.r8                 resq 1
    .Guest.r9                 resq 1
    .Guest.r10                resq 1
    .Guest.r11                resq 1
    .Guest.r12                resq 1
    .Guest.r13                resq 1
    .Guest.r14                resq 1
    .Guest.r15                resq 1
    .Guest.es.Sel             resw 1
    .Guest.es.PaddingSel      resw 1
    .Guest.es.ValidSel        resw 1
    .Guest.es.fFlags          resw 1
    .Guest.es.u64Base         resq 1
    .Guest.es.u32Limit        resd 1
    .Guest.es.Attr            resd 1
    .Guest.cs.Sel             resw 1
    .Guest.cs.PaddingSel      resw 1
    .Guest.cs.ValidSel        resw 1
    .Guest.cs.fFlags          resw 1
    .Guest.cs.u64Base         resq 1
    .Guest.cs.u32Limit        resd 1
    .Guest.cs.Attr            resd 1
    .Guest.ss.Sel             resw 1
    .Guest.ss.PaddingSel      resw 1
    .Guest.ss.ValidSel        resw 1
    .Guest.ss.fFlags          resw 1
    .Guest.ss.u64Base         resq 1
    .Guest.ss.u32Limit        resd 1
    .Guest.ss.Attr            resd 1
    .Guest.ds.Sel             resw 1
    .Guest.ds.PaddingSel      resw 1
    .Guest.ds.ValidSel        resw 1
    .Guest.ds.fFlags          resw 1
    .Guest.ds.u64Base         resq 1
    .Guest.ds.u32Limit        resd 1
    .Guest.ds.Attr            resd 1
    .Guest.fs.Sel             resw 1
    .Guest.fs.PaddingSel      resw 1
    .Guest.fs.ValidSel        resw 1
    .Guest.fs.fFlags          resw 1
    .Guest.fs.u64Base         resq 1
    .Guest.fs.u32Limit        resd 1
    .Guest.fs.Attr            resd 1
    .Guest.gs.Sel             resw 1
    .Guest.gs.PaddingSel      resw 1
    .Guest.gs.ValidSel        resw 1
    .Guest.gs.fFlags          resw 1
    .Guest.gs.u64Base         resq 1
    .Guest.gs.u32Limit        resd 1
    .Guest.gs.Attr            resd 1
    .Guest.ldtr.Sel           resw 1
    .Guest.ldtr.PaddingSel    resw 1
    .Guest.ldtr.ValidSel      resw 1
    .Guest.ldtr.fFlags        resw 1
    .Guest.ldtr.u64Base       resq 1
    .Guest.ldtr.u32Limit      resd 1
    .Guest.ldtr.Attr          resd 1
    .Guest.tr.Sel             resw 1
    .Guest.tr.PaddingSel      resw 1
    .Guest.tr.ValidSel        resw 1
    .Guest.tr.fFlags          resw 1
    .Guest.tr.u64Base         resq 1
    .Guest.tr.u32Limit        resd 1
    .Guest.tr.Attr            resd 1
    alignb 8
    .Guest.eip                resq 1
    .Guest.eflags             resq 1
    .Guest.fExtrn             resq 1
    .Guest.uRipInhibitInt     resq 1
    .Guest.cr0                resq 1
    .Guest.cr2                resq 1
    .Guest.cr3                resq 1
    .Guest.cr4                resq 1
    .Guest.dr                 resq 8
    .Guest.gdtrPadding        resw 3
    .Guest.gdtr               resw 0
    .Guest.gdtr.cbGdt         resw 1
    .Guest.gdtr.pGdt          resq 1
    .Guest.idtrPadding        resw 3
    .Guest.idtr               resw 0
    .Guest.idtr.cbIdt         resw 1
    .Guest.idtr.pIdt          resq 1
    .Guest.SysEnter.cs        resb 8
    .Guest.SysEnter.eip       resb 8
    .Guest.SysEnter.esp       resb 8
    .Guest.msrEFER            resb 8
    .Guest.msrSTAR            resb 8
    .Guest.msrPAT             resb 8
    .Guest.msrLSTAR           resb 8
    .Guest.msrCSTAR           resb 8
    .Guest.msrSFMASK          resb 8
    .Guest.msrKERNELGSBASE    resb 8

    alignb 32
    .Guest.aPaePdpes          resq 4

    alignb 8
    .Guest.aXcr               resq 2
    .Guest.fXStateMask        resq 1
    .Guest.fUsedFpuGuest      resb 1
    alignb 8
    .Guest.aoffXState         resw 64
    alignb 256
    .Guest.abXState           resb 0x4000-0x300
    .Guest.XState             EQU .Guest.abXState

;;
    alignb 4096
    .Guest.hwvirt             resb 0
    .Guest.hwvirt.svm         resb 0
    .Guest.hwvirt.vmx         resb 0

    .Guest.hwvirt.svm.Vmcb                  EQU .Guest.hwvirt.svm
    .Guest.hwvirt.svm.abMsrBitmap           EQU (.Guest.hwvirt.svm.Vmcb + 0x1000)
    .Guest.hwvirt.svm.abIoBitmap            EQU (.Guest.hwvirt.svm.abMsrBitmap + 0x2000)
    .Guest.hwvirt.svm.uMsrHSavePa           EQU (.Guest.hwvirt.svm.abIoBitmap + 0x3000)       ; resq 1
    .Guest.hwvirt.svm.GCPhysVmcb            EQU (.Guest.hwvirt.svm.uMsrHSavePa + 8)           ; resq 1
    alignb 8
    .Guest.hwvirt.svm.HostState             EQU (.Guest.hwvirt.svm.GCPhysVmcb + 8)            ; resb 184
    .Guest.hwvirt.svm.uPrevPauseTick        EQU (.Guest.hwvirt.svm.HostState + 184)           ; resq 1
    .Guest.hwvirt.svm.cPauseFilter          EQU (.Guest.hwvirt.svm.uPrevPauseTick + 8)        ; resw 1
    .Guest.hwvirt.svm.cPauseFilterThreshold EQU (.Guest.hwvirt.svm.cPauseFilter + 2)          ; resw 1
    .Guest.hwvirt.svm.fInterceptEvents      EQU (.Guest.hwvirt.svm.cPauseFilterThreshold + 2) ; resb 1

    .Guest.hwvirt.vmx.Vmcs                  resb 0x1000
    .Guest.hwvirt.vmx.ShadowVmcs            resb 0x1000
    .Guest.hwvirt.vmx.abVmreadBitmap        resb 0x1000
    .Guest.hwvirt.vmx.abVmwriteBitmap       resb 0x1000
    .Guest.hwvirt.vmx.aEntryMsrLoadArea     resb 0x2000
    .Guest.hwvirt.vmx.aExitMsrStoreArea     resb 0x2000
    .Guest.hwvirt.vmx.aExitMsrLoadArea      resb 0x2000
    .Guest.hwvirt.vmx.abMsrBitmap           resb 0x1000
    .Guest.hwvirt.vmx.abIoBitmap            resb 0x1000+0x1000
    alignb 8
    .Guest.hwvirt.vmx.GCPhysVmxon           resq 1
    .Guest.hwvirt.vmx.GCPhysVmcs            resq 1
    .Guest.hwvirt.vmx.GCPhysShadowVmcs      resq 1
    .Guest.hwvirt.vmx.enmDiag               resd 1
    .Guest.hwvirt.vmx.enmAbort              resd 1
    .Guest.hwvirt.vmx.uDiagAux              resq 1
    .Guest.hwvirt.vmx.uAbortAux             resd 1
    .Guest.hwvirt.vmx.fInVmxRootMode        resb 1
    .Guest.hwvirt.vmx.fInVmxNonRootMode     resb 1
    .Guest.hwvirt.vmx.fInterceptEvents      resb 1
    .Guest.hwvirt.vmx.fNmiUnblockingIret    resb 1
    .Guest.hwvirt.vmx.uFirstPauseLoopTick   resq 1
    .Guest.hwvirt.vmx.uPrevPauseTick        resq 1
    .Guest.hwvirt.vmx.uEntryTick            resq 1
    .Guest.hwvirt.vmx.offVirtApicWrite      resw 1
    .Guest.hwvirt.vmx.fVirtNmiBlocking      resb 1
    alignb 8
    .Guest.hwvirt.vmx.Msrs                  resb 224

    alignb 8
    .Guest.hwvirt.enmHwvirt   resd 1
    .Guest.hwvirt.fGif        resb 1
    alignb 4
    .Guest.hwvirt.fSavedInhibit resd 1
    alignb 64

    .GuestMsrs                resq 0
    .GuestMsrs.au64           resq 64

    ;
    ; Other stuff.
    ;
    .hNestedVmxPreemptTimer   resq 1

    .fUseFlags                resd 1
    .fChanged                 resd 1
    .u32RetCode               resd 1
    .fCpuIdApicFeatureVisible resb 1

    ;
    ; Host context state
    ;
    alignb 64
    .Host                     resb 0
    .Host.abXState            resb 0x4000-0x300
    .Host.XState              EQU .Host.abXState
    ;.Host.rax                resq 1 - scratch
    .Host.rbx                 resq 1
    ;.Host.rcx                resq 1 - scratch
    ;.Host.rdx                resq 1 - scratch
    .Host.rdi                 resq 1
    .Host.rsi                 resq 1
    .Host.rbp                 resq 1
    .Host.rsp                 resq 1
    ;.Host.r8                 resq 1 - scratch
    ;.Host.r9                 resq 1 - scratch
    .Host.r10                 resq 1
    .Host.r11                 resq 1
    .Host.r12                 resq 1
    .Host.r13                 resq 1
    .Host.r14                 resq 1
    .Host.r15                 resq 1
    ;.Host.rip                resd 1 - scratch
    .Host.rflags              resq 1
    .Host.ss                  resw 1
    .Host.ssPadding           resw 1
    .Host.gs                  resw 1
    .Host.gsPadding           resw 1
    .Host.fs                  resw 1
    .Host.fsPadding           resw 1
    .Host.es                  resw 1
    .Host.esPadding           resw 1
    .Host.ds                  resw 1
    .Host.dsPadding           resw 1
    .Host.cs                  resw 1
    .Host.csPadding           resw 1

    .Host.cr0Fpu:
    .Host.cr0                 resq 1
    ;.Host.cr2                resq 1 - scratch
    .Host.cr3                 resq 1
    .Host.cr4                 resq 1
    .Host.cr8                 resq 1

    .Host.dr0                 resq 1
    .Host.dr1                 resq 1
    .Host.dr2                 resq 1
    .Host.dr3                 resq 1
    .Host.dr6                 resq 1
    .Host.dr7                 resq 1

    .Host.gdtr                resb 10 ; GDT limit + linear address
    .Host.gdtrPadding         resw 1
    .Host.idtr                resb 10 ; IDT limit + linear address
    .Host.idtrPadding         resw 1
    .Host.ldtr                resw 1
    .Host.ldtrPadding         resw 1
    .Host.tr                  resw 1
    .Host.trPadding           resw 1

    .Host.SysEnter.cs         resq 1
    .Host.SysEnter.eip        resq 1
    .Host.SysEnter.esp        resq 1
    .Host.FSbase              resq 1
    .Host.GSbase              resq 1
    .Host.efer                resq 1
    alignb 8
    .Host.xcr0                resq 1
    .Host.fXStateMask         resq 1

    ;
    ; Hypervisor Context.
    ;
    alignb 64
    .Hyper                    resq 0
    .Hyper.dr                 resq 8
    .Hyper.cr3                resq 1
    alignb 64

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    .aMagic                   resb 56
    .uMagic                   resq 1
%endif
endstruc
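
;
; Example (not part of the original file): with these struc definitions the
; guest register fields are addressed as plain offsets; xSI as the CPUMCPU
; pointer is a hypothetical register choice.
;
%if 0 ; Illustration only.
        mov     rax, [xSI + CPUMCPU.Guest.cr3]      ; Fetch the guest CR3 value.
        mov     dx, [xSI + CPUMCPU.Guest.cs.Sel]    ; Fetch the guest CS selector.
%endif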


%if 0 ; Currently not used anywhere.
;;
; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
;
; Cleans the FPU state, if necessary, before restoring the FPU.
;
; This macro ASSUMES CR0.TS is not set!
;
; @param    xDX     Pointer to CPUMCPU.
; @uses     xAX, EFLAGS
;
; Changes here should also be reflected in CPUMRCA.asm's copy!
;
%macro CLEANFPU 0
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
        jz      .nothing_to_clean

        xor     eax, eax
        fnstsw  ax                      ; FSW -> AX.
        test    eax, RT_BIT(7)          ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions
                                        ; while clearing & loading the FPU bits in 'clean_fpu' below.
        jz      .clean_fpu
        fnclex

.clean_fpu:
        ffree   st7                     ; Clear FPU stack register(7)'s tag entry to prevent overflow
                                        ; for the upcoming push (load) if a wraparound occurs.
        fild    dword [g_r32_Zero xWrtRIP] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.
.nothing_to_clean:
%endmacro
%endif ; Unused.


;;
; Makes sure we don't trap (#NM) accessing the FPU.
;
; In ring-0 this is a bit of work, since we may have to try to convince the
; host kernel to do the work for us; we must also report any CR0 changes back
; to HMR0VMX via the VINF_CPUM_HOST_CR0_MODIFIED status code.
;
; If we end up clearing CR0.TS/EM ourselves in ring-0, we'll save the original
; value in CPUMCPU.Host.cr0Fpu. If we don't, we'll store zero there. (See also
; CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET.)
;
; In raw-mode we will always have to clear TS and it will be recalculated
; elsewhere and thus needs no saving.
;
; @param    %1      Register to return the return status code in.
; @param    %2      Temporary scratch register.
; @param    %3      Ring-0 only, register pointing to the CPUMCPU structure
;                   of the EMT we're on.
; @uses     EFLAGS, CR0, %1, %2
;
%macro CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC 3
        ;
        ; ring-0 - slightly more complicated than the old raw-mode.
        ;
        xor     %1, %1                  ; 0 / VINF_SUCCESS. Wishing for no CR0 changes.
        mov     [%3 + CPUMCPU.Host.cr0Fpu], %1

        mov     %2, cr0
        test    %2, X86_CR0_TS | X86_CR0_EM ; Make sure it's safe to access the FPU state.
        jz      %%no_cr0_change

 %ifdef VMM_R0_TOUCH_FPU
        ; Touch the state and check that the kernel updated CR0 for us.
        movdqa  xmm0, xmm0
        mov     %2, cr0
        test    %2, X86_CR0_TS | X86_CR0_EM
        jz      %%cr0_changed
 %endif

        ; Save CR0 and clear the flags ourselves.
        mov     [%3 + CPUMCPU.Host.cr0Fpu], %2
        and     %2, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, %2

%%cr0_changed:
        mov     %1, VINF_CPUM_HOST_CR0_MODIFIED
%%no_cr0_change:
%endmacro


;;
; Restore CR0 if CR0.TS or CR0.EM were non-zero in the original state.
;
; @param    %1      The original state to restore (or zero).
;
%macro CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET 1
        test    %1, X86_CR0_TS | X86_CR0_EM
        jz      %%skip_cr0_restore
        mov     cr0, %1
%%skip_cr0_restore:
%endmacro
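
;
; Example (not part of the original file): the two macros above are meant to
; be paired, roughly as sketched here; the register choices are hypothetical.
;
%if 0 ; Illustration only.
        CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC eax, rcx, rdi ; rdi = CPUMCPU; eax gets VINF_SUCCESS
                                                                  ; or VINF_CPUM_HOST_CR0_MODIFIED.
        ; ... access the FPU state without risking #NM ...
        mov     rcx, [rdi + CPUMCPU.Host.cr0Fpu]    ; Zero unless we cleared TS/EM ourselves.
        CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET rcx
%endif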


;;
; Saves the host state.
;
; @uses     rax, rdx
; @param    pCpumCpu    Define for the register containing the CPUMCPU pointer.
; @param    pXState     Define for the register containing the extended state pointer.
;
%macro CPUMR0_SAVE_HOST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
        lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
        mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]

        ;
        ; XSAVE or FXSAVE?
        ;
        or      eax, eax
        jz      %%host_fxsave

        ; XSAVE
        mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
 %ifdef RT_ARCH_AMD64
        o64 xsave [pXState]
 %else
        xsave   [pXState]
 %endif
        jmp     %%host_done

        ; FXSAVE
%%host_fxsave:
 %ifdef RT_ARCH_AMD64
        o64 fxsave [pXState]            ; Use explicit REX prefix. See @bugref{6398}.
 %else
        fxsave  [pXState]
 %endif

%%host_done:
%endmacro ; CPUMR0_SAVE_HOST
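
;
; Example (not part of the original file): callers are expected to %define
; pCpumCpu and pXState to concrete registers before instantiating the macro,
; along these lines; the register choices are hypothetical.
;
%if 0 ; Illustration only.
 %define pCpumCpu rdi                   ; Register holding the CPUMCPU pointer.
 %define pXState  rsi                   ; Scratch register; the macro loads it itself.
        CPUMR0_SAVE_HOST
 %undef pCpumCpu
 %undef pXState
%endif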


;;
; Loads the host state.
;
; @uses     rax, rdx
; @param    pCpumCpu    Define for the register containing the CPUMCPU pointer.
; @param    pXState     Define for the register containing the extended state pointer.
;
%macro CPUMR0_LOAD_HOST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
        lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
        mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]

        ;
        ; XRSTOR or FXRSTOR?
        ;
        or      eax, eax
        jz      %%host_fxrstor

        ; XRSTOR
        mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
 %ifdef RT_ARCH_AMD64
        o64 xrstor [pXState]
 %else
        xrstor  [pXState]
 %endif
        jmp     %%host_done

        ; FXRSTOR
%%host_fxrstor:
 %ifdef RT_ARCH_AMD64
        o64 fxrstor [pXState]           ; Use explicit REX prefix. See @bugref{6398}.
 %else
        fxrstor [pXState]
 %endif

%%host_done:
%endmacro ; CPUMR0_LOAD_HOST



;;
; Macro for XSAVE/FXSAVE for the guest FPU, figuring out whether to save the
; 32-bit FPU state or the 64-bit FPU state.
;
; @param    %1      Pointer to CPUMCPU.
; @param    %2      Pointer to XState.
; @param    %3      Force AMD64
; @param    %4      The instruction to use (xsave or fxsave)
; @uses     xAX, xDX, EFLAGS, 20h of stack.
;
%macro SAVE_32_OR_64_FPU 4
%if CPUM_IS_AMD64 || %3
        ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz     short %%save_long_mode_guest
%endif
        %4      [pXState]
%if CPUM_IS_AMD64 || %3
        jmp     %%save_done_32bit_cs_ds

%%save_long_mode_guest:
        o64 %4  [pXState]

        xor     edx, edx
        cmp     dword [pXState + X86FXSTATE.FPUCS], 0
        jne     short %%save_done

        sub     rsp, 20h                ; Only need 1ch bytes but keep stack aligned otherwise we #GP(0).
        fnstenv [rsp]
        movzx   eax, word [rsp + 10h]
        mov     [pXState + X86FXSTATE.FPUCS], eax
        movzx   eax, word [rsp + 18h]
        add     rsp, 20h
        mov     [pXState + X86FXSTATE.FPUDS], eax
%endif
%%save_done_32bit_cs_ds:
        mov     edx, X86_FXSTATE_RSVD_32BIT_MAGIC
%%save_done:
        mov     dword [pXState + X86_OFF_FXSTATE_RSVD], edx
%endmacro ; SAVE_32_OR_64_FPU
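
;
; Note (not part of the original file): the 10h/18h offsets above follow from
; the 32-bit protected-mode FNSTENV image, which stores FCW at 00h, FSW at
; 04h, FTW at 08h, FIP at 0ch, FPUCS at 10h, FDP at 14h and FPUDS at 18h;
; that is why only 1ch bytes of the 20h reserved on the stack are needed.
;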


;;
; Save the guest state.
;
; @uses     rax, rdx
; @param    pCpumCpu    Define for the register containing the CPUMCPU pointer.
; @param    pXState     Define for the register containing the extended state pointer.
;
%macro CPUMR0_SAVE_GUEST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
 %ifdef IN_RING0
        lea     pXState, [pCpumCpu + CPUMCPU.Guest.XState]
 %else
  %error "Unsupported context!"
 %endif
        mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]

        ;
        ; XSAVE or FXSAVE?
        ;
        or      eax, eax
        jz      %%guest_fxsave

        ; XSAVE
        mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
 %ifdef VBOX_WITH_KERNEL_USING_XMM
        and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Already saved in HMR0A.asm.
 %endif
        SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, xsave
        jmp     %%guest_done

        ; FXSAVE
%%guest_fxsave:
        SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, fxsave

%%guest_done:
%endmacro ; CPUMR0_SAVE_GUEST


;;
; Wrapper for selecting 32-bit or 64-bit XRSTOR/FXRSTOR according to what SAVE_32_OR_64_FPU did.
;
; @param    %1      Pointer to CPUMCPU.
; @param    %2      Pointer to XState.
; @param    %3      Force AMD64.
; @param    %4      The instruction to use (xrstor or fxrstor).
; @uses     xAX, xDX, EFLAGS
;
%macro RESTORE_32_OR_64_FPU 4
%if CPUM_IS_AMD64 || %3
        ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jz      %%restore_32bit_fpu
        cmp     dword [pXState + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
        jne     short %%restore_64bit_fpu
%%restore_32bit_fpu:
%endif
        %4      [pXState]
%if CPUM_IS_AMD64 || %3
        ; TODO: Restore XMM8-XMM15!
        jmp     short %%restore_fpu_done
%%restore_64bit_fpu:
        o64 %4  [pXState]
%%restore_fpu_done:
%endif
%endmacro ; RESTORE_32_OR_64_FPU


;;
; Loads the guest state.
;
; @uses     rax, rdx
; @param    pCpumCpu    Define for the register containing the CPUMCPU pointer.
; @param    pXState     Define for the register containing the extended state pointer.
;
%macro CPUMR0_LOAD_GUEST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
        lea     pXState, [pCpumCpu + CPUMCPU.Guest.XState]
        mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]

        ;
        ; XRSTOR or FXRSTOR?
        ;
        or      eax, eax
        jz      %%guest_fxrstor

        ; XRSTOR
        mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
 %ifdef VBOX_WITH_KERNEL_USING_XMM
        and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Will be loaded by HMR0A.asm.
 %endif
        RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0, xrstor
        jmp     %%guest_done

        ; FXRSTOR
%%guest_fxrstor:
        RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0, fxrstor

%%guest_done:
%endmacro ; CPUMR0_LOAD_GUEST
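
;
; Example (not part of the original file): like the host-state pair, the
; guest save/load macros assume the pCpumCpu/pXState defines; a ring-0 call
; sequence might look like this, with hypothetical register choices.
;
%if 0 ; Illustration only.
 %define pCpumCpu rbx
 %define pXState  rdx
        CPUMR0_SAVE_GUEST               ; Stash the guest FPU/SSE/AVX state...
        ; ... host work that clobbers the FPU ...
        CPUMR0_LOAD_GUEST               ; ... and bring it back before resuming the guest.
 %undef pCpumCpu
 %undef pXState
%endif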
711