VirtualBox

source: vbox/trunk/src/VBox/VMM/include/CPUMInternal.mac@107278

Last change on this file since 107278 was 107220, checked in by vboxsync, 2 months ago

VMM/CPUM,TM: Removed obsolete raw-mode CR4 masks. jiraref:VBP-1466

; $Id: CPUMInternal.mac 107220 2024-12-03 09:46:18Z vboxsync $
;; @file
; CPUM - Internal header file (asm).
;

;
; Copyright (C) 2006-2024 Oracle and/or its affiliates.
;
; This file is part of VirtualBox base platform packages, as
; available from https://www.virtualbox.org.
;
; This program is free software; you can redistribute it and/or
; modify it under the terms of the GNU General Public License
; as published by the Free Software Foundation, in version 3 of the
; License.
;
; This program is distributed in the hope that it will be useful, but
; WITHOUT ANY WARRANTY; without even the implied warranty of
; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; General Public License for more details.
;
; You should have received a copy of the GNU General Public License
; along with this program; if not, see <https://www.gnu.org/licenses>.
;
; SPDX-License-Identifier: GPL-3.0-only
;

%include "VBox/asmdefs.mac"
%include "VBox/vmm/cpum.mac"

;; Check sanity.
%ifdef VBOX_WITH_KERNEL_USING_XMM
 %ifndef IN_RING0
  %error "What? We've got code assuming VBOX_WITH_KERNEL_USING_XMM is only defined in ring-0!"
 %endif
%endif

;; For numeric expressions
%ifdef RT_ARCH_AMD64
 %define CPUM_IS_AMD64 1
%else
 %define CPUM_IS_AMD64 0
%endif


;;
; CPU info
struc CPUMINFO
    .cMsrRanges             resd 1                  ; uint32_t
    .fMsrMask               resd 1                  ; uint32_t
    .fMxCsrMask             resd 1                  ; uint32_t
    .cCpuIdLeaves           resd 1                  ; uint32_t
    .iFirstExtCpuIdLeaf     resd 1                  ; uint32_t
    .enmUnknownCpuIdMethod  resd 1                  ; CPUMUNKNOWNCPUID
    .DefCpuId               resb CPUMCPUID_size     ; CPUMCPUID
    .uScalableBusFreq       resq 1                  ; uint64_t
    .paMsrRangesR3          RTR3PTR_RES 1           ; R3PTRTYPE(PCPUMMSRRANGE)
    .paCpuIdLeavesR3        RTR3PTR_RES 1           ; R3PTRTYPE(PCPUMCPUIDLEAF)
    .aCpuIdLeaves           resb 256*32
    .aMsrRanges             resb 8192*128
endstruc
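
;;
; Illustrative sketch (not from the original file): CPUMINFO fields are read
; through the struc offsets declared above. The register choice (xBX holding
; a CPUMINFO pointer) is a hypothetical assumption.
%if 0 ; Example only, never assembled.
        mov     ecx, [xBX + CPUMINFO.cMsrRanges]        ; Number of entries in the MSR range table.
        mov     eax, [xBX + CPUMINFO.fMxCsrMask]        ; Valid guest MXCSR bits.
%endif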


%define CPUM_USED_FPU_HOST              RT_BIT(0)
%define CPUM_USED_FPU_GUEST             RT_BIT(10)
%define CPUM_USED_FPU_SINCE_REM         RT_BIT(1)
%define CPUM_USED_MANUAL_XMM_RESTORE    RT_BIT(2)
%define CPUM_USE_SYSENTER               RT_BIT(3)
%define CPUM_USE_SYSCALL                RT_BIT(4)
%define CPUM_USE_DEBUG_REGS_HOST        RT_BIT(5)
%define CPUM_USED_DEBUG_REGS_HOST       RT_BIT(6)
%define CPUM_USE_DEBUG_REGS_HYPER       RT_BIT(7)
%define CPUM_USED_DEBUG_REGS_HYPER      RT_BIT(8)
%define CPUM_USED_DEBUG_REGS_GUEST      RT_BIT(9)
%define CPUM_USE_FFXSR_LEAKY            RT_BIT(19)
%define CPUM_USE_SUPPORTS_LONGMODE      RT_BIT(20)
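
;;
; Illustrative sketch (not from the original file): the defines above are bit
; masks for the 32-bit CPUMCPU.fUseFlags / CPUM.fHostUseFlags fields (CPUMCPU
; is declared further down) and are typically tested in place. The xDX
; register choice is a hypothetical assumption.
%if 0 ; Example only, never assembled.
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU_GUEST
        jnz     .guest_fpu_loaded                       ; Guest FPU state is live on the CPU.
%endif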


struc CPUM
    ;...
    .fHostUseFlags          resd 1

    .u8PortableCpuIdLevel   resb 1
    .fPendingRestore        resb 1
    .fMtrrRead              resb 1
    .fMtrrWrite             resb 1

    alignb 8
    .fXStateGuestMask       resq 1
    .fXStateHostMask        resq 1

    alignb 64
    .HostFeatures           resb 48
    .GuestFeatures          resb 48
    .GuestInfo              resb CPUMINFO_size

    ; Patch manager saved state compatibility CPUID leaf arrays
    .aGuestCpuIdPatmStd     resb 16*6
    .aGuestCpuIdPatmExt     resb 16*10
    .aGuestCpuIdPatmCentaur resb 16*4

    alignb 8
    .cMsrWrites             resq 1
    .cMsrWritesToIgnoredBits resq 1
    .cMsrWritesRaiseGp      resq 1
    .cMsrWritesUnknown      resq 1
    .cMsrReads              resq 1
    .cMsrReadsRaiseGp       resq 1
    .cMsrReadsUnknown       resq 1
endstruc
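
;;
; Illustrative sketch (not from the original file): bumping one of the MSR
; statistics counters declared above. Assumes xSI holds a CPUM pointer
; (hypothetical register choice; note the ";..." above means fields are
; elided here, so the offsets in this excerpt are incomplete).
%if 0 ; Example only, never assembled.
        inc     qword [xSI + CPUM.cMsrReads]            ; Count another MSR read.
%endif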

struc CPUMCPU
    ;
    ; Guest context state
    ;
    .Guest                  resq 0
    .Guest.eax              resq 1
    .Guest.ecx              resq 1
    .Guest.edx              resq 1
    .Guest.ebx              resq 1
    .Guest.esp              resq 1
    .Guest.ebp              resq 1
    .Guest.esi              resq 1
    .Guest.edi              resq 1
    .Guest.r8               resq 1
    .Guest.r9               resq 1
    .Guest.r10              resq 1
    .Guest.r11              resq 1
    .Guest.r12              resq 1
    .Guest.r13              resq 1
    .Guest.r14              resq 1
    .Guest.r15              resq 1
    .Guest.es.Sel           resw 1
    .Guest.es.PaddingSel    resw 1
    .Guest.es.ValidSel      resw 1
    .Guest.es.fFlags        resw 1
    .Guest.es.u64Base       resq 1
    .Guest.es.u32Limit      resd 1
    .Guest.es.Attr          resd 1
    .Guest.cs.Sel           resw 1
    .Guest.cs.PaddingSel    resw 1
    .Guest.cs.ValidSel      resw 1
    .Guest.cs.fFlags        resw 1
    .Guest.cs.u64Base       resq 1
    .Guest.cs.u32Limit      resd 1
    .Guest.cs.Attr          resd 1
    .Guest.ss.Sel           resw 1
    .Guest.ss.PaddingSel    resw 1
    .Guest.ss.ValidSel      resw 1
    .Guest.ss.fFlags        resw 1
    .Guest.ss.u64Base       resq 1
    .Guest.ss.u32Limit      resd 1
    .Guest.ss.Attr          resd 1
    .Guest.ds.Sel           resw 1
    .Guest.ds.PaddingSel    resw 1
    .Guest.ds.ValidSel      resw 1
    .Guest.ds.fFlags        resw 1
    .Guest.ds.u64Base       resq 1
    .Guest.ds.u32Limit      resd 1
    .Guest.ds.Attr          resd 1
    .Guest.fs.Sel           resw 1
    .Guest.fs.PaddingSel    resw 1
    .Guest.fs.ValidSel      resw 1
    .Guest.fs.fFlags        resw 1
    .Guest.fs.u64Base       resq 1
    .Guest.fs.u32Limit      resd 1
    .Guest.fs.Attr          resd 1
    .Guest.gs.Sel           resw 1
    .Guest.gs.PaddingSel    resw 1
    .Guest.gs.ValidSel      resw 1
    .Guest.gs.fFlags        resw 1
    .Guest.gs.u64Base       resq 1
    .Guest.gs.u32Limit      resd 1
    .Guest.gs.Attr          resd 1
    .Guest.ldtr.Sel         resw 1
    .Guest.ldtr.PaddingSel  resw 1
    .Guest.ldtr.ValidSel    resw 1
    .Guest.ldtr.fFlags      resw 1
    .Guest.ldtr.u64Base     resq 1
    .Guest.ldtr.u32Limit    resd 1
    .Guest.ldtr.Attr        resd 1
    .Guest.tr.Sel           resw 1
    .Guest.tr.PaddingSel    resw 1
    .Guest.tr.ValidSel      resw 1
    .Guest.tr.fFlags        resw 1
    .Guest.tr.u64Base       resq 1
    .Guest.tr.u32Limit      resd 1
    .Guest.tr.Attr          resd 1
    alignb 8
    .Guest.eip              resq 1
    .Guest.eflags           resq 1
    .Guest.fExtrn           resq 1
    .Guest.uRipInhibitInt   resq 1
    .Guest.cr0              resq 1
    .Guest.cr2              resq 1
    .Guest.cr3              resq 1
    .Guest.cr4              resq 1
    .Guest.dr               resq 8
    .Guest.gdtrPadding      resw 3
    .Guest.gdtr             resw 0
    .Guest.gdtr.cbGdt       resw 1
    .Guest.gdtr.pGdt        resq 1
    .Guest.idtrPadding      resw 3
    .Guest.idtr             resw 0
    .Guest.idtr.cbIdt       resw 1
    .Guest.idtr.pIdt        resq 1
    .Guest.SysEnter.cs      resb 8
    .Guest.SysEnter.eip     resb 8
    .Guest.SysEnter.esp     resb 8
    .Guest.msrEFER          resb 8
    .Guest.msrSTAR          resb 8
    .Guest.msrPAT           resb 8
    .Guest.msrLSTAR         resb 8
    .Guest.msrCSTAR         resb 8
    .Guest.msrSFMASK        resb 8
    .Guest.msrKERNELGSBASE  resb 8

    alignb 32
    .Guest.aPaePdpes        resq 4

    alignb 8
    .Guest.aXcr             resq 2
    .Guest.fXStateMask      resq 1
    .Guest.fUsedFpuGuest    resb 1
    alignb 8
    .Guest.aoffXState       resw 64
    alignb 256
    .Guest.abXState         resb 0x4000-0x300
    .Guest.XState           EQU .Guest.abXState

;;
    alignb 4096
    .Guest.hwvirt           resb 0
    .Guest.hwvirt.svm       resb 0
    .Guest.hwvirt.vmx       resb 0

    .Guest.hwvirt.svm.Vmcb                  EQU .Guest.hwvirt.svm
    .Guest.hwvirt.svm.abMsrBitmap           EQU (.Guest.hwvirt.svm.Vmcb + 0x1000)
    .Guest.hwvirt.svm.abIoBitmap            EQU (.Guest.hwvirt.svm.abMsrBitmap + 0x2000)
    .Guest.hwvirt.svm.uMsrHSavePa           EQU (.Guest.hwvirt.svm.abIoBitmap + 0x3000)       ; resq 1
    .Guest.hwvirt.svm.GCPhysVmcb            EQU (.Guest.hwvirt.svm.uMsrHSavePa + 8)           ; resq 1
    alignb 8
    .Guest.hwvirt.svm.HostState             EQU (.Guest.hwvirt.svm.GCPhysVmcb + 8)            ; resb 184
    .Guest.hwvirt.svm.uPrevPauseTick        EQU (.Guest.hwvirt.svm.HostState + 184)           ; resq 1
    .Guest.hwvirt.svm.cPauseFilter          EQU (.Guest.hwvirt.svm.uPrevPauseTick + 8)        ; resw 1
    .Guest.hwvirt.svm.cPauseFilterThreshold EQU (.Guest.hwvirt.svm.cPauseFilter + 2)          ; resw 1
    .Guest.hwvirt.svm.fInterceptEvents      EQU (.Guest.hwvirt.svm.cPauseFilterThreshold + 2) ; resb 1

    .Guest.hwvirt.vmx.Vmcs                  resb 0x1000
    .Guest.hwvirt.vmx.ShadowVmcs            resb 0x1000
    .Guest.hwvirt.vmx.abVmreadBitmap        resb 0x1000
    .Guest.hwvirt.vmx.abVmwriteBitmap       resb 0x1000
    .Guest.hwvirt.vmx.aEntryMsrLoadArea     resb 0x2000
    .Guest.hwvirt.vmx.aExitMsrStoreArea     resb 0x2000
    .Guest.hwvirt.vmx.aExitMsrLoadArea      resb 0x2000
    .Guest.hwvirt.vmx.abMsrBitmap           resb 0x1000
    .Guest.hwvirt.vmx.abIoBitmap            resb 0x1000+0x1000
    alignb 8
    .Guest.hwvirt.vmx.GCPhysVmxon           resq 1
    .Guest.hwvirt.vmx.GCPhysVmcs            resq 1
    .Guest.hwvirt.vmx.GCPhysShadowVmcs      resq 1
    .Guest.hwvirt.vmx.enmDiag               resd 1
    .Guest.hwvirt.vmx.enmAbort              resd 1
    .Guest.hwvirt.vmx.uDiagAux              resq 1
    .Guest.hwvirt.vmx.uAbortAux             resd 1
    .Guest.hwvirt.vmx.fInVmxRootMode        resb 1
    .Guest.hwvirt.vmx.fInVmxNonRootMode     resb 1
    .Guest.hwvirt.vmx.fInterceptEvents      resb 1
    .Guest.hwvirt.vmx.fNmiUnblockingIret    resb 1
    .Guest.hwvirt.vmx.uFirstPauseLoopTick   resq 1
    .Guest.hwvirt.vmx.uPrevPauseTick        resq 1
    .Guest.hwvirt.vmx.uEntryTick            resq 1
    .Guest.hwvirt.vmx.offVirtApicWrite      resw 1
    .Guest.hwvirt.vmx.fVirtNmiBlocking      resb 1
    alignb 8
    .Guest.hwvirt.vmx.Msrs                  resb 224

    alignb 8
    .Guest.hwvirt.enmHwvirt                 resd 1
    .Guest.hwvirt.fGif                      resb 1
    alignb 4
    .Guest.hwvirt.fSavedInhibit             resd 1
    alignb 64

    .GuestMsrs              resq 0
    .GuestMsrs.au64         resq 64

    ;
    ; Other stuff.
    ;
    .hNestedVmxPreemptTimer resq 1

    .fUseFlags              resd 1
    .fChanged               resd 1
    .u32RetCode             resd 1
    .fCpuIdApicFeatureVisible resb 1

    ;
    ; Host context state
    ;
    alignb 64
    .Host                   resb 0
    .Host.abXState          resb 0x4000-0x300
    .Host.XState            EQU .Host.abXState
    ;.Host.rax              resq 1 - scratch
    .Host.rbx               resq 1
    ;.Host.rcx              resq 1 - scratch
    ;.Host.rdx              resq 1 - scratch
    .Host.rdi               resq 1
    .Host.rsi               resq 1
    .Host.rbp               resq 1
    .Host.rsp               resq 1
    ;.Host.r8               resq 1 - scratch
    ;.Host.r9               resq 1 - scratch
    .Host.r10               resq 1
    .Host.r11               resq 1
    .Host.r12               resq 1
    .Host.r13               resq 1
    .Host.r14               resq 1
    .Host.r15               resq 1
    ;.Host.rip              resd 1 - scratch
    .Host.rflags            resq 1
    .Host.ss                resw 1
    .Host.ssPadding         resw 1
    .Host.gs                resw 1
    .Host.gsPadding         resw 1
    .Host.fs                resw 1
    .Host.fsPadding         resw 1
    .Host.es                resw 1
    .Host.esPadding         resw 1
    .Host.ds                resw 1
    .Host.dsPadding         resw 1
    .Host.cs                resw 1
    .Host.csPadding         resw 1

    .Host.cr0Fpu:
    .Host.cr0               resq 1
    ;.Host.cr2              resq 1 - scratch
    .Host.cr3               resq 1
    .Host.cr4               resq 1
    .Host.cr8               resq 1

    .Host.dr0               resq 1
    .Host.dr1               resq 1
    .Host.dr2               resq 1
    .Host.dr3               resq 1
    .Host.dr6               resq 1
    .Host.dr7               resq 1

    .Host.gdtr              resb 10         ; GDT limit + linear address
    .Host.gdtrPadding       resw 1
    .Host.idtr              resb 10         ; IDT limit + linear address
    .Host.idtrPadding       resw 1
    .Host.ldtr              resw 1
    .Host.ldtrPadding       resw 1
    .Host.tr                resw 1
    .Host.trPadding         resw 1

    .Host.SysEnter.cs       resq 1
    .Host.SysEnter.eip      resq 1
    .Host.SysEnter.esp      resq 1
    .Host.FSbase            resq 1
    .Host.GSbase            resq 1
    .Host.efer              resq 1
    alignb 8
    .Host.xcr0              resq 1
    .Host.fXStateMask       resq 1

    ;
    ; Hypervisor Context.
    ;
    alignb 64
    .Hyper                  resq 0
    .Hyper.dr               resq 8
    .Hyper.cr3              resq 1
    alignb 64

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    .aMagic                 resb 56
    .uMagic                 resq 1
%endif
endstruc
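
;;
; Illustrative sketch (not from the original file): the nested .Guest.* and
; .Host.* labels resolve to plain byte offsets, so guest and host registers
; are reached with a single effective address. Assumes xSI holds a CPUMCPU
; pointer (hypothetical register choice).
%if 0 ; Example only, never assembled.
        mov     rax, [xSI + CPUMCPU.Guest.cr3]          ; Fetch the guest CR3.
        mov     rdx, [xSI + CPUMCPU.Host.rsp]           ; Fetch the saved host RSP.
%endif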



%if 0 ; Currently not used anywhere.
;;
; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
;
; Cleans the FPU state, if necessary, before restoring the FPU.
;
; This macro ASSUMES CR0.TS is not set!
;
; @param xDX Pointer to CPUMCPU.
; @uses xAX, EFLAGS
;
; Changes here should also be reflected in CPUMRCA.asm's copy!
;
%macro CLEANFPU 0
    test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
    jz      .nothing_to_clean

    xor     eax, eax
    fnstsw  ax                          ; FSW -> AX.
    test    eax, RT_BIT(7)              ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions
                                        ; while clearing & loading the FPU bits in 'clean_fpu' below.
    jz      .clean_fpu
    fnclex

.clean_fpu:
    ffree   st7                         ; Clear FPU stack register 7's tag entry so the upcoming
                                        ; push (load) cannot overflow if a wraparound occurs.
    fild    dword [g_r32_Zero xWrtRIP]  ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.
.nothing_to_clean:
%endmacro
%endif ; Unused.


;;
; Makes sure we don't trap (#NM) accessing the FPU.
;
; In ring-0 this is a bit of work since we may have to try to convince the
; host kernel to do the work for us; we must also report any CR0 changes back
; to HMR0VMX via the VINF_CPUM_HOST_CR0_MODIFIED status code.
;
; If we end up clearing CR0.TS/EM ourselves in ring-0, we'll save the original
; value in CPUMCPU.Host.cr0Fpu. If we don't, we'll store zero there. (See also
; CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET.)
;
; In raw-mode we will always have to clear TS and it will be recalculated
; elsewhere and thus needs no saving.
;
; @param %1 The register to return the status code in.
; @param %2 Temporary scratch register.
; @param %3 Ring-0 only, register pointing to the CPUMCPU structure
;           of the EMT we're on.
; @uses EFLAGS, CR0, %1, %2
;
%macro CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC 3
    ;
    ; ring-0 - slightly more complicated than the old raw-mode.
    ;
    xor     %1, %1                      ; 0 / VINF_SUCCESS. Wishing for no CR0 changes.
    mov     [%3 + CPUMCPU.Host.cr0Fpu], %1

    mov     %2, cr0
    test    %2, X86_CR0_TS | X86_CR0_EM ; Make sure it's safe to access the FPU state.
    jz      %%no_cr0_change

 %ifdef VMM_R0_TOUCH_FPU
    ; Touch the state and check that the kernel updated CR0 for us.
    movdqa  xmm0, xmm0
    mov     %2, cr0
    test    %2, X86_CR0_TS | X86_CR0_EM
    jz      %%cr0_changed
 %endif

    ; Save CR0 and clear the flags ourselves.
    mov     [%3 + CPUMCPU.Host.cr0Fpu], %2
    and     %2, ~(X86_CR0_TS | X86_CR0_EM)
    mov     cr0, %2

%%cr0_changed:
    mov     %1, VINF_CPUM_HOST_CR0_MODIFIED
%%no_cr0_change:
%endmacro


;;
; Restore CR0 if CR0.TS or CR0.EM were non-zero in the original state.
;
; @param %1 The original state to restore (or zero).
;
%macro CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET 1
    test    %1, X86_CR0_TS | X86_CR0_EM
    jz      %%skip_cr0_restore
    mov     cr0, %1
%%skip_cr0_restore:
%endmacro
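
;;
; Illustrative sketch (not from the original file): the two macros above are
; meant to bracket FPU access in ring-0. The register choices (rax for the
; status code, rcx as scratch, rbx as the CPUMCPU pointer) are hypothetical.
%if 0 ; Example only, never assembled.
        CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC rax, rcx, rbx
        ; ... safely access the FPU/SSE state here; rax holds VINF_SUCCESS or
        ; VINF_CPUM_HOST_CR0_MODIFIED and must be reported to the caller ...
        mov     rcx, [rbx + CPUMCPU.Host.cr0Fpu]        ; Zero unless we changed CR0 above.
        CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET rcx
%endif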


;;
; Saves the host state.
;
; @uses     rax, rdx
; @param    pCpumCpu  Define for the register containing the CPUMCPU pointer.
; @param    pXState   Define for the register containing the extended state pointer.
;
%macro CPUMR0_SAVE_HOST 0
    ;
    ; Load a couple of registers we'll use later in all branches.
    ;
    lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
    mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]

    ;
    ; XSAVE or FXSAVE?
    ;
    or      eax, eax
    jz      %%host_fxsave

    ; XSAVE
    mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
 %ifdef RT_ARCH_AMD64
    o64 xsave [pXState]
 %else
    xsave   [pXState]
 %endif
    jmp     %%host_done

    ; FXSAVE
%%host_fxsave:
 %ifdef RT_ARCH_AMD64
    o64 fxsave [pXState]                ; Use explicit REX prefix. See @bugref{6398}.
 %else
    fxsave  [pXState]
 %endif

%%host_done:
%endmacro ; CPUMR0_SAVE_HOST


;;
; Loads the host state.
;
; @uses     rax, rdx
; @param    pCpumCpu  Define for the register containing the CPUMCPU pointer.
; @param    pXState   Define for the register containing the extended state pointer.
;
%macro CPUMR0_LOAD_HOST 0
    ;
    ; Load a couple of registers we'll use later in all branches.
    ;
    lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
    mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]

    ;
    ; XRSTOR or FXRSTOR?
    ;
    or      eax, eax
    jz      %%host_fxrstor

    ; XRSTOR
    mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
 %ifdef RT_ARCH_AMD64
    o64 xrstor [pXState]
 %else
    xrstor  [pXState]
 %endif
    jmp     %%host_done

    ; FXRSTOR
%%host_fxrstor:
 %ifdef RT_ARCH_AMD64
    o64 fxrstor [pXState]               ; Use explicit REX prefix. See @bugref{6398}.
 %else
    fxrstor [pXState]
 %endif

%%host_done:
%endmacro ; CPUMR0_LOAD_HOST
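
;;
; Illustrative sketch (not from the original file): CPUMR0_SAVE_HOST and
; CPUMR0_LOAD_HOST expect the caller to %define pCpumCpu and pXState to
; registers beforehand. The rdi/rdx mappings below are hypothetical.
%if 0 ; Example only, never assembled.
 %define pCpumCpu   rdi
 %define pXState    rdx
        CPUMR0_SAVE_HOST                ; Stash host FPU/SSE/AVX state (XSAVE or FXSAVE).
        ; ...
        CPUMR0_LOAD_HOST                ; Put it back (XRSTOR or FXRSTOR).
 %undef pCpumCpu
 %undef pXState
%endif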



;; Macro for XSAVE/FXSAVE of the guest FPU state, figuring out whether to
; save the 32-bit or the 64-bit FPU state.
;
; @param %1 Pointer to CPUMCPU.
; @param %2 Pointer to XState.
; @param %3 Force AMD64
; @param %4 The instruction to use (xsave or fxsave)
; @uses xAX, xDX, EFLAGS, 20h of stack.
;
%macro SAVE_32_OR_64_FPU 4
%if CPUM_IS_AMD64 || %3
    ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
    test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
    jnz     short %%save_long_mode_guest
%endif
    %4      [pXState]
%if CPUM_IS_AMD64 || %3
    jmp     %%save_done_32bit_cs_ds

%%save_long_mode_guest:
    o64 %4  [pXState]

    xor     edx, edx
    cmp     dword [pXState + X86FXSTATE.FPUCS], 0
    jne     short %%save_done

    sub     rsp, 20h                    ; Only need 1ch bytes but keep stack aligned otherwise we #GP(0).
    fnstenv [rsp]
    movzx   eax, word [rsp + 10h]
    mov     [pXState + X86FXSTATE.FPUCS], eax
    movzx   eax, word [rsp + 18h]
    add     rsp, 20h
    mov     [pXState + X86FXSTATE.FPUDS], eax
%endif
%%save_done_32bit_cs_ds:
    mov     edx, X86_FXSTATE_RSVD_32BIT_MAGIC
%%save_done:
    mov     dword [pXState + X86_OFF_FXSTATE_RSVD], edx
%endmacro ; SAVE_32_OR_64_FPU
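
;;
; Illustrative sketch (not from the original file): the %3 parameter forces
; the long-mode-capable path even when CPUM_IS_AMD64 is 0; the invocations
; further down pass 0 and rely on CPUM_IS_AMD64 instead.
%if 0 ; Example only, never assembled.
        SAVE_32_OR_64_FPU pCpumCpu, pXState, 1, xsave   ; Always consider a long-mode guest.
%endif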


;;
; Save the guest state.
;
; @uses     rax, rdx
; @param    pCpumCpu  Define for the register containing the CPUMCPU pointer.
; @param    pXState   Define for the register containing the extended state pointer.
;
%macro CPUMR0_SAVE_GUEST 0
    ;
    ; Load a couple of registers we'll use later in all branches.
    ;
 %ifdef IN_RING0
    lea     pXState, [pCpumCpu + CPUMCPU.Guest.XState]
 %else
  %error "Unsupported context!"
 %endif
    mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]

    ;
    ; XSAVE or FXSAVE?
    ;
    or      eax, eax
    jz      %%guest_fxsave

    ; XSAVE
    mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
 %ifdef VBOX_WITH_KERNEL_USING_XMM
    and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS  ; Already saved in HMR0A.asm.
 %endif
    SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, xsave
    jmp     %%guest_done

    ; FXSAVE
%%guest_fxsave:
    SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, fxsave

%%guest_done:
%endmacro ; CPUMR0_SAVE_GUEST


;;
; Wrapper for selecting 32-bit or 64-bit XRSTOR/FXRSTOR according to what SAVE_32_OR_64_FPU did.
;
; @param %1 Pointer to CPUMCPU.
; @param %2 Pointer to XState.
; @param %3 Force AMD64.
; @param %4 The instruction to use (xrstor or fxrstor).
; @uses xAX, xDX, EFLAGS
;
%macro RESTORE_32_OR_64_FPU 4
%if CPUM_IS_AMD64 || %3
    ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
    test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
    jz      %%restore_32bit_fpu
    cmp     dword [pXState + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
    jne     short %%restore_64bit_fpu
%%restore_32bit_fpu:
%endif
    %4      [pXState]
%if CPUM_IS_AMD64 || %3
    ; TODO: Restore XMM8-XMM15!
    jmp     short %%restore_fpu_done
%%restore_64bit_fpu:
    o64 %4  [pXState]
%%restore_fpu_done:
%endif
%endmacro ; RESTORE_32_OR_64_FPU


;;
; Loads the guest state.
;
; @uses     rax, rdx
; @param    pCpumCpu  Define for the register containing the CPUMCPU pointer.
; @param    pXState   Define for the register containing the extended state pointer.
;
%macro CPUMR0_LOAD_GUEST 0
    ;
    ; Load a couple of registers we'll use later in all branches.
    ;
    lea     pXState, [pCpumCpu + CPUMCPU.Guest.XState]
    mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]

    ;
    ; XRSTOR or FXRSTOR?
    ;
    or      eax, eax
    jz      %%guest_fxrstor

    ; XRSTOR
    mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
 %ifdef VBOX_WITH_KERNEL_USING_XMM
    and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS  ; Will be loaded by HMR0A.asm.
 %endif
    RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0, xrstor
    jmp     %%guest_done

    ; FXRSTOR
%%guest_fxrstor:
    RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0, fxrstor

%%guest_done:
%endmacro ; CPUMR0_LOAD_GUEST
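
;;
; Illustrative sketch (not from the original file): a ring-0 FPU world switch
; would pair the four macros above roughly as below. The rdi/rdx register
; mappings are hypothetical.
%if 0 ; Example only, never assembled.
 %define pCpumCpu   rdi
 %define pXState    rdx
        CPUMR0_SAVE_HOST                ; 1. Stash the host FPU state.
        CPUMR0_LOAD_GUEST               ; 2. Install the guest FPU state.
        ; ... guest code runs ...
        CPUMR0_SAVE_GUEST               ; 3. Stash the (possibly modified) guest state.
        CPUMR0_LOAD_HOST                ; 4. Restore the host FPU state.
 %undef pCpumCpu
 %undef pXState
%endif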