VirtualBox

source: vbox/trunk/src/VBox/VMM/include/CPUMInternal.mac@95394

Last change on this file since 95394 was 94934, checked in by vboxsync, 3 years ago

VMM/CPUM: Kicked out the VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI code, as it was for raw-mode use. bugref:9517

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 24.2 KB
; $Id: CPUMInternal.mac 94934 2022-05-09 08:41:13Z vboxsync $
;; @file
; CPUM - Internal header file (asm).
;

;
; Copyright (C) 2006-2022 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;

%include "VBox/asmdefs.mac"
%include "VBox/vmm/cpum.mac"

;; Check sanity.
%ifdef VBOX_WITH_KERNEL_USING_XMM
 %ifndef IN_RING0
  %error "What? We've got code assuming VBOX_WITH_KERNEL_USING_XMM is only defined in ring-0!"
 %endif
%endif

;; For numeric expressions
%ifdef RT_ARCH_AMD64
 %define CPUM_IS_AMD64 1
%else
 %define CPUM_IS_AMD64 0
%endif
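
;; Illustrative sketch (not part of the original file): CPUM_IS_AMD64 is a
;; plain 0/1 define rather than an %ifdef-style symbol so that it can take part
;; in numeric %if expressions and be combined with macro parameters, the way
;; SAVE_32_OR_64_FPU and RESTORE_32_OR_64_FPU do further down. Guarded with
;; %if 0 like the unused CLEANFPU macro below; EXAMPLE_FXSAVE is hypothetical.
%if 0 ; Example only.
%macro EXAMPLE_FXSAVE 2 ; %1 = XState pointer register, %2 = force AMD64 (0/1)
 %if CPUM_IS_AMD64 || %2
        o64 fxsave [%1]         ; 64-bit variant with explicit REX.W prefix.
 %else
        fxsave  [%1]            ; 32-bit variant.
 %endif
%endmacro
%endif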


;;
; CPU info
struc CPUMINFO
    .cMsrRanges             resd 1                  ; uint32_t
    .fMsrMask               resd 1                  ; uint32_t
    .fMxCsrMask             resd 1                  ; uint32_t
    .cCpuIdLeaves           resd 1                  ; uint32_t
    .iFirstExtCpuIdLeaf     resd 1                  ; uint32_t
    .enmUnknownCpuIdMethod  resd 1                  ; CPUMUNKNOWNCPUID
    .DefCpuId               resb CPUMCPUID_size     ; CPUMCPUID
    .uScalableBusFreq       resq 1                  ; uint64_t
    .paMsrRangesR3          RTR3PTR_RES 1           ; R3PTRTYPE(PCPUMMSRRANGE)
    .paCpuIdLeavesR3        RTR3PTR_RES 1           ; R3PTRTYPE(PCPUMCPUIDLEAF)
    .aCpuIdLeaves           resb 256*32
    .aMsrRanges             resb 8192*128
endstruc


%define CPUM_USED_FPU_HOST              RT_BIT(0)
%define CPUM_USED_FPU_GUEST             RT_BIT(10)
%define CPUM_USED_FPU_SINCE_REM         RT_BIT(1)
%define CPUM_USED_MANUAL_XMM_RESTORE    RT_BIT(2)
%define CPUM_USE_SYSENTER               RT_BIT(3)
%define CPUM_USE_SYSCALL                RT_BIT(4)
%define CPUM_USE_DEBUG_REGS_HOST        RT_BIT(5)
%define CPUM_USED_DEBUG_REGS_HOST       RT_BIT(6)
%define CPUM_USE_DEBUG_REGS_HYPER       RT_BIT(7)
%define CPUM_USED_DEBUG_REGS_HYPER      RT_BIT(8)
%define CPUM_USED_DEBUG_REGS_GUEST      RT_BIT(9)
%define CPUM_USE_FFXSR_LEAKY            RT_BIT(19)
%define CPUM_USE_SUPPORTS_LONGMODE      RT_BIT(20)

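;; Illustrative sketch (not part of the original file): these flags live in
;; CPUMCPU.fUseFlags / CPUM.fHostUseFlags and are meant to be tested and
;; updated with plain bit operations, as CLEANFPU and SAVE_32_OR_64_FPU do
;; below. xDX pointing to the CPUMCPU structure mirrors the CLEANFPU convention.
%if 0 ; Example only.
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU_GUEST          ; ZF=0 if guest FPU state is loaded.
        or      dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_MANUAL_XMM_RESTORE ; Set a flag.
        and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_DEBUG_REGS_HYPER  ; Clear a flag.
%endif
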
%define CPUM_HANDLER_DS                 1
%define CPUM_HANDLER_ES                 2
%define CPUM_HANDLER_FS                 3
%define CPUM_HANDLER_GS                 4
%define CPUM_HANDLER_IRET               5
%define CPUM_HANDLER_TYPEMASK           0ffh
%define CPUM_HANDLER_CTXCORE_IN_EBP     RT_BIT(31)


struc CPUM
    ;...
    .fHostUseFlags          resd 1

    ; CR4 masks
    .CR4.AndMask            resd 1
    .CR4.OrMask             resd 1
    .u8PortableCpuIdLevel   resb 1
    .fPendingRestore        resb 1

    alignb 8
    .fXStateGuestMask       resq 1
    .fXStateHostMask        resq 1

    alignb 64
    .HostFeatures           resb 48
    .GuestFeatures          resb 48
    .GuestInfo              resb CPUMINFO_size

    ; Patch manager saved state compatibility CPUID leaf arrays
    .aGuestCpuIdPatmStd     resb 16*6
    .aGuestCpuIdPatmExt     resb 16*10
    .aGuestCpuIdPatmCentaur resb 16*4

    alignb 8
    .cMsrWrites             resq 1
    .cMsrWritesToIgnoredBits resq 1
    .cMsrWritesRaiseGp      resq 1
    .cMsrWritesUnknown      resq 1
    .cMsrReads              resq 1
    .cMsrReadsRaiseGp       resq 1
    .cMsrReadsUnknown       resq 1
endstruc

struc CPUMCPU
    ;
    ; Guest context state
    ; (Identical to the .Hyper chunk below and to CPUMCTX in cpum.mac.)
    ;
    .Guest                  resq 0
    .Guest.eax              resq 1
    .Guest.ecx              resq 1
    .Guest.edx              resq 1
    .Guest.ebx              resq 1
    .Guest.esp              resq 1
    .Guest.ebp              resq 1
    .Guest.esi              resq 1
    .Guest.edi              resq 1
    .Guest.r8               resq 1
    .Guest.r9               resq 1
    .Guest.r10              resq 1
    .Guest.r11              resq 1
    .Guest.r12              resq 1
    .Guest.r13              resq 1
    .Guest.r14              resq 1
    .Guest.r15              resq 1
    .Guest.es.Sel           resw 1
    .Guest.es.PaddingSel    resw 1
    .Guest.es.ValidSel      resw 1
    .Guest.es.fFlags        resw 1
    .Guest.es.u64Base       resq 1
    .Guest.es.u32Limit      resd 1
    .Guest.es.Attr          resd 1
    .Guest.cs.Sel           resw 1
    .Guest.cs.PaddingSel    resw 1
    .Guest.cs.ValidSel      resw 1
    .Guest.cs.fFlags        resw 1
    .Guest.cs.u64Base       resq 1
    .Guest.cs.u32Limit      resd 1
    .Guest.cs.Attr          resd 1
    .Guest.ss.Sel           resw 1
    .Guest.ss.PaddingSel    resw 1
    .Guest.ss.ValidSel      resw 1
    .Guest.ss.fFlags        resw 1
    .Guest.ss.u64Base       resq 1
    .Guest.ss.u32Limit      resd 1
    .Guest.ss.Attr          resd 1
    .Guest.ds.Sel           resw 1
    .Guest.ds.PaddingSel    resw 1
    .Guest.ds.ValidSel      resw 1
    .Guest.ds.fFlags        resw 1
    .Guest.ds.u64Base       resq 1
    .Guest.ds.u32Limit      resd 1
    .Guest.ds.Attr          resd 1
    .Guest.fs.Sel           resw 1
    .Guest.fs.PaddingSel    resw 1
    .Guest.fs.ValidSel      resw 1
    .Guest.fs.fFlags        resw 1
    .Guest.fs.u64Base       resq 1
    .Guest.fs.u32Limit      resd 1
    .Guest.fs.Attr          resd 1
    .Guest.gs.Sel           resw 1
    .Guest.gs.PaddingSel    resw 1
    .Guest.gs.ValidSel      resw 1
    .Guest.gs.fFlags        resw 1
    .Guest.gs.u64Base       resq 1
    .Guest.gs.u32Limit      resd 1
    .Guest.gs.Attr          resd 1
    .Guest.eip              resq 1
    .Guest.eflags           resq 1
    .Guest.cr0              resq 1
    .Guest.cr2              resq 1
    .Guest.cr3              resq 1
    .Guest.cr4              resq 1
    .Guest.dr               resq 8
    .Guest.gdtrPadding      resw 3
    .Guest.gdtr             resw 0
    .Guest.gdtr.cbGdt       resw 1
    .Guest.gdtr.pGdt        resq 1
    .Guest.idtrPadding      resw 3
    .Guest.idtr             resw 0
    .Guest.idtr.cbIdt       resw 1
    .Guest.idtr.pIdt        resq 1
    .Guest.ldtr.Sel         resw 1
    .Guest.ldtr.PaddingSel  resw 1
    .Guest.ldtr.ValidSel    resw 1
    .Guest.ldtr.fFlags      resw 1
    .Guest.ldtr.u64Base     resq 1
    .Guest.ldtr.u32Limit    resd 1
    .Guest.ldtr.Attr        resd 1
    .Guest.tr.Sel           resw 1
    .Guest.tr.PaddingSel    resw 1
    .Guest.tr.ValidSel      resw 1
    .Guest.tr.fFlags        resw 1
    .Guest.tr.u64Base       resq 1
    .Guest.tr.u32Limit      resd 1
    .Guest.tr.Attr          resd 1
    .Guest.SysEnter.cs      resb 8
    .Guest.SysEnter.eip     resb 8
    .Guest.SysEnter.esp     resb 8
    .Guest.msrEFER          resb 8
    .Guest.msrSTAR          resb 8
    .Guest.msrPAT           resb 8
    .Guest.msrLSTAR         resb 8
    .Guest.msrCSTAR         resb 8
    .Guest.msrSFMASK        resb 8
    .Guest.msrKERNELGSBASE  resb 8
    .Guest.uMsrPadding0     resb 8

    alignb 8
    .Guest.fExtrn           resq 1

    alignb 32
    .Guest.aPaePdpes        resq 4

    alignb 8
    .Guest.aXcr             resq 2
    .Guest.fXStateMask      resq 1
    .Guest.fUsedFpuGuest    resb 1
    alignb 8
    .Guest.aoffXState       resw 64
    alignb 256
    .Guest.abXState         resb 0x4000-0x300
    .Guest.XState           EQU .Guest.abXState

;;
    alignb 4096
    .Guest.hwvirt           resb 0
    .Guest.hwvirt.svm       resb 0
    .Guest.hwvirt.vmx       resb 0

    .Guest.hwvirt.svm.Vmcb                  EQU .Guest.hwvirt.svm
    .Guest.hwvirt.svm.abMsrBitmap           EQU (.Guest.hwvirt.svm.Vmcb + 0x1000)
    .Guest.hwvirt.svm.abIoBitmap            EQU (.Guest.hwvirt.svm.abMsrBitmap + 0x2000)
    .Guest.hwvirt.svm.uMsrHSavePa           EQU (.Guest.hwvirt.svm.abIoBitmap + 0x3000)        ; resq 1
    .Guest.hwvirt.svm.GCPhysVmcb            EQU (.Guest.hwvirt.svm.uMsrHSavePa + 8)            ; resq 1
    alignb 8
    .Guest.hwvirt.svm.HostState             EQU (.Guest.hwvirt.svm.GCPhysVmcb + 8)             ; resb 184
    .Guest.hwvirt.svm.uPrevPauseTick        EQU (.Guest.hwvirt.svm.HostState + 184)            ; resq 1
    .Guest.hwvirt.svm.cPauseFilter          EQU (.Guest.hwvirt.svm.uPrevPauseTick + 8)         ; resw 1
    .Guest.hwvirt.svm.cPauseFilterThreshold EQU (.Guest.hwvirt.svm.cPauseFilter + 2)           ; resw 1
    .Guest.hwvirt.svm.fInterceptEvents      EQU (.Guest.hwvirt.svm.cPauseFilterThreshold + 2)  ; resb 1

    .Guest.hwvirt.vmx.Vmcs                  resb 0x1000
    .Guest.hwvirt.vmx.ShadowVmcs            resb 0x1000
    .Guest.hwvirt.vmx.abVmreadBitmap        resb 0x1000
    .Guest.hwvirt.vmx.abVmwriteBitmap       resb 0x1000
    .Guest.hwvirt.vmx.aEntryMsrLoadArea     resb 0x2000
    .Guest.hwvirt.vmx.aExitMsrStoreArea     resb 0x2000
    .Guest.hwvirt.vmx.aExitMsrLoadArea      resb 0x2000
    .Guest.hwvirt.vmx.abMsrBitmap           resb 0x1000
    .Guest.hwvirt.vmx.abIoBitmap            resb 0x1000+0x1000
    .Guest.hwvirt.vmx.abVirtApicPage        resb 0x1000
    alignb 8
    .Guest.hwvirt.vmx.GCPhysVmxon           resq 1
    .Guest.hwvirt.vmx.GCPhysVmcs            resq 1
    .Guest.hwvirt.vmx.GCPhysShadowVmcs      resq 1
    .Guest.hwvirt.vmx.enmDiag               resd 1
    .Guest.hwvirt.vmx.enmAbort              resd 1
    .Guest.hwvirt.vmx.uDiagAux              resq 1
    .Guest.hwvirt.vmx.uAbortAux             resd 1
    .Guest.hwvirt.vmx.fInVmxRootMode        resb 1
    .Guest.hwvirt.vmx.fInVmxNonRootMode     resb 1
    .Guest.hwvirt.vmx.fInterceptEvents      resb 1
    .Guest.hwvirt.vmx.fNmiUnblockingIret    resb 1
    .Guest.hwvirt.vmx.uFirstPauseLoopTick   resq 1
    .Guest.hwvirt.vmx.uPrevPauseTick        resq 1
    .Guest.hwvirt.vmx.uEntryTick            resq 1
    .Guest.hwvirt.vmx.offVirtApicWrite      resw 1
    .Guest.hwvirt.vmx.fVirtNmiBlocking      resb 1
    alignb 8
    .Guest.hwvirt.vmx.Msrs                  resb 224

    alignb 8
    .Guest.hwvirt.enmHwvirt             resd 1
    .Guest.hwvirt.fGif                  resb 1
    alignb 8
    .Guest.hwvirt.fLocalForcedActions   resd 1
    alignb 64

    .GuestMsrs              resq 0
    .GuestMsrs.au64         resq 64

    ;
    ; Other stuff.
    ;
    .hNestedVmxPreemptTimer resq 1

    .fUseFlags              resd 1
    .fChanged               resd 1
    .u32RetCode             resd 1
    .fCpuIdApicFeatureVisible resb 1

    ;
    ; Host context state
    ;
    alignb 64
    .Host                   resb 0
    .Host.abXState          resb 0x4000-0x300
    .Host.XState            EQU .Host.abXState
    ;.Host.rax              resq 1 - scratch
    .Host.rbx               resq 1
    ;.Host.rcx              resq 1 - scratch
    ;.Host.rdx              resq 1 - scratch
    .Host.rdi               resq 1
    .Host.rsi               resq 1
    .Host.rbp               resq 1
    .Host.rsp               resq 1
    ;.Host.r8               resq 1 - scratch
    ;.Host.r9               resq 1 - scratch
    .Host.r10               resq 1
    .Host.r11               resq 1
    .Host.r12               resq 1
    .Host.r13               resq 1
    .Host.r14               resq 1
    .Host.r15               resq 1
    ;.Host.rip              resd 1 - scratch
    .Host.rflags            resq 1
    .Host.ss                resw 1
    .Host.ssPadding         resw 1
    .Host.gs                resw 1
    .Host.gsPadding         resw 1
    .Host.fs                resw 1
    .Host.fsPadding         resw 1
    .Host.es                resw 1
    .Host.esPadding         resw 1
    .Host.ds                resw 1
    .Host.dsPadding         resw 1
    .Host.cs                resw 1
    .Host.csPadding         resw 1

    .Host.cr0Fpu:
    .Host.cr0               resq 1
    ;.Host.cr2              resq 1 - scratch
    .Host.cr3               resq 1
    .Host.cr4               resq 1
    .Host.cr8               resq 1

    .Host.dr0               resq 1
    .Host.dr1               resq 1
    .Host.dr2               resq 1
    .Host.dr3               resq 1
    .Host.dr6               resq 1
    .Host.dr7               resq 1

    .Host.gdtr              resb 10 ; GDT limit + linear address
    .Host.gdtrPadding       resw 1
    .Host.idtr              resb 10 ; IDT limit + linear address
    .Host.idtrPadding       resw 1
    .Host.ldtr              resw 1
    .Host.ldtrPadding       resw 1
    .Host.tr                resw 1
    .Host.trPadding         resw 1

    .Host.SysEnter.cs       resq 1
    .Host.SysEnter.eip      resq 1
    .Host.SysEnter.esp      resq 1
    .Host.FSbase            resq 1
    .Host.GSbase            resq 1
    .Host.efer              resq 1
    alignb 8
    .Host.xcr0              resq 1
    .Host.fXStateMask       resq 1

    ;
    ; Hypervisor Context.
    ;
    alignb 64
    .Hyper                  resq 0
    .Hyper.dr               resq 8
    .Hyper.cr3              resq 1
    alignb 64

%ifdef VBOX_WITH_CRASHDUMP_MAGIC
    .aMagic                 resb 56
    .uMagic                 resq 1
%endif
endstruc

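;; Illustrative sketch (not part of the original file): NASM struc members are
;; just offset constants, so CPUMCPU fields are reached by adding the member
;; name to a base register holding the structure pointer. xBX as the base
;; register is an arbitrary choice for this sketch.
%if 0 ; Example only.
        mov     rax, [xBX + CPUMCPU.Guest.cr3]      ; Read the guest CR3 value.
        lea     rcx, [xBX + CPUMCPU.Host.XState]    ; Address of the host extended state area.
%endif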

%if 0 ; Currently not used anywhere.
;;
; Macro for FXSAVE/FXRSTOR leaky behaviour on AMD CPUs, see cpumR3CheckLeakyFpu().
;
; Cleans the FPU state, if necessary, before restoring the FPU.
;
; This macro ASSUMES CR0.TS is not set!
;
; @param xDX Pointer to CPUMCPU.
; @uses xAX, EFLAGS
;
; Changes here should also be reflected in CPUMRCA.asm's copy!
;
%macro CLEANFPU 0
        test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USE_FFXSR_LEAKY
        jz      .nothing_to_clean

        xor     eax, eax
        fnstsw  ax              ; FSW -> AX.
        test    eax, RT_BIT(7)  ; If FSW.ES (bit 7) is set, clear it to not cause FPU exceptions
                                ; while clearing & loading the FPU bits in 'clean_fpu' below.
        jz      .clean_fpu
        fnclex

.clean_fpu:
        ffree   st7             ; Clear the tag for FPU stack register 7 so the upcoming
                                ; push (load) cannot overflow if the stack top wraps around.
        fild    dword [g_r32_Zero xWrtRIP] ; Explicit FPU load to overwrite FIP, FOP, FDP registers in the FPU.
.nothing_to_clean:
%endmacro
%endif ; Unused.

;;
; Makes sure we don't trap (#NM) accessing the FPU.
;
; In ring-0 this is a bit of work since we may have to try to convince the host
; kernel to do the work for us; also, we must report any CR0 changes back to
; HMR0VMX via the VINF_CPUM_HOST_CR0_MODIFIED status code.
;
; If we end up clearing CR0.TS/EM ourselves in ring-0, we'll save the original
; value in CPUMCPU.Host.cr0Fpu. If we don't, we'll store zero there. (See also
; CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET.)
;
; In raw-mode we will always have to clear TS and it will be recalculated
; elsewhere and thus needs no saving.
;
; @param %1 Register to return the return status code in.
; @param %2 Temporary scratch register.
; @param %3 Ring-0 only, register pointing to the CPUMCPU structure
;           of the EMT we're on.
; @uses EFLAGS, CR0, %1, %2
;
%macro CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC 3
        ;
        ; Ring-0 - slightly more complicated than the old raw-mode.
        ;
        xor     %1, %1                          ; 0 / VINF_SUCCESS. Wishing for no CR0 changes.
        mov     [%3 + CPUMCPU.Host.cr0Fpu], %1

        mov     %2, cr0
        test    %2, X86_CR0_TS | X86_CR0_EM     ; Make sure it's safe to access the FPU state.
        jz      %%no_cr0_change

 %ifdef VMM_R0_TOUCH_FPU
        ; Touch the state and check that the kernel updated CR0 for us.
        movdqa  xmm0, xmm0
        mov     %2, cr0
        test    %2, X86_CR0_TS | X86_CR0_EM
        jz      %%cr0_changed
 %endif

        ; Save CR0 and clear the flags ourselves.
        mov     [%3 + CPUMCPU.Host.cr0Fpu], %2
        and     %2, ~(X86_CR0_TS | X86_CR0_EM)
        mov     cr0, %2

%%cr0_changed:
        mov     %1, VINF_CPUM_HOST_CR0_MODIFIED
%%no_cr0_change:
%endmacro


;;
; Restore CR0 if CR0.TS or CR0.EM were non-zero in the original state.
;
; @param %1 The original state to restore (or zero).
;
%macro CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET 1
        test    %1, X86_CR0_TS | X86_CR0_EM
        jz      %%skip_cr0_restore
        mov     cr0, %1
%%skip_cr0_restore:
%endmacro

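;; Illustrative sketch (not part of the original file): the two macros above
;; form a pair; the CR0 value the first one stashes in CPUMCPU.Host.cr0Fpu (or
;; zero) is exactly what the second one expects. The register choices (eax,
;; rcx, and rbx as the CPUMCPU pointer) are arbitrary for this sketch.
%if 0 ; Example only.
        CPUMRZ_TOUCH_FPU_CLEAR_CR0_FPU_TRAPS_SET_RC eax, rcx, rbx
        ; ... safely access the FPU/SSE state here ...
        mov     rcx, [rbx + CPUMCPU.Host.cr0Fpu]    ; Saved original CR0, or zero.
        CPUMRZ_RESTORE_CR0_IF_TS_OR_EM_SET rcx
        ; eax still holds VINF_SUCCESS or VINF_CPUM_HOST_CR0_MODIFIED for the caller.
%endif
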

;;
; Saves the host state.
;
; @uses rax, rdx
; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
; @param pXState Define for the register containing the extended state pointer.
;
%macro CPUMR0_SAVE_HOST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
        lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
        mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]

        ;
        ; XSAVE or FXSAVE?
        ;
        or      eax, eax
        jz      %%host_fxsave

        ; XSAVE
        mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
 %ifdef RT_ARCH_AMD64
        o64 xsave [pXState]
 %else
        xsave   [pXState]
 %endif
        jmp     %%host_done

        ; FXSAVE
%%host_fxsave:
 %ifdef RT_ARCH_AMD64
        o64 fxsave [pXState]    ; Use explicit REX prefix. See @bugref{6398}.
 %else
        fxsave  [pXState]
 %endif

%%host_done:
%endmacro ; CPUMR0_SAVE_HOST


;;
; Loads the host state.
;
; @uses rax, rdx
; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
; @param pXState Define for the register containing the extended state pointer.
;
%macro CPUMR0_LOAD_HOST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
        lea     pXState, [pCpumCpu + CPUMCPU.Host.XState]
        mov     eax, [pCpumCpu + CPUMCPU.Host.fXStateMask]

        ;
        ; XRSTOR or FXRSTOR?
        ;
        or      eax, eax
        jz      %%host_fxrstor

        ; XRSTOR
        mov     edx, [pCpumCpu + CPUMCPU.Host.fXStateMask + 4]
 %ifdef RT_ARCH_AMD64
        o64 xrstor [pXState]
 %else
        xrstor  [pXState]
 %endif
        jmp     %%host_done

        ; FXRSTOR
%%host_fxrstor:
 %ifdef RT_ARCH_AMD64
        o64 fxrstor [pXState]   ; Use explicit REX prefix. See @bugref{6398}.
 %else
        fxrstor [pXState]
 %endif

%%host_done:
%endmacro ; CPUMR0_LOAD_HOST


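;; Illustrative sketch (not part of the original file): CPUMR0_SAVE_HOST and
;; CPUMR0_LOAD_HOST expect pCpumCpu and pXState to be %defined as registers
;; before expansion; rdi and rsi below are an arbitrary choice for this sketch.
%if 0 ; Example only.
 %define pCpumCpu rdi
 %define pXState  rsi
        CPUMR0_SAVE_HOST        ; XSAVE/FXSAVE the host FPU/SSE/AVX state.
        ; ... run guest code ...
        CPUMR0_LOAD_HOST        ; XRSTOR/FXRSTOR it afterwards.
 %undef pCpumCpu
 %undef pXState
%endif
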
;; Macro for XSAVE/FXSAVE of the guest FPU state, which figures out whether to
; save the 32-bit or the 64-bit FPU state.
;
; @param %1 Pointer to CPUMCPU.
; @param %2 Pointer to XState.
; @param %3 Force AMD64.
; @param %4 The instruction to use (xsave or fxsave).
; @uses xAX, xDX, EFLAGS, 20h of stack.
;
%macro SAVE_32_OR_64_FPU 4
%if CPUM_IS_AMD64 || %3
        ; Save the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jnz     short %%save_long_mode_guest
%endif
        %4      [pXState]
%if CPUM_IS_AMD64 || %3
        jmp     %%save_done_32bit_cs_ds

%%save_long_mode_guest:
        o64 %4  [pXState]

        xor     edx, edx
        cmp     dword [pXState + X86FXSTATE.FPUCS], 0
        jne     short %%save_done

        sub     rsp, 20h        ; Only need 1ch bytes but keep stack aligned otherwise we #GP(0).
        fnstenv [rsp]
        movzx   eax, word [rsp + 10h]
        mov     [pXState + X86FXSTATE.FPUCS], eax
        movzx   eax, word [rsp + 18h]
        add     rsp, 20h
        mov     [pXState + X86FXSTATE.FPUDS], eax
%endif
%%save_done_32bit_cs_ds:
        mov     edx, X86_FXSTATE_RSVD_32BIT_MAGIC
%%save_done:
        mov     dword [pXState + X86_OFF_FXSTATE_RSVD], edx
%endmacro ; SAVE_32_OR_64_FPU


;;
; Save the guest state.
;
; @uses rax, rdx
; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
; @param pXState Define for the register containing the extended state pointer.
;
%macro CPUMR0_SAVE_GUEST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
 %ifdef IN_RING0
        lea     pXState, [pCpumCpu + CPUMCPU.Guest.XState]
 %else
  %error "Unsupported context!"
 %endif
        mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]

        ;
        ; XSAVE or FXSAVE?
        ;
        or      eax, eax
        jz      %%guest_fxsave

        ; XSAVE
        mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
 %ifdef VBOX_WITH_KERNEL_USING_XMM
        and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Already saved in HMR0A.asm.
 %endif
        SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, xsave
        jmp     %%guest_done

        ; FXSAVE
%%guest_fxsave:
        SAVE_32_OR_64_FPU pCpumCpu, pXState, 0, fxsave

%%guest_done:
%endmacro ; CPUMR0_SAVE_GUEST


;;
; Wrapper for selecting 32-bit or 64-bit XRSTOR/FXRSTOR according to what SAVE_32_OR_64_FPU did.
;
; @param %1 Pointer to CPUMCPU.
; @param %2 Pointer to XState.
; @param %3 Force AMD64.
; @param %4 The instruction to use (xrstor or fxrstor).
; @uses xAX, xDX, EFLAGS
;
%macro RESTORE_32_OR_64_FPU 4
%if CPUM_IS_AMD64 || %3
        ; Restore the guest FPU (32-bit or 64-bit), preserves existing broken state. See @bugref{7138}.
        test    dword [pCpumCpu + CPUMCPU.fUseFlags], CPUM_USE_SUPPORTS_LONGMODE
        jz      %%restore_32bit_fpu
        cmp     dword [pXState + X86_OFF_FXSTATE_RSVD], X86_FXSTATE_RSVD_32BIT_MAGIC
        jne     short %%restore_64bit_fpu
%%restore_32bit_fpu:
%endif
        %4      [pXState]
%if CPUM_IS_AMD64 || %3
        ; TODO: Restore XMM8-XMM15!
        jmp     short %%restore_fpu_done
%%restore_64bit_fpu:
        o64 %4  [pXState]
%%restore_fpu_done:
%endif
%endmacro ; RESTORE_32_OR_64_FPU


;;
; Loads the guest state.
;
; @uses rax, rdx
; @param pCpumCpu Define for the register containing the CPUMCPU pointer.
; @param pXState Define for the register containing the extended state pointer.
;
%macro CPUMR0_LOAD_GUEST 0
        ;
        ; Load a couple of registers we'll use later in all branches.
        ;
        lea     pXState, [pCpumCpu + CPUMCPU.Guest.XState]
        mov     eax, [pCpumCpu + CPUMCPU.Guest.fXStateMask]

        ;
        ; XRSTOR or FXRSTOR?
        ;
        or      eax, eax
        jz      %%guest_fxrstor

        ; XRSTOR
        mov     edx, [pCpumCpu + CPUMCPU.Guest.fXStateMask + 4]
 %ifdef VBOX_WITH_KERNEL_USING_XMM
        and     eax, ~CPUM_VOLATILE_XSAVE_GUEST_COMPONENTS ; Will be loaded by HMR0A.asm.
 %endif
        RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0, xrstor
        jmp     %%guest_done

        ; FXRSTOR
%%guest_fxrstor:
        RESTORE_32_OR_64_FPU pCpumCpu, pXState, 0, fxrstor

%%guest_done:
%endmacro ; CPUMR0_LOAD_GUEST

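;; Illustrative sketch (not part of the original file): like the host-state
;; pair above, the guest-state macros rely on pCpumCpu/pXState being %defined;
;; pairing a save at VM-exit with a load before VM-entry mirrors how ring-0
;; code would use them. Register choices are arbitrary for this sketch.
%if 0 ; Example only.
 %define pCpumCpu rdi
 %define pXState  rsi
        CPUMR0_SAVE_GUEST       ; Stash the guest FPU state at VM-exit.
        ; ... host work that may use the FPU ...
        CPUMR0_LOAD_GUEST       ; Reload it before resuming the guest.
 %undef pCpumCpu
 %undef pXState
%endif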